aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristian Ehrhardt <christian.ehrhardt@canonical.com>2019-07-04 10:40:06 +0200
committerChristian Ehrhardt <christian.ehrhardt@canonical.com>2019-07-04 10:48:05 +0200
commit8d53e9f3c6001dcb2865f6e894da5b54e1418f88 (patch)
tree63907f21c13636a987d43463c675d0727a04e327
parente2bea7436061ca2e7e14bfcfdc5870f2555c3965 (diff)
New upstream version 18.11.2upstream-18.11-stable
Change-Id: I23eb4f9179abf1f9c659891f8fddb27ee68ad26b Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-rwxr-xr-x.ci/linux-build.sh24
-rwxr-xr-x.ci/linux-setup.sh3
-rw-r--r--.gitignore1
-rw-r--r--.travis.yml98
-rw-r--r--MAINTAINERS8
-rw-r--r--app/pdump/main.c17
-rw-r--r--app/test-bbdev/test_bbdev.c10
-rw-r--r--app/test-crypto-perf/main.c5
-rw-r--r--app/test-pmd/cmdline.c269
-rw-r--r--app/test-pmd/cmdline_flow.c128
-rw-r--r--app/test-pmd/config.c14
-rw-r--r--app/test-pmd/csumonly.c2
-rw-r--r--app/test-pmd/testpmd.c92
-rw-r--r--app/test-pmd/testpmd.h13
-rw-r--r--buildtools/symlink-drivers-solibs.sh7
-rw-r--r--config/arm/meson.build10
-rw-r--r--config/meson.build7
-rw-r--r--config/x86/meson.build21
-rwxr-xr-xdevtools/check-symbol-change.sh44
-rwxr-xr-xdevtools/checkpatches.sh2
-rwxr-xr-xdevtools/test-build.sh12
-rwxr-xr-xdevtools/test-meson-builds.sh10
-rw-r--r--doc/guides/compressdevs/overview.rst2
-rw-r--r--doc/guides/conf.py5
-rw-r--r--doc/guides/contributing/coding_style.rst4
-rw-r--r--doc/guides/contributing/documentation.rst9
-rw-r--r--doc/guides/contributing/patches.rst6
-rw-r--r--doc/guides/contributing/versioning.rst19
-rw-r--r--doc/guides/cryptodevs/aesni_mb.rst2
-rw-r--r--doc/guides/cryptodevs/features/aesni_gcm.ini5
-rw-r--r--doc/guides/cryptodevs/features/aesni_mb.ini14
-rw-r--r--doc/guides/cryptodevs/features/armv8.ini5
-rw-r--r--doc/guides/cryptodevs/features/caam_jr.ini5
-rw-r--r--doc/guides/cryptodevs/features/ccp.ini5
-rw-r--r--doc/guides/cryptodevs/features/default.ini9
-rw-r--r--doc/guides/cryptodevs/features/dpaa2_sec.ini5
-rw-r--r--doc/guides/cryptodevs/features/dpaa_sec.ini5
-rw-r--r--doc/guides/cryptodevs/features/kasumi.ini5
-rw-r--r--doc/guides/cryptodevs/features/mvsam.ini5
-rw-r--r--doc/guides/cryptodevs/features/null.ini5
-rw-r--r--doc/guides/cryptodevs/features/octeontx.ini5
-rw-r--r--doc/guides/cryptodevs/features/qat.ini5
-rw-r--r--doc/guides/cryptodevs/features/snow3g.ini5
-rw-r--r--doc/guides/cryptodevs/features/virtio.ini5
-rw-r--r--doc/guides/cryptodevs/features/zuc.ini5
-rw-r--r--doc/guides/cryptodevs/openssl.rst8
-rw-r--r--doc/guides/cryptodevs/overview.rst9
-rw-r--r--doc/guides/cryptodevs/scheduler.rst2
-rw-r--r--doc/guides/eventdevs/opdl.rst2
-rw-r--r--doc/guides/eventdevs/sw.rst4
-rw-r--r--doc/guides/howto/lm_bond_virtio_sriov.rst2
-rw-r--r--doc/guides/howto/lm_virtio_vhost_user.rst4
-rw-r--r--doc/guides/howto/rte_flow.rst6
-rw-r--r--doc/guides/howto/virtio_user_as_exceptional_path.rst8
-rw-r--r--doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst16
-rw-r--r--doc/guides/linux_gsg/sys_reqs.rst2
-rw-r--r--doc/guides/nics/atlantic.rst2
-rw-r--r--doc/guides/nics/cxgbe.rst4
-rw-r--r--doc/guides/nics/dpaa.rst2
-rw-r--r--doc/guides/nics/dpaa2.rst2
-rw-r--r--doc/guides/nics/enetc.rst2
-rw-r--r--doc/guides/nics/enic.rst13
-rw-r--r--doc/guides/nics/features.rst4
-rw-r--r--doc/guides/nics/features/qede.ini1
-rw-r--r--doc/guides/nics/i40e.rst2
-rw-r--r--doc/guides/nics/ixgbe.rst4
-rw-r--r--doc/guides/nics/kni.rst2
-rw-r--r--doc/guides/nics/mlx5.rst15
-rw-r--r--doc/guides/nics/mvpp2.rst2
-rw-r--r--doc/guides/nics/netvsc.rst2
-rw-r--r--doc/guides/nics/sfc_efx.rst14
-rw-r--r--doc/guides/nics/szedata2.rst2
-rw-r--r--doc/guides/nics/tap.rst2
-rw-r--r--doc/guides/platform/dpaa.rst4
-rw-r--r--doc/guides/platform/dpaa2.rst4
-rw-r--r--doc/guides/prog_guide/bbdev.rst4
-rw-r--r--doc/guides/prog_guide/compressdev.rst6
-rw-r--r--doc/guides/prog_guide/cryptodev_lib.rst12
-rw-r--r--doc/guides/prog_guide/dev_kit_build_system.rst4
-rw-r--r--doc/guides/prog_guide/efd_lib.rst2
-rw-r--r--doc/guides/prog_guide/env_abstraction_layer.rst32
-rw-r--r--doc/guides/prog_guide/event_ethernet_rx_adapter.rst6
-rw-r--r--doc/guides/prog_guide/eventdev.rst6
-rw-r--r--doc/guides/prog_guide/kernel_nic_interface.rst2
-rw-r--r--doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst8
-rw-r--r--doc/guides/prog_guide/lpm_lib.rst2
-rw-r--r--doc/guides/prog_guide/metrics_lib.rst2
-rw-r--r--doc/guides/prog_guide/multi_proc_support.rst22
-rw-r--r--doc/guides/prog_guide/poll_mode_drv.rst6
-rw-r--r--doc/guides/prog_guide/profile_app.rst4
-rw-r--r--doc/guides/prog_guide/rte_flow.rst8
-rw-r--r--doc/guides/prog_guide/rte_security.rst20
-rw-r--r--doc/guides/prog_guide/traffic_management.rst2
-rw-r--r--doc/guides/prog_guide/vhost_lib.rst2
-rw-r--r--doc/guides/rawdevs/ifpga_rawdev.rst2
-rw-r--r--doc/guides/rel_notes/known_issues.rst8
-rw-r--r--doc/guides/rel_notes/release_17_11.rst10
-rw-r--r--doc/guides/rel_notes/release_18_11.rst515
-rw-r--r--doc/guides/sample_app_ug/bbdev_app.rst4
-rw-r--r--doc/guides/sample_app_ug/eventdev_pipeline.rst2
-rw-r--r--doc/guides/sample_app_ug/intro.rst2
-rw-r--r--doc/guides/sample_app_ug/ip_pipeline.rst4
-rw-r--r--doc/guides/sample_app_ug/ipsec_secgw.rst4
-rw-r--r--doc/guides/sample_app_ug/performance_thread.rst4
-rw-r--r--doc/guides/sample_app_ug/qos_metering.rst2
-rw-r--r--doc/guides/sample_app_ug/test_pipeline.rst2
-rw-r--r--doc/guides/sample_app_ug/vhost.rst4
-rw-r--r--doc/guides/sample_app_ug/vhost_scsi.rst2
-rw-r--r--doc/guides/sample_app_ug/vm_power_management.rst15
-rw-r--r--doc/guides/testpmd_app_ug/run_app.rst12
-rw-r--r--doc/guides/testpmd_app_ug/testpmd_funcs.rst158
-rw-r--r--doc/guides/tools/cryptoperf.rst22
-rw-r--r--doc/guides/tools/proc_info.rst2
-rw-r--r--doc/guides/tools/testbbdev.rst10
-rw-r--r--drivers/bus/dpaa/base/fman/fman_hw.c4
-rw-r--r--drivers/bus/fslmc/fslmc_bus.c6
-rw-r--r--drivers/bus/fslmc/qbman/include/compat.h2
-rw-r--r--drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h6
-rw-r--r--drivers/bus/fslmc/qbman/qbman_debug.c5
-rw-r--r--drivers/bus/vdev/vdev.c17
-rw-r--r--drivers/bus/vmbus/linux/vmbus_uio.c56
-rw-r--r--drivers/bus/vmbus/private.h3
-rw-r--r--drivers/bus/vmbus/vmbus_channel.c20
-rw-r--r--drivers/bus/vmbus/vmbus_common_uio.c22
-rw-r--r--drivers/common/cpt/cpt_ucode.h2
-rw-r--r--drivers/common/qat/qat_qp.c14
-rw-r--r--drivers/compress/isal/isal_compress_pmd.c5
-rw-r--r--drivers/compress/isal/isal_compress_pmd_ops.c16
-rw-r--r--drivers/compress/qat/qat_comp_pmd.c9
-rw-r--r--drivers/crypto/caam_jr/caam_jr.c4
-rw-r--r--drivers/crypto/caam_jr/caam_jr_uio.c10
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c12
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/pdcp.h4
-rw-r--r--drivers/crypto/dpaa_sec/dpaa_sec.c2
-rw-r--r--drivers/crypto/kasumi/meson.build8
-rw-r--r--drivers/crypto/meson.build2
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd.c12
-rw-r--r--drivers/crypto/qat/qat_sym_session.c3
-rw-r--r--drivers/crypto/snow3g/meson.build13
-rw-r--r--drivers/crypto/virtio/virtio_logs.h4
-rw-r--r--drivers/crypto/virtio/virtio_rxtx.c4
-rw-r--r--drivers/crypto/zuc/meson.build8
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.h7
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev_logs.h3
-rw-r--r--drivers/event/dsw/dsw_evdev.c13
-rw-r--r--drivers/event/meson.build6
-rw-r--r--drivers/event/opdl/opdl_evdev.c7
-rw-r--r--drivers/event/opdl/opdl_evdev_xstats.c7
-rw-r--r--drivers/event/opdl/opdl_ring.h8
-rw-r--r--drivers/event/sw/sw_evdev_selftest.c22
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.c4
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h3
-rw-r--r--drivers/net/atlantic/atl_ethdev.c106
-rw-r--r--drivers/net/atlantic/atl_rxtx.c21
-rw-r--r--drivers/net/atlantic/atl_types.h9
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_b0.c14
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_b0.h2
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_utils.c19
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_utils.h16
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c130
-rw-r--r--drivers/net/avf/avf_ethdev.c3
-rw-r--r--drivers/net/axgbe/axgbe_common.h4
-rw-r--r--drivers/net/bnx2x/bnx2x.c59
-rw-r--r--drivers/net/bnx2x/bnx2x.h9
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c32
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.c21
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.h3
-rw-r--r--drivers/net/bnx2x/ecore_hsi.h2
-rw-r--r--drivers/net/bnx2x/ecore_reg.h2
-rw-r--r--drivers/net/bnx2x/ecore_sp.c12
-rw-r--r--drivers/net/bnx2x/ecore_sp.h17
-rw-r--r--drivers/net/bnx2x/elink.h2
-rw-r--r--drivers/net/bnxt/bnxt.h2
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c22
-rw-r--r--drivers/net/bnxt/bnxt_rxq.c3
-rw-r--r--drivers/net/bnxt/bnxt_rxr.c4
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c22
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad_private.h1
-rw-r--r--drivers/net/bonding/rte_eth_bond_alb.c4
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c8
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c167
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h15
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c2
-rw-r--r--drivers/net/cxgbe/base/t4_pci_id_tbl.h52
-rw-r--r--drivers/net/cxgbe/cxgbe.h24
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c15
-rw-r--r--drivers/net/cxgbe/cxgbe_main.c36
-rw-r--r--drivers/net/cxgbe/cxgbevf_main.c10
-rw-r--r--drivers/net/cxgbe/sge.c78
-rw-r--r--drivers/net/dpaa2/dpaa2_pmd_logs.h3
-rw-r--r--drivers/net/e1000/base/e1000_82575.h4
-rw-r--r--drivers/net/e1000/base/e1000_ich8lan.c2
-rw-r--r--drivers/net/enetc/enetc_ethdev.c6
-rw-r--r--drivers/net/enetc/enetc_rxtx.c12
-rw-r--r--drivers/net/enic/enic.h12
-rw-r--r--drivers/net/enic/enic_clsf.c38
-rw-r--r--drivers/net/enic/enic_flow.c659
-rw-r--r--drivers/net/enic/enic_main.c9
-rw-r--r--drivers/net/enic/enic_res.c5
-rw-r--r--drivers/net/enic/enic_rxtx_common.h3
-rw-r--r--drivers/net/fm10k/fm10k_rxtx.c4
-rw-r--r--drivers/net/fm10k/fm10k_rxtx_vec.c6
-rw-r--r--drivers/net/i40e/i40e_ethdev.c60
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c5
-rw-r--r--drivers/net/i40e/i40e_flow.c8
-rw-r--r--drivers/net/i40e/i40e_rxtx.c11
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c6
-rw-r--r--drivers/net/kni/rte_eth_kni.c5
-rw-r--r--drivers/net/mlx4/mlx4.c14
-rw-r--r--drivers/net/mlx4/mlx4.h26
-rw-r--r--drivers/net/mlx4/mlx4_ethdev.c38
-rw-r--r--drivers/net/mlx4/mlx4_flow.c67
-rw-r--r--drivers/net/mlx4/mlx4_flow.h6
-rw-r--r--drivers/net/mlx4/mlx4_intr.c40
-rw-r--r--drivers/net/mlx4/mlx4_mr.c51
-rw-r--r--drivers/net/mlx4/mlx4_rxq.c56
-rw-r--r--drivers/net/mlx4/mlx4_rxtx.h18
-rw-r--r--drivers/net/mlx4/mlx4_txq.c14
-rw-r--r--drivers/net/mlx5/mlx5.c23
-rw-r--r--drivers/net/mlx5/mlx5.h7
-rw-r--r--drivers/net/mlx5/mlx5_defs.h3
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c28
-rw-r--r--drivers/net/mlx5/mlx5_flow.c72
-rw-r--r--drivers/net/mlx5/mlx5_flow.h1
-rw-r--r--drivers/net/mlx5/mlx5_flow_dv.c20
-rw-r--r--drivers/net/mlx5/mlx5_flow_tcf.c8
-rw-r--r--drivers/net/mlx5/mlx5_flow_verbs.c12
-rw-r--r--drivers/net/mlx5/mlx5_mac.c4
-rw-r--r--drivers/net/mlx5/mlx5_mr.c57
-rw-r--r--drivers/net/mlx5/mlx5_nl.c12
-rw-r--r--drivers/net/mlx5/mlx5_rss.c10
-rw-r--r--drivers/net/mlx5/mlx5_rxmode.c8
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c71
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c5
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h9
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec.c4
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec.h17
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_neon.h12
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_sse.h11
-rw-r--r--drivers/net/mlx5/mlx5_socket.c8
-rw-r--r--drivers/net/mlx5/mlx5_stats.c14
-rw-r--r--drivers/net/mlx5/mlx5_trigger.c16
-rw-r--r--drivers/net/mlx5/mlx5_txq.c65
-rw-r--r--drivers/net/mlx5/mlx5_vlan.c6
-rw-r--r--drivers/net/mvpp2/mrvl_mtr.c3
-rw-r--r--drivers/net/netvsc/hn_ethdev.c3
-rw-r--r--drivers/net/netvsc/hn_rxtx.c55
-rw-r--r--drivers/net/netvsc/hn_var.h32
-rw-r--r--drivers/net/netvsc/hn_vf.c84
-rw-r--r--drivers/net/nfp/nfp_net.c53
-rw-r--r--drivers/net/nfp/nfp_net_ctrl.h4
-rw-r--r--drivers/net/nfp/nfp_net_pmd.h4
-rw-r--r--drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h3
-rw-r--r--drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c18
-rw-r--r--drivers/net/nfp/nfpcore/nfp_cppcore.c9
-rw-r--r--drivers/net/octeontx/octeontx_ethdev.h1
-rw-r--r--drivers/net/pcap/rte_eth_pcap.c5
-rw-r--r--drivers/net/qede/base/common_hsi.h4
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h2
-rw-r--r--drivers/net/qede/base/ecore_hsi_eth.h2
-rw-r--r--drivers/net/qede/base/ecore_hw_defs.h2
-rw-r--r--drivers/net/qede/qede_ethdev.c6
-rw-r--r--drivers/net/qede/qede_rxtx.c17
-rw-r--r--drivers/net/ring/rte_eth_ring.c96
-rw-r--r--drivers/net/sfc/sfc.c6
-rw-r--r--drivers/net/sfc/sfc.h2
-rw-r--r--drivers/net/sfc/sfc_debug.h3
-rw-r--r--drivers/net/sfc/sfc_ef10_tx.c11
-rw-r--r--drivers/net/sfc/sfc_ethdev.c72
-rw-r--r--drivers/net/sfc/sfc_mcdi.c3
-rw-r--r--drivers/net/sfc/sfc_rx.c22
-rw-r--r--drivers/net/sfc/sfc_rx.h4
-rw-r--r--drivers/net/sfc/sfc_tso.c11
-rw-r--r--drivers/net/sfc/sfc_tx.c3
-rw-r--r--drivers/net/softnic/rte_eth_softnic_flow.c5
-rw-r--r--drivers/net/tap/rte_eth_tap.c28
-rw-r--r--drivers/net/tap/tap_bpf_program.c2
-rw-r--r--drivers/net/vdev_netvsc/vdev_netvsc.c7
-rw-r--r--drivers/net/virtio/virtio_ethdev.c24
-rw-r--r--drivers/net/virtio/virtio_ethdev.h2
-rw-r--r--drivers/net/virtio/virtio_rxtx.c36
-rw-r--r--drivers/net/virtio/virtio_user/vhost.h4
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.c12
-rw-r--r--drivers/net/virtio/virtio_user/vhost_user.c5
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c3
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.h2
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c61
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c1
-rw-r--r--drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c2
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma.c21
-rw-r--r--drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h4
-rw-r--r--drivers/raw/ifpga_rawdev/ifpga_rawdev.c3
-rw-r--r--drivers/raw/ifpga_rawdev/ifpga_rawdev.h4
-rw-r--r--drivers/raw/skeleton_rawdev/skeleton_rawdev.c5
-rw-r--r--examples/ethtool/lib/rte_ethtool.h4
-rw-r--r--examples/fips_validation/main.c8
-rw-r--r--examples/ip_pipeline/meson.build1
-rw-r--r--examples/ipsec-secgw/Makefile2
-rw-r--r--examples/ipsec-secgw/esp.c5
-rw-r--r--examples/ipsec-secgw/ipsec-secgw.c26
-rw-r--r--examples/ipsec-secgw/ipsec.h14
-rw-r--r--examples/ipsec-secgw/sa.c58
-rw-r--r--examples/ipsec-secgw/sp4.c47
-rw-r--r--examples/ipsec-secgw/sp6.c47
-rw-r--r--examples/l2fwd-cat/cat.c2
-rw-r--r--examples/multi_process/client_server_mp/mp_server/init.c13
-rw-r--r--examples/performance-thread/common/lthread_api.h10
-rw-r--r--examples/vhost_crypto/meson.build1
-rw-r--r--examples/vhost_scsi/vhost_scsi.c6
-rw-r--r--examples/vm_power_manager/channel_monitor.c12
-rw-r--r--examples/vm_power_manager/main.c15
-rw-r--r--examples/vm_power_manager/oob_monitor_x86.c23
-rw-r--r--examples/vm_power_manager/power_manager.c2
-rw-r--r--kernel/linux/kni/ethtool/igb/igb_main.c6
-rw-r--r--kernel/linux/kni/ethtool/igb/kcompat.h4
-rw-r--r--kernel/linux/kni/kni_misc.c2
-rw-r--r--lib/librte_acl/acl_vect.h4
-rw-r--r--lib/librte_acl/meson.build2
-rw-r--r--lib/librte_bbdev/rte_bbdev.h4
-rw-r--r--lib/librte_bitratestats/rte_bitrate.c6
-rw-r--r--lib/librte_bpf/rte_bpf.h6
-rw-r--r--lib/librte_bpf/rte_bpf_ethdev.h4
-rw-r--r--lib/librte_cfgfile/rte_cfgfile.c4
-rw-r--r--lib/librte_cryptodev/rte_crypto_asym.h10
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.c4
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.h2
-rw-r--r--lib/librte_distributor/rte_distributor_private.h2
-rw-r--r--lib/librte_eal/bsdapp/eal/eal.c14
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_hugepage_info.c2
-rw-r--r--lib/librte_eal/common/eal_common_memory.c13
-rw-r--r--lib/librte_eal/common/eal_common_options.c243
-rw-r--r--lib/librte_eal/common/eal_common_proc.c57
-rw-r--r--lib/librte_eal/common/eal_common_thread.c23
-rw-r--r--lib/librte_eal/common/eal_internal_cfg.h3
-rw-r--r--lib/librte_eal/common/eal_options.h2
-rw-r--r--lib/librte_eal/common/hotplug_mp.c2
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h8
-rw-r--r--lib/librte_eal/common/include/generic/rte_cycles.h2
-rw-r--r--lib/librte_eal/common/include/generic/rte_rwlock.h16
-rw-r--r--lib/librte_eal/common/include/generic/rte_spinlock.h18
-rw-r--r--lib/librte_eal/common/include/generic/rte_vect.h2
-rw-r--r--lib/librte_eal/common/include/rte_class.h2
-rw-r--r--lib/librte_eal/common/include/rte_common.h4
-rw-r--r--lib/librte_eal/common/include/rte_eal.h26
-rw-r--r--lib/librte_eal/common/include/rte_lcore.h17
-rw-r--r--lib/librte_eal/common/include/rte_log.h2
-rw-r--r--lib/librte_eal/common/include/rte_malloc.h2
-rw-r--r--lib/librte_eal/common/include/rte_service.h2
-rw-r--r--lib/librte_eal/common/include/rte_string_fns.h16
-rw-r--r--lib/librte_eal/common/include/rte_tailq.h2
-rw-r--r--lib/librte_eal/common/include/rte_uuid.h4
-rw-r--r--lib/librte_eal/common/include/rte_version.h2
-rw-r--r--lib/librte_eal/common/include/rte_vfio.h6
-rw-r--r--lib/librte_eal/common/malloc_mp.c2
-rw-r--r--lib/librte_eal/linuxapp/eal/eal.c19
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_dev.c4
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_hugepage_info.c2
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memalloc.c4
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memory.c26
-rw-r--r--lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h2
-rw-r--r--lib/librte_efd/rte_efd.h2
-rw-r--r--lib/librte_ethdev/rte_eth_ctrl.h2
-rw-r--r--lib/librte_ethdev/rte_ethdev.c7
-rw-r--r--lib/librte_ethdev/rte_ethdev.h10
-rw-r--r--lib/librte_ethdev/rte_ethdev_core.h4
-rw-r--r--lib/librte_ethdev/rte_ethdev_driver.h2
-rw-r--r--lib/librte_ethdev/rte_tm.h8
-rw-r--r--lib/librte_eventdev/rte_event_crypto_adapter.c16
-rw-r--r--lib/librte_eventdev/rte_event_crypto_adapter.h2
-rw-r--r--lib/librte_eventdev/rte_event_eth_rx_adapter.c2
-rw-r--r--lib/librte_eventdev/rte_event_eth_rx_adapter.h4
-rw-r--r--lib/librte_eventdev/rte_event_eth_tx_adapter.h3
-rw-r--r--lib/librte_eventdev/rte_eventdev.h21
-rw-r--r--lib/librte_eventdev/rte_eventdev_pmd.h2
-rw-r--r--lib/librte_flow_classify/rte_flow_classify.h4
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.c15
-rw-r--r--lib/librte_hash/rte_hash.h4
-rw-r--r--lib/librte_ip_frag/rte_ip_frag.h2
-rw-r--r--lib/librte_kni/rte_kni.h2
-rw-r--r--lib/librte_latencystats/rte_latencystats.h2
-rw-r--r--lib/librte_lpm/rte_lpm.h2
-rw-r--r--lib/librte_mbuf/rte_mbuf.h18
-rw-r--r--lib/librte_mbuf/rte_mbuf_ptype.h2
-rw-r--r--lib/librte_mempool/rte_mempool.h4
-rw-r--r--lib/librte_net/rte_ether.h2
-rw-r--r--lib/librte_power/power_acpi_cpufreq.c5
-rw-r--r--lib/librte_power/rte_power.c30
-rw-r--r--lib/librte_power/rte_power.h2
-rw-r--r--lib/librte_power/rte_power_empty_poll.c3
-rw-r--r--lib/librte_power/rte_power_empty_poll.h2
-rw-r--r--lib/librte_rawdev/rte_rawdev.h6
-rw-r--r--lib/librte_rawdev/rte_rawdev_pmd.h8
-rw-r--r--lib/librte_reorder/rte_reorder.h2
-rw-r--r--lib/librte_ring/rte_ring.c3
-rw-r--r--lib/librte_ring/rte_ring.h4
-rw-r--r--lib/librte_ring/rte_ring_generic.h9
-rw-r--r--lib/librte_sched/rte_sched.h2
-rw-r--r--lib/librte_security/rte_security.h4
-rw-r--r--lib/librte_table/rte_table_hash.h6
-rw-r--r--lib/librte_table/rte_table_hash_func.h2
-rw-r--r--lib/librte_telemetry/Makefile5
-rw-r--r--lib/librte_telemetry/rte_telemetry_parser.c22
-rw-r--r--lib/librte_vhost/rte_vhost.h4
-rw-r--r--lib/librte_vhost/socket.c22
-rw-r--r--lib/librte_vhost/vdpa.c7
-rw-r--r--lib/librte_vhost/vhost.c16
-rw-r--r--lib/librte_vhost/vhost.h46
-rw-r--r--lib/librte_vhost/vhost_crypto.c4
-rw-r--r--lib/librte_vhost/vhost_user.c54
-rw-r--r--lib/librte_vhost/virtio_net.c34
-rw-r--r--meson.build9
-rw-r--r--mk/exec-env/linuxapp/rte.vars.mk6
-rw-r--r--mk/rte.app.mk3
-rw-r--r--mk/rte.lib.mk2
-rw-r--r--mk/toolchain/gcc/rte.toolchain-compat.mk11
-rw-r--r--mk/toolchain/gcc/rte.vars.mk3
-rw-r--r--pkg/dpdk.spec2
-rw-r--r--test/test/autotest_data.py6
-rw-r--r--test/test/commands.c12
-rw-r--r--test/test/meson.build3
-rw-r--r--test/test/test_barrier.c6
-rw-r--r--test/test/test_compressdev.c1
-rw-r--r--test/test/test_cryptodev.c3
-rw-r--r--test/test/test_cryptodev_blockcipher.h4
-rw-r--r--test/test/test_distributor.c7
-rw-r--r--test/test/test_eal_flags.c1
-rw-r--r--test/test/test_event_eth_rx_adapter.c3
-rw-r--r--test/test/test_hash_perf.c6
-rw-r--r--test/test/test_link_bonding.c45
-rw-r--r--test/test/test_pmd_perf.c13
-rw-r--r--test/test/test_spinlock.c31
-rw-r--r--test/test/test_string_fns.c45
433 files changed, 4961 insertions, 2394 deletions
diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh
new file mode 100755
index 00000000..4eb7c3cf
--- /dev/null
+++ b/.ci/linux-build.sh
@@ -0,0 +1,24 @@
+#!/bin/sh -xe
+
+on_error() {
+ if [ $? = 0 ]; then
+ exit
+ fi
+ FILES_TO_PRINT="build/meson-logs/testlog.txt build/.ninja_log build/meson-logs/meson-log.txt"
+
+ for pr_file in $FILES_TO_PRINT; do
+ if [ -e "$pr_file" ]; then
+ cat "$pr_file"
+ fi
+ done
+}
+trap on_error EXIT
+
+if [ "$AARCH64" = "1" ]; then
+ # convert the arch specifier
+ OPTS="$OPTS --cross-file config/arm/arm64_armv8_linuxapp_gcc"
+fi
+
+OPTS="$OPTS --default-library=$DEF_LIB"
+meson build --werror -Dexamples=all $OPTS
+ninja -C build
diff --git a/.ci/linux-setup.sh b/.ci/linux-setup.sh
new file mode 100755
index 00000000..acdf9f37
--- /dev/null
+++ b/.ci/linux-setup.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+python3 -m pip install --upgrade meson --user
diff --git a/.gitignore b/.gitignore
index 9105e26c..3d6b35ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ doc/guides/cryptodevs/overview_feature_table.txt
doc/guides/cryptodevs/overview_cipher_table.txt
doc/guides/cryptodevs/overview_auth_table.txt
doc/guides/cryptodevs/overview_aead_table.txt
+doc/guides/cryptodevs/overview_asym_table.txt
doc/guides/compressdevs/overview_feature_table.txt
cscope.out.po
cscope.out.in
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..7b167fa6
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,98 @@
+language: c
+cache: ccache
+compiler:
+ - gcc
+ - clang
+
+dist: xenial
+
+os:
+ - linux
+
+addons:
+ apt:
+ update: true
+ packages: &required_packages
+ - [libnuma-dev, linux-headers-$(uname -r), python3-setuptools, python3-wheel, python3-pip, ninja-build]
+
+aarch64_packages: &aarch64_packages
+ - *required_packages
+ - [gcc-aarch64-linux-gnu, libc6-dev-arm64-cross]
+
+extra_packages: &extra_packages
+ - *required_packages
+ - [libbsd-dev, libpcap-dev, libcrypto++-dev, libjansson4]
+
+before_install: ./.ci/${TRAVIS_OS_NAME}-setup.sh
+
+env:
+ - DEF_LIB="static"
+ - DEF_LIB="shared"
+ - DEF_LIB="static" OPTS="-Denable_kmods=false"
+ - DEF_LIB="shared" OPTS="-Denable_kmods=false"
+
+matrix:
+ include:
+ - env: DEF_LIB="static" OPTS="-Denable_kmods=false" AARCH64=1
+ compiler: gcc
+ addons:
+ apt:
+ packages:
+ - *aarch64_packages
+ - env: DEF_LIB="shared" OPTS="-Denable_kmods=false" AARCH64=1
+ compiler: gcc
+ addons:
+ apt:
+ packages:
+ - *aarch64_packages
+ - env: DEF_LIB="static" EXTRA_PACKAGES=1
+ compiler: gcc
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+ - env: DEF_LIB="shared" EXTRA_PACKAGES=1
+ compiler: gcc
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+ - env: DEF_LIB="static" OPTS="-Denable_kmods=false" EXTRA_PACKAGES=1
+ compiler: gcc
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+ - env: DEF_LIB="shared" OPTS="-Denable_kmods=false" EXTRA_PACKAGES=1
+ compiler: gcc
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+ - env: DEF_LIB="static" EXTRA_PACKAGES=1
+ compiler: clang
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+ - env: DEF_LIB="shared" EXTRA_PACKAGES=1
+ compiler: clang
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+ - env: DEF_LIB="static" OPTS="-Denable_kmods=false" EXTRA_PACKAGES=1
+ compiler: clang
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+ - env: DEF_LIB="shared" OPTS="-Denable_kmods=false" EXTRA_PACKAGES=1
+ compiler: clang
+ addons:
+ apt:
+ packages:
+ - *extra_packages
+
+
+script: ./.ci/${TRAVIS_OS_NAME}-build.sh
diff --git a/MAINTAINERS b/MAINTAINERS
index 71ba3120..d0ba42ee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -120,6 +120,12 @@ F: config/rte_config.h
F: buildtools/gen-pmdinfo-cfile.sh
F: buildtools/symlink-drivers-solibs.sh
+Public CI
+M: Aaron Conole <aconole@redhat.com>
+M: Michael Santana <msantana@redhat.com>
+F: .travis.yml
+F: .ci/
+
ABI versioning
M: Neil Horman <nhorman@tuxdriver.com>
F: lib/librte_compat/
@@ -234,7 +240,7 @@ F: drivers/net/i40e/i40e_rxtx_vec_neon.c
F: drivers/net/virtio/virtio_rxtx_simple_neon.c
IBM POWER (alpha)
-M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
+M: David Christensen <drc@linux.vnet.ibm.com>
F: lib/librte_eal/common/arch/ppc_64/
F: lib/librte_eal/common/include/arch/ppc_64/
F: drivers/net/i40e/i40e_rxtx_vec_altivec.c
diff --git a/app/pdump/main.c b/app/pdump/main.c
index 5e183ea9..ccf2a1d2 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -512,12 +512,19 @@ cleanup_pdump_resources(void)
if (pt->dir & RTE_PDUMP_FLAG_TX)
free_ring_data(pt->tx_ring, pt->tx_vdev_id, &pt->stats);
- /* Remove the vdev created */
- rte_eth_dev_get_name_by_port(pt->rx_vdev_id, name);
- rte_eal_hotplug_remove("vdev", name);
+ /* Remove the vdev(s) created */
+ if (pt->dir & RTE_PDUMP_FLAG_RX) {
+ rte_eth_dev_get_name_by_port(pt->rx_vdev_id, name);
+ rte_eal_hotplug_remove("vdev", name);
+ }
+
+ if (pt->single_pdump_dev)
+ continue;
- rte_eth_dev_get_name_by_port(pt->tx_vdev_id, name);
- rte_eal_hotplug_remove("vdev", name);
+ if (pt->dir & RTE_PDUMP_FLAG_TX) {
+ rte_eth_dev_get_name_by_port(pt->tx_vdev_id, name);
+ rte_eal_hotplug_remove("vdev", name);
+ }
}
cleanup_rings();
diff --git a/app/test-bbdev/test_bbdev.c b/app/test-bbdev/test_bbdev.c
index a914817b..137c74cd 100644
--- a/app/test-bbdev/test_bbdev.c
+++ b/app/test-bbdev/test_bbdev.c
@@ -14,6 +14,8 @@
#include <rte_bbdev.h>
#include <rte_bbdev_op.h>
#include <rte_bbdev_pmd.h>
+#include<string.h>
+#include <rte_string_fns.h>
#include "main.h"
@@ -788,14 +790,14 @@ test_bbdev_driver_init(void)
/* Initialize the maximum amount of devices */
do {
- sprintf(name_tmp, "%s%i", "name_", num_devs);
+ snprintf(name_tmp, sizeof(name_tmp), "%s%i", "name_", num_devs);
dev2 = rte_bbdev_allocate(name_tmp);
TEST_ASSERT(dev2 != NULL,
"Failed to initialize bbdev driver");
++num_devs;
} while (num_devs < (RTE_BBDEV_MAX_DEVS - 1));
- sprintf(name_tmp, "%s%i", "name_", num_devs);
+ snprintf(name_tmp, sizeof(name_tmp), "%s%i", "name_", num_devs);
dev2 = rte_bbdev_allocate(name_tmp);
TEST_ASSERT(dev2 == NULL, "Failed to initialize bbdev driver number %d "
"more drivers than RTE_BBDEV_MAX_DEVS: %d ", num_devs,
@@ -804,7 +806,7 @@ test_bbdev_driver_init(void)
num_devs--;
while (num_devs >= num_devs_tmp) {
- sprintf(name_tmp, "%s%i", "name_", num_devs);
+ snprintf(name_tmp, sizeof(name_tmp), "%s%i", "name_", num_devs);
dev2 = rte_bbdev_get_named_dev(name_tmp);
TEST_ASSERT_SUCCESS(rte_bbdev_release(dev2),
"Failed to uninitialize bbdev driver %s ",
@@ -825,7 +827,7 @@ test_bbdev_driver_init(void)
TEST_ASSERT_FAIL(rte_bbdev_release(NULL),
"Failed to uninitialize bbdev driver with NULL bbdev");
- sprintf(name_tmp, "%s", "invalid_name");
+ strlcpy(name_tmp, "invalid_name", sizeof(name_tmp));
dev2 = rte_bbdev_get_named_dev(name_tmp);
TEST_ASSERT_FAIL(rte_bbdev_release(dev2),
"Failed to uninitialize bbdev driver with invalid name");
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 953e058c..0aa0de8b 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -129,6 +129,11 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
struct rte_cryptodev_info cdev_info;
uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+ /* range check the socket_id - negative values become big
+ * positive ones due to use of unsigned value
+ */
+ if (socket_id >= RTE_MAX_NUMA_NODES)
+ socket_id = 0;
rte_cryptodev_info_get(cdev_id, &cdev_info);
if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 51704b50..eeadb2de 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -94,14 +94,15 @@ static void cmd_help_brief_parsed(__attribute__((unused)) void *parsed_result,
cl,
"\n"
"Help is available for the following sections:\n\n"
- " help control : Start and stop forwarding.\n"
- " help display : Displaying port, stats and config "
+ " help control : Start and stop forwarding.\n"
+ " help display : Displaying port, stats and config "
"information.\n"
- " help config : Configuration information.\n"
- " help ports : Configuring ports.\n"
- " help registers : Reading and setting port registers.\n"
- " help filters : Filters configuration help.\n"
- " help all : All of the above sections.\n\n"
+ " help config : Configuration information.\n"
+ " help ports : Configuring ports.\n"
+ " help registers : Reading and setting port registers.\n"
+ " help filters : Filters configuration help.\n"
+ " help traffic_management : Traffic Management commmands.\n"
+ " help all : All of the above sections.\n\n"
);
}
@@ -210,21 +211,32 @@ static void cmd_help_long_parsed(void *parsed_result,
"show port meter stats (port_id) (meter_id) (clear)\n"
" Get meter stats on a port\n\n"
- "show port tm cap (port_id)\n"
- " Display the port TM capability.\n\n"
- "show port tm level cap (port_id) (level_id)\n"
- " Display the port TM hierarchical level capability.\n\n"
+ "show fwd stats all\n"
+ " Display statistics for all fwd engines.\n\n"
- "show port tm node cap (port_id) (node_id)\n"
- " Display the port TM node capability.\n\n"
+ "clear fwd stats all\n"
+ " Clear statistics for all fwd engines.\n\n"
- "show port tm node type (port_id) (node_id)\n"
- " Display the port TM node type.\n\n"
+ "show port (port_id) rx_offload capabilities\n"
+ " List all per queue and per port Rx offloading"
+ " capabilities of a port\n\n"
- "show port tm node stats (port_id) (node_id) (clear)\n"
- " Display the port TM node stats.\n\n"
+ "show port (port_id) rx_offload configuration\n"
+ " List port level and all queue level"
+ " Rx offloading configuration\n\n"
+ "show port (port_id) tx_offload capabilities\n"
+ " List all per queue and per port"
+ " Tx offloading capabilities of a port\n\n"
+
+ "show port (port_id) tx_offload configuration\n"
+ " List port level and all queue level"
+ " Tx offloading configuration\n\n"
+
+ "show port (port_id) tx_metadata\n"
+ " Show Tx metadata value set"
+ " for a specific port\n\n"
);
}
@@ -642,11 +654,6 @@ static void cmd_help_long_parsed(void *parsed_result,
"E-tag set filter del e-tag-id (value) port (port_id)\n"
" Delete an E-tag forwarding filter on a port\n\n"
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
- "set port tm hierarchy default (port_id)\n"
- " Set default traffic Management hierarchy on a port\n\n"
-
-#endif
"ddp add (port_id) (profile_path[,backup_profile_path])\n"
" Load a profile package on a port\n\n"
@@ -727,62 +734,6 @@ static void cmd_help_long_parsed(void *parsed_result,
"show port (port_id) queue-region\n"
" show all queue region related configuration info\n\n"
- "add port tm node shaper profile (port_id) (shaper_profile_id)"
- " (cmit_tb_rate) (cmit_tb_size) (peak_tb_rate) (peak_tb_size)"
- " (packet_length_adjust)\n"
- " Add port tm node private shaper profile.\n\n"
-
- "del port tm node shaper profile (port_id) (shaper_profile_id)\n"
- " Delete port tm node private shaper profile.\n\n"
-
- "add port tm node shared shaper (port_id) (shared_shaper_id)"
- " (shaper_profile_id)\n"
- " Add/update port tm node shared shaper.\n\n"
-
- "del port tm node shared shaper (port_id) (shared_shaper_id)\n"
- " Delete port tm node shared shaper.\n\n"
-
- "set port tm node shaper profile (port_id) (node_id)"
- " (shaper_profile_id)\n"
- " Set port tm node shaper profile.\n\n"
-
- "add port tm node wred profile (port_id) (wred_profile_id)"
- " (color_g) (min_th_g) (max_th_g) (maxp_inv_g) (wq_log2_g)"
- " (color_y) (min_th_y) (max_th_y) (maxp_inv_y) (wq_log2_y)"
- " (color_r) (min_th_r) (max_th_r) (maxp_inv_r) (wq_log2_r)\n"
- " Add port tm node wred profile.\n\n"
-
- "del port tm node wred profile (port_id) (wred_profile_id)\n"
- " Delete port tm node wred profile.\n\n"
-
- "add port tm nonleaf node (port_id) (node_id) (parent_node_id)"
- " (priority) (weight) (level_id) (shaper_profile_id)"
- " (n_sp_priorities) (stats_mask) (n_shared_shapers)"
- " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n"
- " Add port tm nonleaf node.\n\n"
-
- "add port tm leaf node (port_id) (node_id) (parent_node_id)"
- " (priority) (weight) (level_id) (shaper_profile_id)"
- " (cman_mode) (wred_profile_id) (stats_mask) (n_shared_shapers)"
- " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n"
- " Add port tm leaf node.\n\n"
-
- "del port tm node (port_id) (node_id)\n"
- " Delete port tm node.\n\n"
-
- "set port tm node parent (port_id) (node_id) (parent_node_id)"
- " (priority) (weight)\n"
- " Set port tm node parent.\n\n"
-
- "suspend port tm node (port_id) (node_id)"
- " Suspend tm node.\n\n"
-
- "resume port tm node (port_id) (node_id)"
- " Resume tm node.\n\n"
-
- "port tm hierarchy commit (port_id) (clean_on_fail)\n"
- " Commit tm hierarchy.\n\n"
-
"vxlan ip-version (ipv4|ipv6) vni (vni) udp-src"
" (udp-src) udp-dst (udp-dst) ip-src (ip-src) ip-dst"
" (ip-dst) eth-src (eth-src) eth-dst (eth-dst)\n"
@@ -918,6 +869,52 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)\n\n"
" Add/remove UDP tunnel port for tunneling offload\n\n"
+
+ "port config <port_id> rx_offload vlan_strip|"
+ "ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|"
+ "outer_ipv4_cksum|macsec_strip|header_split|"
+ "vlan_filter|vlan_extend|jumbo_frame|crc_strip|"
+ "scatter|timestamp|security|keep_crc on|off\n"
+ " Enable or disable a per port Rx offloading"
+ " on all Rx queues of a port\n\n"
+
+ "port (port_id) rxq (queue_id) rx_offload vlan_strip|"
+ "ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|"
+ "outer_ipv4_cksum|macsec_strip|header_split|"
+ "vlan_filter|vlan_extend|jumbo_frame|crc_strip|"
+ "scatter|timestamp|security|keep_crc on|off\n"
+ " Enable or disable a per queue Rx offloading"
+ " only on a specific Rx queue\n\n"
+
+ "port config (port_id) tx_offload vlan_insert|"
+ "ipv4_cksum|udp_cksum|tcp_cksum|sctp_cksum|tcp_tso|"
+ "udp_tso|outer_ipv4_cksum|qinq_insert|vxlan_tnl_tso|"
+ "gre_tnl_tso|ipip_tnl_tso|geneve_tnl_tso|"
+ "macsec_insert|mt_lockfree|multi_segs|mbuf_fast_free|"
+ "security|match_metadata on|off\n"
+ " Enable or disable a per port Tx offloading"
+ " on all Tx queues of a port\n\n"
+
+ "port (port_id) txq (queue_id) tx_offload vlan_insert|"
+ "ipv4_cksum|udp_cksum|tcp_cksum|sctp_cksum|tcp_tso|"
+ "udp_tso|outer_ipv4_cksum|qinq_insert|vxlan_tnl_tso|"
+ "gre_tnl_tso|ipip_tnl_tso|geneve_tnl_tso|macsec_insert"
+ "|mt_lockfree|multi_segs|mbuf_fast_free|security"
+ " on|off\n"
+ " Enable or disable a per queue Tx offloading"
+ " only on a specific Tx queue\n\n"
+
+ "bpf-load rx|tx (port) (queue) (J|M|B) (file_name)\n"
+ " Load an eBPF program as a callback"
+ " for particular RX/TX queue\n\n"
+
+ "bpf-unload rx|tx (port) (queue)\n"
+ " Unload previously loaded eBPF program"
+ " for particular RX/TX queue\n\n"
+
+ "port config (port_id) tx_metadata (value)\n"
+ " Set Tx metadata value per port. Testpmd will add this value"
+ " to any Tx packet sent from this port\n\n"
);
}
@@ -1137,6 +1134,107 @@ static void cmd_help_long_parsed(void *parsed_result,
" flow rules\n\n"
);
}
+
+ if (show_all || !strcmp(res->section, "traffic_management")) {
+ cmdline_printf(
+ cl,
+ "\n"
+ "Traffic Management:\n"
+ "--------------\n"
+ "show port tm cap (port_id)\n"
+ " Display the port TM capability.\n\n"
+
+ "show port tm level cap (port_id) (level_id)\n"
+ " Display the port TM hierarchical level capability.\n\n"
+
+ "show port tm node cap (port_id) (node_id)\n"
+ " Display the port TM node capability.\n\n"
+
+ "show port tm node type (port_id) (node_id)\n"
+ " Display the port TM node type.\n\n"
+
+ "show port tm node stats (port_id) (node_id) (clear)\n"
+ " Display the port TM node stats.\n\n"
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
+ "set port tm hierarchy default (port_id)\n"
+ " Set default traffic Management hierarchy on a port\n\n"
+#endif
+
+ "add port tm node shaper profile (port_id) (shaper_profile_id)"
+ " (cmit_tb_rate) (cmit_tb_size) (peak_tb_rate) (peak_tb_size)"
+ " (packet_length_adjust)\n"
+ " Add port tm node private shaper profile.\n\n"
+
+ "del port tm node shaper profile (port_id) (shaper_profile_id)\n"
+ " Delete port tm node private shaper profile.\n\n"
+
+ "add port tm node shared shaper (port_id) (shared_shaper_id)"
+ " (shaper_profile_id)\n"
+ " Add/update port tm node shared shaper.\n\n"
+
+ "del port tm node shared shaper (port_id) (shared_shaper_id)\n"
+ " Delete port tm node shared shaper.\n\n"
+
+ "set port tm node shaper profile (port_id) (node_id)"
+ " (shaper_profile_id)\n"
+ " Set port tm node shaper profile.\n\n"
+
+ "add port tm node wred profile (port_id) (wred_profile_id)"
+ " (color_g) (min_th_g) (max_th_g) (maxp_inv_g) (wq_log2_g)"
+ " (color_y) (min_th_y) (max_th_y) (maxp_inv_y) (wq_log2_y)"
+ " (color_r) (min_th_r) (max_th_r) (maxp_inv_r) (wq_log2_r)\n"
+ " Add port tm node wred profile.\n\n"
+
+ "del port tm node wred profile (port_id) (wred_profile_id)\n"
+ " Delete port tm node wred profile.\n\n"
+
+ "add port tm nonleaf node (port_id) (node_id) (parent_node_id)"
+ " (priority) (weight) (level_id) (shaper_profile_id)"
+ " (n_sp_priorities) (stats_mask) (n_shared_shapers)"
+ " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n"
+ " Add port tm nonleaf node.\n\n"
+
+ "add port tm leaf node (port_id) (node_id) (parent_node_id)"
+ " (priority) (weight) (level_id) (shaper_profile_id)"
+ " (cman_mode) (wred_profile_id) (stats_mask) (n_shared_shapers)"
+ " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n"
+ " Add port tm leaf node.\n\n"
+
+ "del port tm node (port_id) (node_id)\n"
+ " Delete port tm node.\n\n"
+
+ "set port tm node parent (port_id) (node_id) (parent_node_id)"
+ " (priority) (weight)\n"
+ " Set port tm node parent.\n\n"
+
+ "suspend port tm node (port_id) (node_id)"
+ " Suspend tm node.\n\n"
+
+ "resume port tm node (port_id) (node_id)"
+ " Resume tm node.\n\n"
+
+ "port tm hierarchy commit (port_id) (clean_on_fail)\n"
+ " Commit tm hierarchy.\n\n"
+
+ "set port tm mark ip_ecn (port) (green) (yellow)"
+ " (red)\n"
+ " Enables/Disables the traffic management marking"
+ " for IP ECN (Explicit Congestion Notification)"
+ " packets on a given port\n\n"
+
+ "set port tm mark ip_dscp (port) (green) (yellow)"
+ " (red)\n"
+ " Enables/Disables the traffic management marking"
+ " on the port for IP dscp packets\n\n"
+
+ "set port tm mark vlan_dei (port) (green) (yellow)"
+ " (red)\n"
+ " Enables/Disables the traffic management marking"
+ " on the port for VLAN packets with DEI enabled\n\n"
+ );
+ }
+
}
cmdline_parse_token_string_t cmd_help_long_help =
@@ -1145,12 +1243,13 @@ cmdline_parse_token_string_t cmd_help_long_help =
cmdline_parse_token_string_t cmd_help_long_section =
TOKEN_STRING_INITIALIZER(struct cmd_help_long_result, section,
"all#control#display#config#"
- "ports#registers#filters");
+ "ports#registers#filters#traffic_management");
cmdline_parse_inst_t cmd_help_long = {
.f = cmd_help_long_parsed,
.data = NULL,
- .help_str = "help all|control|display|config|ports|register|filters: "
+ .help_str = "help all|control|display|config|ports|register|"
+ "filters|traffic_management: "
"Show help",
.tokens = {
(void *)&cmd_help_long_help,
@@ -12070,7 +12169,7 @@ cmd_set_hash_global_config_parsed(void *parsed_result,
res->port_id);
else
printf("Global hash configurations have been set "
- "succcessfully by port %d\n", res->port_id);
+ "successfully by port %d\n", res->port_id);
}
cmdline_parse_token_string_t cmd_set_hash_global_config_all =
@@ -17625,7 +17724,7 @@ print_rx_offloads(uint64_t offloads)
begin = __builtin_ctzll(offloads);
end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads);
- single_offload = 1 << begin;
+ single_offload = 1ULL << begin;
for (bit = begin; bit < end; bit++) {
if (offloads & single_offload)
printf(" %s",
@@ -18019,7 +18118,7 @@ print_tx_offloads(uint64_t offloads)
begin = __builtin_ctzll(offloads);
end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads);
- single_offload = 1 << begin;
+ single_offload = 1ULL << begin;
for (bit = begin; bit < end; bit++) {
if (offloads & single_offload)
printf(" %s",
@@ -18196,13 +18295,13 @@ search_tx_offload(const char *name)
single_offload = 1;
for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) {
single_name = rte_eth_dev_tx_offload_name(single_offload);
+ if (single_name == NULL)
+ break;
if (!strcasecmp(single_name, name)) {
found = 1;
break;
} else if (!strcasecmp(single_name, "UNKNOWN"))
break;
- else if (single_name == NULL)
- break;
single_offload <<= 1;
}
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 5c0108fa..d202566b 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -35,6 +35,7 @@ enum index {
PREFIX,
BOOLEAN,
STRING,
+ HEX,
MAC_ADDR,
IPV4_ADDR,
IPV6_ADDR,
@@ -1122,6 +1123,9 @@ static int parse_boolean(struct context *, const struct token *,
static int parse_string(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
+static int parse_hex(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size);
static int parse_mac_addr(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -1198,6 +1202,13 @@ static const struct token token_list[] = {
.call = parse_string,
.comp = comp_none,
},
+ [HEX] = {
+ .name = "{hex}",
+ .type = "HEX",
+ .help = "fixed string",
+ .call = parse_hex,
+ .comp = comp_none,
+ },
[MAC_ADDR] = {
.name = "{MAC address}",
.type = "MAC-48",
@@ -2306,7 +2317,7 @@ static const struct token token_list[] = {
[ACTION_RSS_KEY] = {
.name = "key",
.help = "RSS hash key",
- .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
+ .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
.args = ARGS(ARGS_ENTRY_ARB(0, 0),
ARGS_ENTRY_ARB
(offsetof(struct action_rss_data, conf) +
@@ -4441,6 +4452,121 @@ error:
return -1;
}
+static int
+parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c = NULL;
+ uint32_t i, len;
+ char tmp[3];
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ /* Convert chars to bytes */
+ for (i = 0, len = 0; i < *size; i += 2) {
+ snprintf(tmp, 3, "%s", src + i);
+ dst[len++] = strtoul(tmp, &c, 16);
+ if (*c != 0) {
+ len--;
+ dst[len] = 0;
+ *size = len;
+ return -1;
+ }
+ }
+ dst[len] = 0;
+ *size = len;
+
+ return 0;
+}
+
+static int
+parse_hex(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg_data = pop_args(ctx);
+ const struct arg *arg_len = pop_args(ctx);
+ const struct arg *arg_addr = pop_args(ctx);
+ char tmp[16]; /* Ought to be enough. */
+ int ret;
+ unsigned int hexlen = len;
+ unsigned int length = 256;
+ uint8_t hex_tmp[length];
+
+ /* Arguments are expected. */
+ if (!arg_data)
+ return -1;
+ if (!arg_len) {
+ push_args(ctx, arg_data);
+ return -1;
+ }
+ if (!arg_addr) {
+ push_args(ctx, arg_len);
+ push_args(ctx, arg_data);
+ return -1;
+ }
+ size = arg_data->size;
+ /* Bit-mask fill is not supported. */
+ if (arg_data->mask)
+ goto error;
+ if (!ctx->object)
+ return len;
+
+ /* translate bytes string to array. */
+ if (str[0] == '0' && ((str[1] == 'x') ||
+ (str[1] == 'X'))) {
+ str += 2;
+ hexlen -= 2;
+ }
+ if (hexlen > length)
+ return -1;
+ ret = parse_hex_string(str, hex_tmp, &hexlen);
+ if (ret < 0)
+ goto error;
+ /* Let parse_int() fill length information first. */
+ ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
+ if (ret < 0)
+ goto error;
+ push_args(ctx, arg_len);
+ ret = parse_int(ctx, token, tmp, ret, NULL, 0);
+ if (ret < 0) {
+ pop_args(ctx);
+ goto error;
+ }
+ buf = (uint8_t *)ctx->object + arg_data->offset;
+ /* Output buffer is not necessarily NUL-terminated. */
+ memcpy(buf, hex_tmp, hexlen);
+ memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
+ if (ctx->objmask)
+ memset((uint8_t *)ctx->objmask + arg_data->offset,
+ 0xff, hexlen);
+ /* Save address if requested. */
+ if (arg_addr->size) {
+ memcpy((uint8_t *)ctx->object + arg_addr->offset,
+ (void *[]){
+ (uint8_t *)ctx->object + arg_data->offset
+ },
+ arg_addr->size);
+ if (ctx->objmask)
+ memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
+ (void *[]){
+ (uint8_t *)ctx->objmask + arg_data->offset
+ },
+ arg_addr->size);
+ }
+ return len;
+error:
+ push_args(ctx, arg_addr);
+ push_args(ctx, arg_len);
+ push_args(ctx, arg_data);
+ return -1;
+
+}
+
/**
* Parse a MAC address.
*
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index b9e5dd92..4004e3a4 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2955,7 +2955,6 @@ vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
- int vlan_offload;
struct rte_eth_dev_info dev_info;
if (port_id_is_invalid(port_id, ENABLED_WARN))
@@ -2963,8 +2962,8 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
if (vlan_id_is_invalid(vlan_id))
return;
- vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
- if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
+ if (ports[port_id].dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_QINQ_INSERT) {
printf("Error, as QinQ has been enabled.\n");
return;
}
@@ -2983,7 +2982,6 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
- int vlan_offload;
struct rte_eth_dev_info dev_info;
if (port_id_is_invalid(port_id, ENABLED_WARN))
@@ -2993,11 +2991,6 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
if (vlan_id_is_invalid(vlan_id_outer))
return;
- vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
- if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
- printf("Error, as QinQ hasn't been enabled.\n");
- return;
- }
rte_eth_dev_info_get(port_id, &dev_info);
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
printf("Error: qinq insert not supported by port %d\n",
@@ -3006,7 +2999,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
}
tx_vlan_reset(port_id);
- ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;
+ ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = vlan_id;
ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index ffeee205..f4f2a7b2 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -575,7 +575,7 @@ mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
/*
* Allocate a new mbuf with up to tx_pkt_nb_segs segments.
- * Copy packet contents and offload information into then new segmented mbuf.
+ * Copy packet contents and offload information into the new segmented mbuf.
*/
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 7b0c8e68..cf983b16 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -188,6 +188,8 @@ struct fwd_engine * fwd_engines[] = {
NULL,
};
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
@@ -844,7 +846,7 @@ setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
/*
* Configuration initialisation done once at init time.
*/
-static void
+static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
unsigned int socket_id)
{
@@ -922,6 +924,7 @@ err:
} else if (verbose_level > 0) {
rte_mempool_dump(stdout, rte_mp);
}
+ return rte_mp;
}
/*
@@ -1139,14 +1142,18 @@ init_config(void)
uint8_t i;
for (i = 0; i < num_sockets; i++)
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
- socket_ids[i]);
+ mempools[i] = mbuf_pool_create(mbuf_data_size,
+ nb_mbuf_per_pool,
+ socket_ids[i]);
} else {
if (socket_num == UMA_NO_CONFIG)
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+ mempools[0] = mbuf_pool_create(mbuf_data_size,
+ nb_mbuf_per_pool, 0);
else
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
- socket_num);
+ mempools[socket_num] = mbuf_pool_create
+ (mbuf_data_size,
+ nb_mbuf_per_pool,
+ socket_num);
}
init_port_config();
@@ -1388,7 +1395,7 @@ fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
"%-"PRIu64"\n",
stats->ipackets, stats->imissed,
- (uint64_t) (stats->ipackets + stats->imissed));
+ stats->ipackets + stats->imissed);
if (cur_fwd_eng == &csum_fwd_engine)
printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"Bad-outer-l4csum: %-14"PRIu64"\n",
@@ -1402,13 +1409,13 @@ fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
"%-"PRIu64"\n",
stats->opackets, port->tx_dropped,
- (uint64_t) (stats->opackets + port->tx_dropped));
+ stats->opackets + port->tx_dropped);
}
else {
printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
"%14"PRIu64"\n",
stats->ipackets, stats->imissed,
- (uint64_t) (stats->ipackets + stats->imissed));
+ stats->ipackets + stats->imissed);
if (cur_fwd_eng == &csum_fwd_engine)
printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64" Bad-outer-l4csum: %-14"PRIu64"\n",
@@ -1423,7 +1430,7 @@ fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
"%14"PRIu64"\n",
stats->opackets, port->tx_dropped,
- (uint64_t) (stats->opackets + port->tx_dropped));
+ stats->opackets + port->tx_dropped);
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
@@ -1471,15 +1478,19 @@ fwd_stream_stats_display(streamid_t stream_id)
"TX Port=%2d/Queue=%2d %s\n",
fwd_top_stats_border, fs->rx_port, fs->rx_queue,
fs->tx_port, fs->tx_queue, fwd_top_stats_border);
- printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
+ printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
+ " TX-dropped: %-14"PRIu64,
fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
/* if checksum mode */
if (cur_fwd_eng == &csum_fwd_engine) {
- printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
- "%-14u Rx- bad outer L4 checksum: %-14u\n",
+ printf(" RX- bad IP checksum: %-14"PRIu64
+ " Rx- bad L4 checksum: %-14"PRIu64
+ " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
fs->rx_bad_outer_l4_csum);
+ } else {
+ printf("\n");
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
@@ -1755,9 +1766,6 @@ stop_packet_forwarding(void)
uint64_t total_rx_dropped;
uint64_t total_tx_dropped;
uint64_t total_rx_nombuf;
- uint64_t tx_dropped;
- uint64_t rx_bad_ip_csum;
- uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t fwd_cycles;
#endif
@@ -1784,38 +1792,22 @@ stop_packet_forwarding(void)
fwd_cycles = 0;
#endif
for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
+ struct fwd_stream *fs = fwd_streams[sm_id];
+
if (cur_fwd_config.nb_fwd_streams >
cur_fwd_config.nb_fwd_ports) {
fwd_stream_stats_display(sm_id);
- ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
- ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
+ ports[fs->tx_port].tx_stream = NULL;
+ ports[fs->rx_port].rx_stream = NULL;
} else {
- ports[fwd_streams[sm_id]->tx_port].tx_stream =
- fwd_streams[sm_id];
- ports[fwd_streams[sm_id]->rx_port].rx_stream =
- fwd_streams[sm_id];
- }
- tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
- tx_dropped = (uint64_t) (tx_dropped +
- fwd_streams[sm_id]->fwd_dropped);
- ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
-
- rx_bad_ip_csum =
- ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
- rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
- fwd_streams[sm_id]->rx_bad_ip_csum);
- ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
- rx_bad_ip_csum;
-
- rx_bad_l4_csum =
- ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
- rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
- fwd_streams[sm_id]->rx_bad_l4_csum);
- ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
- rx_bad_l4_csum;
-
- ports[fwd_streams[sm_id]->rx_port].rx_bad_outer_l4_csum +=
- fwd_streams[sm_id]->rx_bad_outer_l4_csum;
+ ports[fs->tx_port].tx_stream = fs;
+ ports[fs->rx_port].rx_stream = fs;
+ }
+ ports[fs->tx_port].tx_dropped += fs->fwd_dropped;
+ ports[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
+ ports[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
+ ports[fs->rx_port].rx_bad_outer_l4_csum +=
+ fs->rx_bad_outer_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fwd_cycles = (uint64_t) (fwd_cycles +
@@ -2399,6 +2391,7 @@ pmd_test_exit(void)
struct rte_device *device;
portid_t pt_id;
int ret;
+ int i;
if (test_done == 0)
stop_packet_forwarding();
@@ -2452,6 +2445,10 @@ pmd_test_exit(void)
return;
}
}
+ for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ if (mempools[i])
+ rte_mempool_free(mempools[i]);
+ }
printf("\nBye...\n");
}
@@ -2965,8 +2962,9 @@ init_port_dcb_config(portid_t pid,
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
/* re-configure the device . */
- rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
-
+ retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
+ if (retval < 0)
+ return retval;
rte_eth_dev_info_get(pid, &rte_port->dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,
@@ -3058,6 +3056,8 @@ print_stats(void)
printf("\nPort statistics ====================================");
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
nic_stats_display(fwd_ports_ids[i]);
+
+ fflush(stdout);
}
static void
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 3ff11e64..37d61b80 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -119,12 +119,12 @@ struct fwd_stream {
unsigned int retry_enabled;
/* "read-write" results */
- unsigned int rx_packets; /**< received packets */
- unsigned int tx_packets; /**< received packets transmitted */
- unsigned int fwd_dropped; /**< received packets not forwarded */
- unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */
- unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
- unsigned int rx_bad_outer_l4_csum;
+ uint64_t rx_packets; /**< received packets */
+ uint64_t tx_packets; /**< received packets transmitted */
+ uint64_t fwd_dropped; /**< received packets not forwarded */
+ uint64_t rx_bad_ip_csum ; /**< received packets has bad ip checksum */
+ uint64_t rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
+ uint64_t rx_bad_outer_l4_csum;
/**< received packets has bad outer l4 checksum */
unsigned int gro_times; /**< GRO operation times */
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
@@ -173,7 +173,6 @@ struct rte_port {
uint16_t tunnel_tso_segsz; /**< Segmentation offload MSS for tunneled pkts. */
uint16_t tx_vlan_id;/**< The tag ID */
uint16_t tx_vlan_id_outer;/**< The outer tag ID */
- void *fwd_ctx; /**< Forwarding mode context */
uint64_t rx_bad_ip_csum; /**< rx pkts with bad ip checksum */
uint64_t rx_bad_l4_csum; /**< rx pkts with bad l4 checksum */
uint64_t rx_bad_outer_l4_csum;
diff --git a/buildtools/symlink-drivers-solibs.sh b/buildtools/symlink-drivers-solibs.sh
index 9826c6ae..42985e85 100644
--- a/buildtools/symlink-drivers-solibs.sh
+++ b/buildtools/symlink-drivers-solibs.sh
@@ -7,6 +7,7 @@
# others, e.g. PCI device PMDs depending on the PCI bus driver.
# parameters to script are paths relative to install prefix:
-# 1. directory containing driver files e.g. lib64/dpdk/drivers
-# 2. directory for installed regular libs e.g. lib64
-ln -rsf ${DESTDIR}/${MESON_INSTALL_PREFIX}/$1/* ${DESTDIR}/${MESON_INSTALL_PREFIX}/$2
+# 1. directory for installed regular libs e.g. lib64
+# 2. subdirectory of libdir where the pmds are
+
+cd ${MESON_INSTALL_DESTDIR_PREFIX}/$1 && ln -sfv $2/librte_*.so* .
diff --git a/config/arm/meson.build b/config/arm/meson.build
index dae55d6b..9feb54f2 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -6,6 +6,7 @@
march_opt = '-march=@0@'.format(machine)
arm_force_native_march = false
+arm_force_default_march = (machine == 'default')
machine_args_generic = [
['default', ['-march=armv8-a+crc+crypto']],
@@ -105,7 +106,10 @@ else
cmd_generic = ['generic', '', '', 'default', '']
cmd_output = cmd_generic # Set generic by default
machine_args = [] # Clear previous machine args
- if not meson.is_cross_build()
+ if arm_force_default_march and not meson.is_cross_build()
+ machine = impl_generic
+ impl_pn = 'default'
+ elif not meson.is_cross_build()
# The script returns ['Implementer', 'Variant', 'Architecture',
# 'Primary Part number', 'Revision']
detect_vendor = find_program(join_paths(
@@ -115,8 +119,8 @@ else
cmd_output = cmd.stdout().to_lower().strip().split(' ')
endif
# Set to generic if variable is not found
- machine = get_variable('impl_' + cmd_output[0], 'generic')
- if machine == 'generic'
+ machine = get_variable('impl_' + cmd_output[0], ['generic'])
+ if machine[0] == 'generic'
machine = impl_generic
cmd_output = cmd_generic
endif
diff --git a/config/meson.build b/config/meson.build
index db32499b..80d25382 100644
--- a/config/meson.build
+++ b/config/meson.build
@@ -1,6 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+# driver .so files often depend upon the bus drivers for their connect bus,
+# e.g. ixgbe depends on librte_bus_pci. This means that the bus drivers need
+# to be in the library path, so symlink the drivers from the main lib directory.
+meson.add_install_script('../buildtools/symlink-drivers-solibs.sh',
+ get_option('libdir'),
+ pmd_subdir_opt)
+
# set the machine type and cflags for it
if meson.is_cross_build()
machine = host_machine.cpu()
diff --git a/config/x86/meson.build b/config/x86/meson.build
index 9e5952aa..ae92f86a 100644
--- a/config/x86/meson.build
+++ b/config/x86/meson.build
@@ -1,23 +1,24 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-# for checking defines we need to use the correct compiler flags
-march_opt = ['-march=@0@'.format(machine)]
-
# get binutils version for the workaround of Bug 97
ldver = run_command('ld', '-v').stdout().strip()
if ldver.contains('2.30')
if cc.has_argument('-mno-avx512f')
- march_opt += '-mno-avx512f'
+ machine_args += '-mno-avx512f'
message('Binutils 2.30 detected, disabling AVX512 support as workaround for bug #97')
endif
+ if ldver.contains('2.31') and cc.has_argument('-mno-avx512f')
+ machine_args += '-mno-avx512f'
+ message('Binutils 2.31 detected, disabling AVX512 support as workaround for bug #249')
+ endif
endif
# we require SSE4.2 for DPDK
sse_errormsg = '''SSE4.2 instruction set is required for DPDK.
Please set the machine type to "nehalem" or "corei7" or higher value'''
-if cc.get_define('__SSE4_2__', args: march_opt) == ''
+if cc.get_define('__SSE4_2__', args: machine_args) == ''
error(sse_errormsg)
endif
@@ -37,23 +38,23 @@ else
dpdk_conf.set('RTE_ARCH', 'i686')
endif
-if cc.get_define('__AES__', args: march_opt) != ''
+if cc.get_define('__AES__', args: machine_args) != ''
dpdk_conf.set('RTE_MACHINE_CPUFLAG_AES', 1)
compile_time_cpuflags += ['RTE_CPUFLAG_AES']
endif
-if cc.get_define('__PCLMUL__', args: march_opt) != ''
+if cc.get_define('__PCLMUL__', args: machine_args) != ''
dpdk_conf.set('RTE_MACHINE_CPUFLAG_PCLMULQDQ', 1)
compile_time_cpuflags += ['RTE_CPUFLAG_PCLMULQDQ']
endif
-if cc.get_define('__AVX__', args: march_opt) != ''
+if cc.get_define('__AVX__', args: machine_args) != ''
dpdk_conf.set('RTE_MACHINE_CPUFLAG_AVX', 1)
compile_time_cpuflags += ['RTE_CPUFLAG_AVX']
endif
-if cc.get_define('__AVX2__', args: march_opt) != ''
+if cc.get_define('__AVX2__', args: machine_args) != ''
dpdk_conf.set('RTE_MACHINE_CPUFLAG_AVX2', 1)
compile_time_cpuflags += ['RTE_CPUFLAG_AVX2']
endif
-if cc.get_define('__AVX512F__', args: march_opt) != ''
+if cc.get_define('__AVX512F__', args: machine_args) != ''
dpdk_conf.set('RTE_MACHINE_CPUFLAG_AVX512F', 1)
compile_time_cpuflags += ['RTE_CPUFLAG_AVX512F']
endif
diff --git a/devtools/check-symbol-change.sh b/devtools/check-symbol-change.sh
index 8f986a5d..f6f79a88 100755
--- a/devtools/check-symbol-change.sh
+++ b/devtools/check-symbol-change.sh
@@ -97,7 +97,7 @@ check_for_rule_violations()
then
# Just inform the user of this occurrence, but
# don't flag it as an error
- echo -n "INFO: symbol $syname is added but "
+ echo -n "INFO: symbol $symname is added but "
echo -n "patch has insuficient context "
echo -n "to determine the section name "
echo -n "please ensure the version is "
@@ -105,26 +105,46 @@ check_for_rule_violations()
continue
fi
- if [ "$secname" != "EXPERIMENTAL" ]
+ oldsecname=$(sed -n \
+ "s#$mname $symname \(.*\) del#\1#p" "$mapdb")
+
+ # A symbol can not enter a non experimental
+ # section directly
+ if [ -z "$oldsecname" ]
then
- # Symbols that are getting added in a section
- # other than the experimental section
- # to be moving from an already supported
- # section or its a violation
- grep -q \
- "$mname $symname [^EXPERIMENTAL] del" "$mapdb"
- if [ $? -ne 0 ]
+ if [ "$secname" = 'EXPERIMENTAL' ]
then
+ echo -n "INFO: symbol $symname has "
+ echo -n "been added to the "
+ echo -n "EXPERIMENTAL section of the "
+ echo "version map"
+ continue
+ else
echo -n "ERROR: symbol $symname "
- echo -n "is added in a section "
- echo -n "other than the EXPERIMENTAL "
+ echo -n "is added in the $secname "
+ echo -n "section, but is expected to "
+ echo -n "be added in the EXPERIMENTAL "
echo "section of the version map"
ret=1
+ continue
fi
fi
+
+ # This symbol is moving between two sections (the
+ # original section is not experimental).
+ # This can be legit, just warn.
+ if [ "$oldsecname" != 'EXPERIMENTAL' ]
+ then
+ echo -n "INFO: symbol $symname is being "
+ echo -n "moved from $oldsecname to $secname. "
+ echo -n "Ensure that it has gone through the "
+ echo "deprecation process"
+ continue
+ fi
else
- if [ "$secname" != "EXPERIMENTAL" ]
+ if ! grep -q "$mname $symname .* add" "$mapdb" && \
+ [ "$secname" != "EXPERIMENTAL" ]
then
# Just inform users that non-experimenal
# symbols need to go through a deprecation
diff --git a/devtools/checkpatches.sh b/devtools/checkpatches.sh
index 3b03b7ef..02d1c303 100755
--- a/devtools/checkpatches.sh
+++ b/devtools/checkpatches.sh
@@ -62,7 +62,7 @@ check_forbidden_additions() { # <patch>
-v RET_ON_FAIL=1 \
-v MESSAGE='Using explicit .svg extension instead of .*' \
-f $(dirname $(readlink -e $0))/check-forbidden-tokens.awk \
- "$1" || res = 1
+ "$1" || res=1
return $res
}
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 42f4ad00..d37b121c 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -9,6 +9,7 @@ default_path=$PATH
# - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
# - DPDK_DEP_ARCHIVE
# - DPDK_DEP_CFLAGS
+# - DPDK_DEP_ELF (y/[n])
# - DPDK_DEP_ISAL (y/[n])
# - DPDK_DEP_JSON (y/[n])
# - DPDK_DEP_LDFLAGS
@@ -96,6 +97,7 @@ reset_env ()
unset CROSS
unset DPDK_DEP_ARCHIVE
unset DPDK_DEP_CFLAGS
+ unset DPDK_DEP_ELF
unset DPDK_DEP_ISAL
unset DPDK_DEP_JSON
unset DPDK_DEP_LDFLAGS
@@ -148,7 +150,7 @@ config () # <directory> <target> <options>
test "$DPDK_DEP_ARCHIVE" != y || \
sed -ri 's,(RESOURCE_TAR=)n,\1y,' $1/.config
test "$DPDK_DEP_ISAL" != y || \
- sed -ri 's,(ISAL_PMD=)n,\1y,' $1/.config
+ sed -ri 's,(PMD_ISAL=)n,\1y,' $1/.config
test "$DPDK_DEP_MLX" != y || \
sed -ri 's,(MLX._PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_SZE" != y || \
@@ -156,6 +158,8 @@ config () # <directory> <target> <options>
test "$DPDK_DEP_ZLIB" != y || \
sed -ri 's,(BNX2X_PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_ZLIB" != y || \
+ sed -ri 's,(PMD_ZLIB=)n,\1y,' $1/.config
+ test "$DPDK_DEP_ZLIB" != y || \
sed -ri 's,(COMPRESSDEV_TEST=)n,\1y,' $1/.config
test "$DPDK_DEP_PCAP" != y || \
sed -ri 's,(PCAP=)n,\1y,' $1/.config
@@ -176,7 +180,7 @@ config () # <directory> <target> <options>
test "$DPDK_DEP_SSL" != y || \
sed -ri 's,(PMD_OPENSSL=)n,\1y,' $1/.config
test "$DPDK_DEP_SSL" != y || \
- sed -ri 's,(PMD_QAT=)n,\1y,' $1/.config
+ sed -ri 's,(QAT_SYM=)n,\1y,' $1/.config
test -z "$FLEXRAN_SDK" || \
sed -ri 's,(BBDEV_TURBO_SW=)n,\1y,' $1/.config
sed -ri 's,(SCHED_.*=)n,\1y,' $1/.config
@@ -186,7 +190,9 @@ config () # <directory> <target> <options>
sed -ri 's,(MVPP2_PMD=)n,\1y,' $1/.config
test -z "$LIBMUSDK_PATH" || \
sed -ri 's,(MVNETA_PMD=)n,\1y,' $1/.config
- test -z "$DPDK_DEP_JSON" || \
+ test "$DPDK_DEP_ELF" != y || \
+ sed -ri 's,(BPF_ELF=)n,\1y,' $1/.config
+ test "$DPDK_DEP_JSON" != y || \
sed -ri 's,(TELEMETRY=)n,\1y,' $1/.config
build_config_hook $1 $2 $3
diff --git a/devtools/test-meson-builds.sh b/devtools/test-meson-builds.sh
index 3edc805f..79148ba2 100755
--- a/devtools/test-meson-builds.sh
+++ b/devtools/test-meson-builds.sh
@@ -7,7 +7,7 @@
# * if a build-directory already exists we assume it was properly configured
# Run ninja after configuration is done.
-srcdir=$(dirname $(readlink -m $0))/..
+srcdir=$(dirname $(readlink -f $0))/..
MESON=${MESON:-meson}
use_shared="--default-library=shared"
@@ -36,6 +36,7 @@ build () # <directory> <meson options>
# shared and static linked builds with gcc and clang
for c in gcc clang ; do
+ command -v $c >/dev/null 2>&1 || continue
for s in static shared ; do
export CC="ccache $c"
build build-$c-$s --default-library=$s
@@ -43,7 +44,12 @@ for c in gcc clang ; do
done
# test compilation with minimal x86 instruction set
-build build-x86-default -Dmachine=nehalem $use_shared
+default_machine='nehalem'
+ok=$(cc -march=$default_machine -E - < /dev/null > /dev/null 2>&1 || echo false)
+if [ "$ok" = "false" ] ; then
+ default_machine='corei7'
+fi
+build build-x86-default -Dmachine=$default_machine $use_shared
# enable cross compilation if gcc cross-compiler is found
c=aarch64-linux-gnu-gcc
diff --git a/doc/guides/compressdevs/overview.rst b/doc/guides/compressdevs/overview.rst
index 70bbe82b..809e4e6e 100644
--- a/doc/guides/compressdevs/overview.rst
+++ b/doc/guides/compressdevs/overview.rst
@@ -18,7 +18,7 @@ Supported Feature Flags
without making any modifications to it (no compression done).
- "OOP SGL In SGL Out" feature flag stands for
- "Out-of-place Scatter-gather list Input, Scatter-gater list Output",
+ "Out-of-place Scatter-gather list Input, Scatter-gather list Output",
which means PMD supports different scatter-gather styled input and output buffers
(i.e. both can consists of multiple segments).
diff --git a/doc/guides/conf.py b/doc/guides/conf.py
index da99a3f8..a85f6c9d 100644
--- a/doc/guides/conf.py
+++ b/doc/guides/conf.py
@@ -391,6 +391,11 @@ def setup(app):
'AEAD',
'AEAD algorithms in crypto drivers',
'AEAD algorithm')
+ table_file = dirname(__file__) + '/cryptodevs/overview_asym_table.txt'
+ generate_overview_table(table_file, 5,
+ 'Asymmetric',
+ 'Asymmetric algorithms in crypto drivers',
+ 'Asymmetric algorithm')
table_file = dirname(__file__) + '/compressdevs/overview_feature_table.txt'
generate_overview_table(table_file, 1,
'Features',
diff --git a/doc/guides/contributing/coding_style.rst b/doc/guides/contributing/coding_style.rst
index d96698a7..656563dd 100644
--- a/doc/guides/contributing/coding_style.rst
+++ b/doc/guides/contributing/coding_style.rst
@@ -825,10 +825,10 @@ format.
.. code-block:: python
sources = files('file1.c', ...)
- headers = files('file1.c', ...)
+ headers = files('file1.h', ...)
-The will build based on a number of conventions and assumptions within the DPDK
+This will build based on a number of conventions and assumptions within the DPDK
itself, for example, that the library name is the same as the directory name in
which the files are stored.
diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst
index c72280a2..408859e2 100644
--- a/doc/guides/contributing/documentation.rst
+++ b/doc/guides/contributing/documentation.rst
@@ -40,14 +40,14 @@ The main directories that contain files related to documentation are shown below
|-- ...
-The API documentation is built from `Doxygen <http://www.stack.nl/~dimitri/doxygen/>`_ comments in the header files.
+The API documentation is built from `Doxygen <http://www.doxygen.nl>`_ comments in the header files.
These files are mainly in the ``lib/librte_*`` directories although some of the Poll Mode Drivers in ``drivers/net``
are also documented with Doxygen.
The configuration files that are used to control the Doxygen output are in the ``doc/api`` directory.
The user guides such as *The Programmers Guide* and the *FreeBSD* and *Linux Getting Started* Guides are generated
-from RST markup text files using the `Sphinx <http://sphinx-doc.org/index.html>`_ Documentation Generator.
+from RST markup text files using the `Sphinx <http://sphinx-doc.org>`_ Documentation Generator.
These files are included in the ``doc/guides/`` directory.
The output is controlled by the ``doc/guides/conf.py`` file.
@@ -174,7 +174,8 @@ For full support with figure and table captioning the latest version of Sphinx c
sudo pip install --upgrade sphinx
sudo pip install --upgrade sphinx_rtd_theme
-For further information on getting started with Sphinx see the `Sphinx Tutorial <http://sphinx-doc.org/tutorial.html>`_.
+For further information on getting started with Sphinx see the
+`Sphinx Getting Started <http://www.sphinx-doc.org/en/master/usage/quickstart.html>`_.
.. Note::
@@ -596,7 +597,7 @@ Doxygen Guidelines
The DPDK API is documented using Doxygen comment annotations in the header files.
Doxygen is a very powerful tool, it is extremely configurable and with a little effort can be used to create expressive documents.
-See the `Doxygen website <http://www.stack.nl/~dimitri/doxygen/>`_ for full details on how to use it.
+See the `Doxygen website <http://www.doxygen.nl>`_ for full details on how to use it.
The following are some guidelines for use of Doxygen in the DPDK API documentation:
diff --git a/doc/guides/contributing/patches.rst b/doc/guides/contributing/patches.rst
index a64bb036..1bd91b7e 100644
--- a/doc/guides/contributing/patches.rst
+++ b/doc/guides/contributing/patches.rst
@@ -8,7 +8,7 @@ Contributing Code to DPDK
This document outlines the guidelines for submitting code to DPDK.
-The DPDK development process is modelled (loosely) on the Linux Kernel development model so it is worth reading the
+The DPDK development process is modeled (loosely) on the Linux Kernel development model so it is worth reading the
Linux kernel guide on submitting patches:
`How to Get Your Change Into the Linux Kernel <https://www.kernel.org/doc/html/latest/process/submitting-patches.html>`_.
The rationale for many of the DPDK guidelines is explained in greater detail in the kernel guidelines.
@@ -32,6 +32,10 @@ The mailing list for DPDK development is `dev@dpdk.org <http://mails.dpdk.org/ar
Contributors will need to `register for the mailing list <http://mails.dpdk.org/listinfo/dev>`_ in order to submit patches.
It is also worth registering for the DPDK `Patchwork <http://patches.dpdk.org/project/dpdk/list/>`_
+If you are using the GitHub service, you can link your repository to
+the ``travis-ci.org`` build service. When you push patches to your GitHub
+repository, the travis service will automatically build your changes.
+
The development process requires some familiarity with the ``git`` version control system.
Refer to the `Pro Git Book <http://www.git-scm.com/book/>`_ for further information.
diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst
index 01b36247..fe17c7f1 100644
--- a/doc/guides/contributing/versioning.rst
+++ b/doc/guides/contributing/versioning.rst
@@ -548,26 +548,29 @@ utilities which can be installed via a package manager. For example::
The syntax of the ``validate-abi.sh`` utility is::
- ./devtools/validate-abi.sh <REV1> <REV2> <TARGET>
+ ./devtools/validate-abi.sh <REV1> <REV2>
Where ``REV1`` and ``REV2`` are valid gitrevisions(7)
https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
-on the local repo and target is the usual DPDK compilation target.
+on the local repo.
For example::
# Check between the previous and latest commit:
- ./devtools/validate-abi.sh HEAD~1 HEAD x86_64-native-linuxapp-gcc
+ ./devtools/validate-abi.sh HEAD~1 HEAD
+
+ # Check on a specific compilation target:
+ ./devtools/validate-abi.sh -t x86_64-native-linux-gcc HEAD~1 HEAD
# Check between two tags:
- ./devtools/validate-abi.sh v2.0.0 v2.1.0 x86_64-native-linuxapp-gcc
+ ./devtools/validate-abi.sh v2.0.0 v2.1.0
# Check between git master and local topic-branch "vhost-hacking":
- ./devtools/validate-abi.sh master vhost-hacking x86_64-native-linuxapp-gcc
+ ./devtools/validate-abi.sh master vhost-hacking
After the validation script completes (it can take a while since it need to
compile both tags) it will create compatibility reports in the
-``./compat_report`` directory. Listed incompatibilities can be found as
-follows::
+``./abi-check/compat_report`` directory. Listed incompatibilities can be found
+as follows::
- grep -lr Incompatible compat_reports/
+ grep -lr Incompatible abi-check/compat_reports/
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index d9fd41ae..ccfc137f 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -125,7 +125,7 @@ Extra notes
For AES Counter mode (AES-CTR), the library supports two different sizes for Initialization
Vector (IV):
-* 12 bytes: used mainly for IPSec, as it requires 12 bytes from the user, which internally
+* 12 bytes: used mainly for IPsec, as it requires 12 bytes from the user, which internally
are appended the counter block (4 bytes), which is set to 1 for the first block
(no padding required from the user)
diff --git a/doc/guides/cryptodevs/features/aesni_gcm.ini b/doc/guides/cryptodevs/features/aesni_gcm.ini
index b9e9c906..fdd3df69 100644
--- a/doc/guides/cryptodevs/features/aesni_gcm.ini
+++ b/doc/guides/cryptodevs/features/aesni_gcm.ini
@@ -30,3 +30,8 @@ AES GMAC = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'aesni_gcm' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index f7295745..9b4428ef 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -32,16 +32,30 @@ DES DOCSIS BPI = Y
;
[Auth]
MD5 HMAC = Y
+SHA1 = Y
SHA1 HMAC = Y
+SHA224 = Y
SHA224 HMAC = Y
+SHA256 = Y
SHA256 HMAC = Y
+SHA384 = Y
SHA384 HMAC = Y
+SHA512 = Y
SHA512 HMAC = Y
AES XCBC MAC = Y
AES CMAC (128) = Y
+AES GMAC = Y
;
; Supported AEAD algorithms of the 'aesni_mb' crypto driver.
;
[AEAD]
AES CCM (128) = Y
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'aesni_mb' crypto driver.
+;
+[Asymmetric]
diff --git a/doc/guides/cryptodevs/features/armv8.ini b/doc/guides/cryptodevs/features/armv8.ini
index 1e104771..e588f902 100644
--- a/doc/guides/cryptodevs/features/armv8.ini
+++ b/doc/guides/cryptodevs/features/armv8.ini
@@ -26,3 +26,8 @@ SHA256 HMAC = Y
; Supported AEAD algorithms of the 'armv8' crypto driver.
;
[AEAD]
+
+;
+; Supported Asymmetric algorithms of the 'armv8' crypto driver.
+;
+[Asymmetric]
diff --git a/doc/guides/cryptodevs/features/caam_jr.ini b/doc/guides/cryptodevs/features/caam_jr.ini
index 68f8d819..c64bd35d 100644
--- a/doc/guides/cryptodevs/features/caam_jr.ini
+++ b/doc/guides/cryptodevs/features/caam_jr.ini
@@ -44,3 +44,8 @@ SHA512 HMAC = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'caam_jr' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
index 4722e135..2970076b 100644
--- a/doc/guides/cryptodevs/features/ccp.ini
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -57,3 +57,8 @@ SHA3_512 HMAC = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'ccp' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 810da0d7..73ec389b 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -95,3 +95,12 @@ AES GCM (256) =
AES CCM (128) =
AES CCM (192) =
AES CCM (256) =
+;
+; Supported Asymmetric algorithms of a default crypto driver.
+;
+[Asymmetric]
+RSA =
+DSA =
+Modular Exponentiation =
+Modular Inversion =
+Diffie-hellman = \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index 69700df4..9f4e4029 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -44,3 +44,8 @@ SHA512 HMAC = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'dpaa2_sec' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 937b621c..954a7080 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -44,3 +44,8 @@ SHA512 HMAC = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'dpaa_sec' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/kasumi.ini b/doc/guides/cryptodevs/features/kasumi.ini
index 0e138f5a..f3d06100 100644
--- a/doc/guides/cryptodevs/features/kasumi.ini
+++ b/doc/guides/cryptodevs/features/kasumi.ini
@@ -22,3 +22,8 @@ KASUMI F9 = Y
; Supported AEAD algorithms of the 'kasumi' crypto driver.
;
[AEAD]
+
+;
+; Supported Asymmetric algorithms of the 'kasumi' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/mvsam.ini b/doc/guides/cryptodevs/features/mvsam.ini
index 0cc90a53..829deff6 100644
--- a/doc/guides/cryptodevs/features/mvsam.ini
+++ b/doc/guides/cryptodevs/features/mvsam.ini
@@ -52,3 +52,8 @@ AES GMAC = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'mvsam' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/null.ini b/doc/guides/cryptodevs/features/null.ini
index ecf5779a..a1c3e22a 100644
--- a/doc/guides/cryptodevs/features/null.ini
+++ b/doc/guides/cryptodevs/features/null.ini
@@ -24,3 +24,8 @@ NULL = Y
; Supported AEAD algorithms of the 'null' crypto driver.
;
[AEAD]
+
+;
+; Supported Asymmetric algorithms of the 'null' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/octeontx.ini b/doc/guides/cryptodevs/features/octeontx.ini
index 307ab88c..1735b8f5 100644
--- a/doc/guides/cryptodevs/features/octeontx.ini
+++ b/doc/guides/cryptodevs/features/octeontx.ini
@@ -60,3 +60,8 @@ ZUC EIA3 = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'octeontx' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
index 4f15ee0e..7955d884 100644
--- a/doc/guides/cryptodevs/features/qat.ini
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -60,3 +60,8 @@ AES GCM (256) = Y
AES CCM (128) = Y
AES CCM (192) = Y
AES CCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'qat' crypto driver.
+;
+[Asymmetric]
diff --git a/doc/guides/cryptodevs/features/snow3g.ini b/doc/guides/cryptodevs/features/snow3g.ini
index 27713617..ec2daf6c 100644
--- a/doc/guides/cryptodevs/features/snow3g.ini
+++ b/doc/guides/cryptodevs/features/snow3g.ini
@@ -22,3 +22,8 @@ SNOW3G UIA2 = Y
; Supported AEAD algorithms of the 'snow3g' crypto driver.
;
[AEAD]
+
+;
+; Supported Asymmetric algorithms of the 'snow3g' crypto driver.
+;
+[Asymmetric] \ No newline at end of file
diff --git a/doc/guides/cryptodevs/features/virtio.ini b/doc/guides/cryptodevs/features/virtio.ini
index 168fc174..b59f1669 100644
--- a/doc/guides/cryptodevs/features/virtio.ini
+++ b/doc/guides/cryptodevs/features/virtio.ini
@@ -24,3 +24,8 @@ SHA1 HMAC = Y
; Supported AEAD algorithms of the 'virtio' crypto driver.
;
[AEAD]
+
+;
+; Supported Asymmetric algorithms of the 'virtio' crypto driver.
+;
+[Asymmetric]
diff --git a/doc/guides/cryptodevs/features/zuc.ini b/doc/guides/cryptodevs/features/zuc.ini
index 5bb02afd..9b6a4287 100644
--- a/doc/guides/cryptodevs/features/zuc.ini
+++ b/doc/guides/cryptodevs/features/zuc.ini
@@ -22,3 +22,8 @@ ZUC EIA3 = Y
; Supported AEAD algorithms of the 'zuc' crypto driver.
;
[AEAD]
+
+;
+; Supported Asymmetric algorithms of the 'zuc' crypto driver.
+;
+[Asymmetric]
diff --git a/doc/guides/cryptodevs/openssl.rst b/doc/guides/cryptodevs/openssl.rst
index bdc30f66..2ac8090c 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -46,6 +46,14 @@ Supported AEAD algorithms:
* ``RTE_CRYPTO_AEAD_AES_GCM``
* ``RTE_CRYPTO_AEAD_AES_CCM``
+Supported Asymmetric Crypto algorithms:
+
+* ``RTE_CRYPTO_ASYM_XFORM_RSA``
+* ``RTE_CRYPTO_ASYM_XFORM_DSA``
+* ``RTE_CRYPTO_ASYM_XFORM_DH``
+* ``RTE_CRYPTO_ASYM_XFORM_MODINV``
+* ``RTE_CRYPTO_ASYM_XFORM_MODEX``
+
Installation
------------
diff --git a/doc/guides/cryptodevs/overview.rst b/doc/guides/cryptodevs/overview.rst
index 607e758d..f9f8c7be 100644
--- a/doc/guides/cryptodevs/overview.rst
+++ b/doc/guides/cryptodevs/overview.rst
@@ -18,7 +18,7 @@ Supported Feature Flags
being the operation in-place (input address = output address).
- "OOP SGL In SGL Out" feature flag stands for
- "Out-of-place Scatter-gather list Input, Scatter-gater list Output",
+ "Out-of-place Scatter-gather list Input, Scatter-gather list Output",
which means pmd supports different scatter-gather styled input and output buffers
(i.e. both can consists of multiple segments).
@@ -58,3 +58,10 @@ Supported AEAD Algorithms
.. _table_crypto_pmd_aead_algos:
.. include:: overview_aead_table.txt
+
+Supported Asymmetric Algorithms
+-------------------------------
+
+.. _table_crypto_pmd_asym_algos:
+
+.. include:: overview_asym_table.txt
diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
index a754a27e..7004ca43 100644
--- a/doc/guides/cryptodevs/scheduler.rst
+++ b/doc/guides/cryptodevs/scheduler.rst
@@ -165,7 +165,7 @@ operation:
For pure small packet size (64 bytes) traffic however the multi-core mode is not
an optimal solution, as it doesn't give significant per-core performance improvement.
For mixed traffic (IMIX) the optimal number of worker cores is around 2-3.
- For large packets (1.5 Kbytes) scheduler shows linear scaling in performance
+ For large packets (1.5 kbytes) scheduler shows linear scaling in performance
up to eight cores.
Each worker uses its own slave cryptodev. Only software cryptodevs
are supported. Only the same type of cryptodevs should be used concurrently.
diff --git a/doc/guides/eventdevs/opdl.rst b/doc/guides/eventdevs/opdl.rst
index 0262a337..979f6cd8 100644
--- a/doc/guides/eventdevs/opdl.rst
+++ b/doc/guides/eventdevs/opdl.rst
@@ -8,7 +8,7 @@ The OPDL (Ordered Packet Distribution Library) eventdev is a specific\
implementation of the eventdev API. It is particularly suited to packet\
processing workloads that have high throughput and low latency requirements.\
All packets follow the same path through the device. The order in which\
-packets follow is determinted by the order in which queues are set up.\
+packets follow is determined by the order in which queues are set up.\
Events are left on the ring until they are transmitted. As a result packets\
do not go out of order
diff --git a/doc/guides/eventdevs/sw.rst b/doc/guides/eventdevs/sw.rst
index afdcad76..04c8b030 100644
--- a/doc/guides/eventdevs/sw.rst
+++ b/doc/guides/eventdevs/sw.rst
@@ -70,7 +70,7 @@ Credit Quanta
The credit quanta is the number of credits that a port will fetch at a time from
the instance's credit pool. Higher numbers will cause less overhead in the
atomic credit fetch code, however it also reduces the overall number of credits
-in the system faster. A balanced number (eg 32) ensures that only small numbers
+in the system faster. A balanced number (e.g. 32) ensures that only small numbers
of credits are pre-allocated at a time, while also mitigating performance impact
of the atomics.
@@ -100,7 +100,7 @@ feature would be significant.
~~~~~~~~~~~~~~~~~~
The software eventdev does not support creating queues that handle all types of
-traffic. An eventdev with this capability allows enqueueing Atomic, Ordered and
+traffic. An eventdev with this capability allows enqueuing Atomic, Ordered and
Parallel traffic to the same queue, but scheduling each of them appropriately.
The reason to not allow Atomic, Ordered and Parallel event types in the
diff --git a/doc/guides/howto/lm_bond_virtio_sriov.rst b/doc/guides/howto/lm_bond_virtio_sriov.rst
index a47d6dbf..4e5ef4d5 100644
--- a/doc/guides/howto/lm_bond_virtio_sriov.rst
+++ b/doc/guides/howto/lm_bond_virtio_sriov.rst
@@ -328,7 +328,7 @@ On host_server_2: Terminal 1
.. code-block:: console
- testomd> show port info all
+ testpmd> show port info all
testpmd> show port stats all
testpmd> show bonding config 2
testpmd> port attach 0000:00:04.0
diff --git a/doc/guides/howto/lm_virtio_vhost_user.rst b/doc/guides/howto/lm_virtio_vhost_user.rst
index 3f5ebd58..019c124d 100644
--- a/doc/guides/howto/lm_virtio_vhost_user.rst
+++ b/doc/guides/howto/lm_virtio_vhost_user.rst
@@ -243,7 +243,7 @@ On host_server_2: Terminal 1
.. code-block:: console
- testomd> show port info all
+ testpmd> show port info all
testpmd> show port stats all
Virtio traffic is seen at P0 and P1.
@@ -338,7 +338,7 @@ reset_vf_on_212_131.sh
#!/bin/sh
# This script is run on the host 10.237.212.131 to reset SRIOV
- # BDF for Ninatic NIC is 0000:06:00.0
+ # BDF for Niantic NIC is 0000:06:00.0
cat /sys/bus/pci/devices/0000\:06\:00.0/max_vfs
echo 0 > /sys/bus/pci/devices/0000\:06\:00.0/max_vfs
cat /sys/bus/pci/devices/0000\:06\:00.0/max_vfs
diff --git a/doc/guides/howto/rte_flow.rst b/doc/guides/howto/rte_flow.rst
index 6a8534d9..e080570e 100644
--- a/doc/guides/howto/rte_flow.rst
+++ b/doc/guides/howto/rte_flow.rst
@@ -23,7 +23,7 @@ In this example we will create a simple rule that drops packets whose IPv4
destination equals 192.168.3.2. This code is equivalent to the following
testpmd command (wrapped for clarity)::
- tpmd> flow create 0 ingress pattern eth / vlan /
+ testpmd> flow create 0 ingress pattern eth / vlan /
ipv4 dst is 192.168.3.2 / end actions drop / end
Code
@@ -118,7 +118,7 @@ a mask.
This code is equivalent to the following testpmd command (wrapped for
clarity)::
- tpmd> flow create 0 ingress pattern eth / vlan /
+ testpmd> flow create 0 ingress pattern eth / vlan /
ipv4 dst spec 192.168.3.0 dst mask 255.255.255.0 /
end actions drop / end
@@ -219,7 +219,7 @@ In this example we will create a rule that routes all vlan id 123 to queue 3.
This code is equivalent to the following testpmd command (wrapped for
clarity)::
- tpmd> flow create 0 ingress pattern eth / vlan vid spec 123 /
+ testpmd> flow create 0 ingress pattern eth / vlan vid spec 123 /
end actions queue index 3 / end
Code
diff --git a/doc/guides/howto/virtio_user_as_exceptional_path.rst b/doc/guides/howto/virtio_user_as_exceptional_path.rst
index 4910c12d..ec021af3 100644
--- a/doc/guides/howto/virtio_user_as_exceptional_path.rst
+++ b/doc/guides/howto/virtio_user_as_exceptional_path.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2016 Intel Corporation.
-.. _virtio_user_as_excpetional_path:
+.. _virtio_user_as_exceptional_path:
Virtio_user as Exceptional Path
===============================
@@ -22,7 +22,7 @@ solution is very promising in:
* Features
vhost-net is born to be a networking solution, which has lots of networking
- related featuers, like multi queue, tso, multi-seg mbuf, etc.
+ related features, like multi queue, tso, multi-seg mbuf, etc.
* Performance
@@ -38,7 +38,7 @@ in :numref:`figure_virtio_user_as_exceptional_path`.
.. figure:: img/virtio_user_as_exceptional_path.*
- Overview of a DPDK app using virtio-user as excpetional path
+ Overview of a DPDK app using virtio-user as exceptional path
Sample Usage
@@ -75,7 +75,7 @@ compiling the kernel and those kernel modules should be inserted.
* ``queues``
- Number of multi-queues. Each qeueue will be served by a kthread. For example:
+ Number of multi-queues. Each queue will be served by a kthread. For example:
.. code-block:: console
diff --git a/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
index 9d1f0fa0..fd7a46c8 100644
--- a/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
+++ b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
@@ -14,25 +14,25 @@ This chapter describes how to cross compile DPDK for ARM64 from x86 build hosts.
Obtain the cross tool chain
---------------------------
The latest cross compile tool chain can be downloaded from:
-https://releases.linaro.org/components/toolchain/binaries/latest/aarch64-linux-gnu/.
+https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads.
-Following is the step to get the version 7.2.1, latest one at the time of this writing.
+Following is the step to get the version 8.2, latest one at the time of this writing.
.. code-block:: console
- wget https://releases.linaro.org/components/toolchain/binaries/latest/aarch64-linux-gnu/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu.tar.xz
+ wget https://developer.arm.com/-/media/Files/downloads/gnu-a/8.2-2019.01/gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu.tar.xz
Unzip and add into the PATH
---------------------------
.. code-block:: console
- tar -xvf gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu.tar.xz
- export PATH=$PATH:<cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/bin
+ tar -xvf gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu.tar.xz
+ export PATH=$PATH:<cross_install_dir>/gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/bin
.. note::
- For the host requirements and other info, refer to the release note section: https://releases.linaro.org/components/toolchain/binaries/latest/
+ For the host requirements and other info, refer to the release note section: https://releases.linaro.org/components/toolchain/binaries/
Getting the prerequisite library
--------------------------------
@@ -69,8 +69,8 @@ Copy the NUMA header files and lib to the cross compiler's directories:
.. code-block:: console
- cp <numa_install_dir>/include/numa*.h <cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/bin/../aarch64-linux-gnu/libc/usr/include/
- cp <numa_install_dir>/lib/libnuma.a <cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/7.2.1/
+ cp <numa_install_dir>/include/numa*.h <cross_install_dir>/gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/bin/../aarch64-linux-gnu/libc/usr/include/
+ cp <numa_install_dir>/lib/libnuma.a <cross_install_dir>/gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/8.2/
.. _configure_and_cross_compile_dpdk_build:
diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst
index 1c6f86a2..735bc6ee 100644
--- a/doc/guides/linux_gsg/sys_reqs.rst
+++ b/doc/guides/linux_gsg/sys_reqs.rst
@@ -184,7 +184,7 @@ In the case of a dual-socket NUMA system,
the number of hugepages reserved at boot time is generally divided equally between the two sockets
(on the assumption that sufficient memory is present on both sockets).
-See the Documentation/kernel-parameters.txt file in your Linux source tree for further details of these and other kernel options.
+See the Documentation/admin-guide/kernel-parameters.txt file in your Linux source tree for further details of these and other kernel options.
**Alternative:**
diff --git a/doc/guides/nics/atlantic.rst b/doc/guides/nics/atlantic.rst
index 80591b13..f6f2c66b 100644
--- a/doc/guides/nics/atlantic.rst
+++ b/doc/guides/nics/atlantic.rst
@@ -18,7 +18,7 @@ Supported features
- Port statistics
- RSS (Receive Side Scaling)
- Checksum offload
-- Jumbo Frame upto 16K
+- Jumbo Frame up to 16K
Configuration Information
^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst
index 58d88eef..39106274 100644
--- a/doc/guides/nics/cxgbe.rst
+++ b/doc/guides/nics/cxgbe.rst
@@ -126,7 +126,7 @@ enabling debugging options may affect system performance.
- ``CONFIG_RTE_LIBRTE_CXGBE_TPUT`` (default **y**)
- Toggle behaviour to prefer Throughput or Latency.
+ Toggle behavior to prefer Throughput or Latency.
Runtime Options
~~~~~~~~~~~~~~~
@@ -140,7 +140,7 @@ be passed as part of EAL arguments. For example,
- ``keep_ovlan`` (default **0**)
- Toggle behaviour to keep/strip outer VLAN in Q-in-Q packets. If
+ Toggle behavior to keep/strip outer VLAN in Q-in-Q packets. If
enabled, the outer VLAN tag is preserved in Q-in-Q packets. Otherwise,
the outer VLAN tag is stripped in Q-in-Q packets.
diff --git a/doc/guides/nics/dpaa.rst b/doc/guides/nics/dpaa.rst
index 2173673b..5900ed0d 100644
--- a/doc/guides/nics/dpaa.rst
+++ b/doc/guides/nics/dpaa.rst
@@ -251,7 +251,7 @@ state during application initialization:
automatically be assigned from the these high perf PUSH queues. Any queue
configuration beyond that will be standard Rx queues. The application can
choose to change their number if HW portals are limited.
- The valid values are from '0' to '4'. The valuse shall be set to '0' if the
+ The valid values are from '0' to '4'. The values shall be set to '0' if the
application want to use eventdev with DPAA device.
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 769dc4e1..04370e4c 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -379,7 +379,7 @@ active -- Ethernet, crypto, compression, etc.
DPBP based Mempool driver
~~~~~~~~~~~~~~~~~~~~~~~~~
-The DPBP driver is bound to a DPBP objects and provides sevices to
+The DPBP driver is bound to a DPBP object and provides services to
create a hardware offloaded packet buffer mempool.
DPAA2 NIC Driver
diff --git a/doc/guides/nics/enetc.rst b/doc/guides/nics/enetc.rst
index 8038bf20..376768d3 100644
--- a/doc/guides/nics/enetc.rst
+++ b/doc/guides/nics/enetc.rst
@@ -69,7 +69,7 @@ Supported ENETC SoCs
Prerequisites
~~~~~~~~~~~~~
-There are three main pre-requisities for executing ENETC PMD on a ENETC
+There are three main pre-requisites for executing ENETC PMD on a ENETC
compatible board:
1. **ARM 64 Tool Chain**
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index bc38f51a..c1b83b9b 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -224,7 +224,7 @@ the use of SR-IOV.
passthrough devices do not require libvirt, port profiles, and VM-FEX.
-.. _enic-genic-flow-api:
+.. _enic-generic-flow-api:
Generic Flow API support
------------------------
@@ -247,7 +247,7 @@ Generic Flow API is supported. The baseline support is:
in the pattern.
- Attributes: ingress
- - Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
+ - Items: eth, vlan, ipv4, ipv6, udp, tcp, vxlan, inner eth, vlan, ipv4, ipv6, udp, tcp
- Actions: queue and void
- Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
- In total, up to 64 bytes of mask is allowed across all headers
@@ -255,7 +255,7 @@ Generic Flow API is supported. The baseline support is:
- **1300 and later series VICS with advanced filters enabled**
- Attributes: ingress
- - Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
+ - Items: eth, vlan, ipv4, ipv6, udp, tcp, vxlan, inner eth, vlan, ipv4, ipv6, udp, tcp
- Actions: queue, mark, drop, flag and void
- Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
- In total, up to 64 bytes of mask is allowed across all headers
@@ -266,6 +266,12 @@ Generic Flow API is supported. The baseline support is:
- Action: count
+The VIC performs packet matching after applying VLAN strip. If VLAN
+stripping is enabled, EtherType in the ETH item corresponds to the
+stripped VLAN header's EtherType. Stripping does not affect the VLAN
+item. TCI and EtherType in the VLAN item are matched against those in
+the (stripped) VLAN header whether stripping is enabled or disabled.
+
More features may be added in future firmware and new versions of the VIC.
Please refer to the release notes.
@@ -450,6 +456,7 @@ PKT_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
1000 for 1300 series VICs). Filters are checked for matching in the order they
were added. Since there currently is no grouping or priority support,
'catch-all' filters should be added last.
+ - The supported range of IDs for the 'MARK' action is 0 - 0xFFFD.
- **Statistics**
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index d3f90483..d57ddc2f 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -285,7 +285,7 @@ Inner RSS
Supports RX RSS hashing on Inner headers.
-* **[users] rte_flow_action_rss**: ``level``.
+* **[uses] rte_flow_action_rss**: ``level``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
@@ -495,7 +495,7 @@ Supports adding traffic mirroring rules.
Inline crypto
-------------
-Supports inline crypto processing (eg. inline IPsec). See Security library and PMD documentation for more details.
+Supports inline crypto processing (e.g. inline IPsec). See Security library and PMD documentation for more details.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
diff --git a/doc/guides/nics/features/qede.ini b/doc/guides/nics/features/qede.ini
index 0d081002..f69e4f84 100644
--- a/doc/guides/nics/features/qede.ini
+++ b/doc/guides/nics/features/qede.ini
@@ -23,6 +23,7 @@ N-tuple filter = Y
Tunnel filter = Y
Flow director = Y
Flow control = Y
+Flow API = Y
CRC offload = Y
VLAN offload = Y
L3 checksum offload = Y
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 40bf0f14..62e90d9f 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -571,7 +571,7 @@ bandwidth setting.
TC TX scheduling mode setting
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There're 2 TX scheduling modes for TCs, round robin and strict priority mode.
+There are 2 TX scheduling modes for TCs, round robin and strict priority mode.
If a TC is set to strict priority mode, it can consume unlimited bandwidth.
It means if APP has set the max bandwidth for that TC, it comes to no
effect.
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 1c294b06..975143f8 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -203,8 +203,8 @@ as a workaround.
X550 does not support legacy interrupt mode
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Desccription
-^^^^^^^^^^^^
+Description
+^^^^^^^^^^^
X550 cannot get interrupts if using ``uio_pci_generic`` module or using legacy
interrupt mode of ``igb_uio`` or ``vfio``. Because the errata of X550 states
that the Interrupt Status bit is not implemented. The errata is the item #22
diff --git a/doc/guides/nics/kni.rst b/doc/guides/nics/kni.rst
index 204fbd56..c4fc9637 100644
--- a/doc/guides/nics/kni.rst
+++ b/doc/guides/nics/kni.rst
@@ -64,7 +64,7 @@ backend device by default.
PMD arguments
-------------
-``no_request_thread``, by default PMD creates a phtread for each KNI interface
+``no_request_thread``, by default PMD creates a pthread for each KNI interface
to handle Linux network interface control commands, like ``ifconfig kni0 up``
With ``no_request_thread`` option, pthread is not created and control commands
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 436898ac..31238ae3 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -85,6 +85,11 @@ Limitations
- Forked secondary process not supported.
- All mempools must be initialized before rte_eth_dev_start().
+ - External memory unregistered in EAL memseg list cannot be used for DMA
+ unless such memory has been registered by ``mlx5_mr_update_ext_mp()`` in
+ primary process and remapped to the same virtual address in secondary
+ process. If the external memory is registered by primary process but has
+ different virtual address in secondary process, unexpected error may happen.
- Flow pattern without any specific vlan will match for vlan packets as well:
@@ -148,7 +153,7 @@ Limitations
- E-Switch VXLAN decapsulation Flow:
- - can be appiled to PF port only.
+ - can be applied to PF port only.
- must specify VF port action (packet redirection from PF to VF).
- must specify tunnel outer UDP local (destination) port, wildcards not allowed.
- must specify tunnel outer VNI, wildcards not allowed.
@@ -163,7 +168,7 @@ Limitations
- must specify the VXLAN item with tunnel outer parameters.
- must specify the tunnel outer VNI in the VXLAN item.
- must specify the tunnel outer remote (destination) UDP port in the VXLAN item.
- - must specify the tunnel outer local (source) IPv4 or IPv6 in the , this address will locally (with scope link) assigned to the outer network interace, wildcards not allowed.
+ - must specify the tunnel outer local (source) IPv4 or IPv6 in the , this address will locally (with scope link) assigned to the outer network interface, wildcards not allowed.
- must specify the tunnel outer remote (destination) IPv4 or IPv6 in the VXLAN item, group IPs allowed.
- must specify the tunnel outer destination MAC address in the VXLAN item, this address will be used to create neigh rule.
@@ -306,7 +311,7 @@ Run-time configuration
buffers per a packet, one large buffer is posted in order to receive multiple
packets on the buffer. A MPRQ buffer consists of multiple fixed-size strides
and each stride receives one packet. MPRQ can improve throughput for
- small-packet tarffic.
+ small-packet traffic.
When MPRQ is enabled, max_rx_pkt_len can be larger than the size of
user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
@@ -317,7 +322,7 @@ Run-time configuration
- ``mprq_log_stride_num`` parameter [int]
Log 2 of the number of strides for Multi-Packet Rx queue. Configuring more
- strides can reduce PCIe tarffic further. If configured value is not in the
+ strides can reduce PCIe traffic further. If configured value is not in the
range of device capability, the default value will be set with a warning
message. The default value is 4 which is 16 strides per a buffer, valid only
if ``mprq_en`` is set.
@@ -564,7 +569,7 @@ Either RDMA Core library with a recent enough Linux kernel release
(recommended) or Mellanox OFED, which provides compatibility with older
releases.
-RMDA Core with Linux Kernel
+RDMA Core with Linux Kernel
^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Minimal kernel version : v4.14 or the most recent 4.14-rc (see `Linux installation documentation`_)
diff --git a/doc/guides/nics/mvpp2.rst b/doc/guides/nics/mvpp2.rst
index b2ddeab5..f63d8587 100644
--- a/doc/guides/nics/mvpp2.rst
+++ b/doc/guides/nics/mvpp2.rst
@@ -91,7 +91,7 @@ Limitations
chance to start in a sane state.
- MUSDK architecture does not support changing configuration in run time.
- All nessesary configurations should be done before first dev_start().
+ All necessary configurations should be done before first dev_start().
- RX queue start/stop is not supported.
diff --git a/doc/guides/nics/netvsc.rst b/doc/guides/nics/netvsc.rst
index 87fabf5b..6dbb9a55 100644
--- a/doc/guides/nics/netvsc.rst
+++ b/doc/guides/nics/netvsc.rst
@@ -89,7 +89,7 @@ operations:
.. Note::
- The dpkd-devbind.py script can not be used since it only handles PCI devices.
+ The dpdk-devbind.py script can not be used since it only handles PCI devices.
Prerequisites
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index 40065284..f449b19d 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -96,7 +96,7 @@ Non-supported Features
The features not yet supported include:
-- Receive queue interupts
+- Receive queue interrupts
- Priority-based flow control
@@ -207,12 +207,12 @@ Supported actions:
Validating flow rules depends on the firmware variant.
-Ethernet destinaton individual/group match
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Ethernet destination individual/group match
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ethernet item supports I/G matching, if only the corresponding bit is set
-in the mask of destination address. If destinaton address in the spec is
-multicast, it matches all multicast (and broadcast) packets, oherwise it
+in the mask of destination address. If destination address in the spec is
+multicast, it matches all multicast (and broadcast) packets, otherwise it
matches unicast packets that are not filtered by other flow rules.
Exceptions to flow rules
@@ -346,10 +346,10 @@ boolean parameters value.
- ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
- Choose hardware tunning to be optimized for either throughput or
+ Choose hardware tuning to be optimized for either throughput or
low-latency.
**auto** allows NIC firmware to make a choice based on
- installed licences and firmware variant configured using **sfboot**.
+ installed licenses and firmware variant configured using **sfboot**.
- ``stats_update_period_ms`` [long] (default **1000**)
diff --git a/doc/guides/nics/szedata2.rst b/doc/guides/nics/szedata2.rst
index a2092f9c..94dba82d 100644
--- a/doc/guides/nics/szedata2.rst
+++ b/doc/guides/nics/szedata2.rst
@@ -89,7 +89,7 @@ The NFB cards are multi-port multi-queue cards, where (generally) data from any
Ethernet port may be sent to any queue.
They were historically represented in DPDK as a single port.
-However, the new NFB-200G2QL card employs an addon cable which allows to connect
+However, the new NFB-200G2QL card employs an add-on cable which allows to connect
it to two physical PCI-E slots at the same time (see the diagram below).
This is done to allow 200 Gbps of traffic to be transferred through the PCI-E
bus (note that a single PCI-E 3.0 x16 slot provides only 125 Gbps theoretical
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index 9a3d7b34..14c669b7 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -40,7 +40,7 @@ actual MAC address: ``00:64:74:61:70:[00-FF]``.
--vdev=net_tap0,mac="00:64:74:61:70:11"
The MAC address will have a user value passed as string. The MAC address is in
-format with delimeter ``:``. The string is byte converted to hex and you get
+format with delimiter ``:``. The string is byte converted to hex and you get
the actual MAC address: ``00:64:74:61:70:11``.
It is possible to specify a remote netdevice to capture packets from by adding
diff --git a/doc/guides/platform/dpaa.rst b/doc/guides/platform/dpaa.rst
index 39048715..6005f222 100644
--- a/doc/guides/platform/dpaa.rst
+++ b/doc/guides/platform/dpaa.rst
@@ -4,7 +4,7 @@
NXP QorIQ DPAA Board Support Package
====================================
-This doc has information about steps to setup QorIq dpaa
+This doc has information about steps to setup QorIQ dpaa
based layerscape platform and information about common offload
hw block drivers of **NXP QorIQ DPAA** SoC family.
@@ -38,7 +38,7 @@ Common Offload HW Block Drivers
Steps To Setup Platform
-----------------------
-There are four main pre-requisities for executing DPAA PMD on a DPAA
+There are four main pre-requisites for executing DPAA PMD on a DPAA
compatible board:
1. **ARM 64 Tool Chain**
diff --git a/doc/guides/platform/dpaa2.rst b/doc/guides/platform/dpaa2.rst
index 5a64406e..2586af0f 100644
--- a/doc/guides/platform/dpaa2.rst
+++ b/doc/guides/platform/dpaa2.rst
@@ -4,7 +4,7 @@
NXP QorIQ DPAA2 Board Support Package
=====================================
-This doc has information about steps to setup NXP QoriQ DPAA2 platform
+This doc has information about steps to setup NXP QorIQ DPAA2 platform
and information about common offload hw block drivers of
**NXP QorIQ DPAA2** SoC family.
@@ -48,7 +48,7 @@ Common Offload HW Block Drivers
Steps To Setup Platform
-----------------------
-There are four main pre-requisities for executing DPAA2 PMD on a DPAA2
+There are four main pre-requisites for executing DPAA2 PMD on a DPAA2
compatible board:
1. **ARM 64 Tool Chain**
diff --git a/doc/guides/prog_guide/bbdev.rst b/doc/guides/prog_guide/bbdev.rst
index 9de14443..658ffd40 100644
--- a/doc/guides/prog_guide/bbdev.rst
+++ b/doc/guides/prog_guide/bbdev.rst
@@ -78,7 +78,7 @@ From the application point of view, each instance of a bbdev device consists of
one or more queues identified by queue IDs. While different devices may have
different capabilities (e.g. support different operation types), all queues on
a device support identical configuration possibilities. A queue is configured
-for only one type of operation and is configured at initializations time.
+for only one type of operation and is configured at initialization time.
When an operation is enqueued to a specific queue ID, the result is dequeued
from the same queue ID.
@@ -678,7 +678,7 @@ bbdev framework, by giving a sample code performing a loop-back operation with a
baseband processor capable of transceiving data packets.
The following sample C-like pseudo-code shows the basic steps to encode several
-buffers using (**sw_trubo**) bbdev PMD.
+buffers using (**sw_turbo**) bbdev PMD.
.. code-block:: c
diff --git a/doc/guides/prog_guide/compressdev.rst b/doc/guides/prog_guide/compressdev.rst
index 87e26490..3ba4238c 100644
--- a/doc/guides/prog_guide/compressdev.rst
+++ b/doc/guides/prog_guide/compressdev.rst
@@ -17,7 +17,7 @@ Device Creation
Physical compression devices are discovered during the bus probe of the EAL function
which is executed at DPDK initialization, based on their unique device identifier.
-For eg. PCI devices can be identified using PCI BDF (bus/bridge, device, function).
+For e.g. PCI devices can be identified using PCI BDF (bus/bridge, device, function).
Specific physical compression devices, like other physical devices in DPDK can be
white-listed or black-listed using the EAL command line options.
@@ -379,7 +379,7 @@ using priv_xform would look like:
setup op->m_src and op->m_dst;
}
num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, comp_ops, NUM_OPS);
- /* wait for this to complete before enqueing next*/
+ /* wait for this to complete before enqueuing next*/
do {
num_deque = rte_compressdev_dequeue_burst(cdev_id, 0 , &processed_ops, NUM_OPS);
} while (num_dqud < num_enqd);
@@ -526,7 +526,7 @@ An example pseudocode to set up and process a stream having NUM_CHUNKS with each
op->src.length = CHUNK_LEN;
op->input_chksum = 0;
num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, &op[i], 1);
- /* wait for this to complete before enqueing next*/
+ /* wait for this to complete before enqueuing next*/
do {
num_deqd = rte_compressdev_dequeue_burst(cdev_id, 0 , &processed_ops, 1);
} while (num_deqd < num_enqd);
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index 8ee33c87..23770ffd 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -14,7 +14,7 @@ and AEAD symmetric and asymmetric Crypto operations.
Design Principles
-----------------
-The cryptodev library follows the same basic principles as those used in DPDKs
+The cryptodev library follows the same basic principles as those used in DPDK's
Ethernet Device framework. The Crypto framework provides a generic Crypto device
framework which supports both physical (hardware) and virtual (software) Crypto
devices as well as a generic Crypto API which allows Crypto devices to be
@@ -48,7 +48,7 @@ From the command line using the --vdev EAL option
* If DPDK application requires multiple software crypto PMD devices then required
number of ``--vdev`` with appropriate libraries are to be added.
- * An Application with crypto PMD instaces sharing the same library requires unique ID.
+ * An Application with crypto PMD instances sharing the same library requires unique ID.
Example: ``--vdev 'crypto_aesni_mb0' --vdev 'crypto_aesni_mb1'``
@@ -382,7 +382,7 @@ Operation Management and Allocation
The cryptodev library provides an API set for managing Crypto operations which
utilize the Mempool Library to allocate operation buffers. Therefore, it ensures
-that the crytpo operation is interleaved optimally across the channels and
+that the crypto operation is interleaved optimally across the channels and
ranks for optimal processing.
A ``rte_crypto_op`` contains a field indicating the pool that it originated from.
When calling ``rte_crypto_op_free(op)``, the operation returns to its original pool.
@@ -586,7 +586,7 @@ Sample code
There are various sample applications that show how to use the cryptodev library,
such as the L2fwd with Crypto sample application (L2fwd-crypto) and
-the IPSec Security Gateway application (ipsec-secgw).
+the IPsec Security Gateway application (ipsec-secgw).
While these applications demonstrate how an application can be created to perform
generic crypto operation, the required complexity hides the basic steps of
@@ -767,7 +767,7 @@ using one of the crypto PMDs available in DPDK.
/*
* Dequeue the crypto operations until all the operations
- * are proccessed in the crypto device.
+ * are processed in the crypto device.
*/
uint16_t num_dequeued_ops, total_num_dequeued_ops = 0;
do {
@@ -846,7 +846,7 @@ the order in which the transforms are passed indicates the order of the chaining
Not all asymmetric crypto xforms are supported for chaining. Currently supported
asymmetric crypto chaining is Diffie-Hellman private key generation followed by
public generation. Also, currently API does not support chaining of symmetric and
-asymmetric crypto xfroms.
+asymmetric crypto xforms.
Each xform defines specific asymmetric crypto algo. Currently supported are:
* RSA
diff --git a/doc/guides/prog_guide/dev_kit_build_system.rst b/doc/guides/prog_guide/dev_kit_build_system.rst
index da83a31e..a851a811 100644
--- a/doc/guides/prog_guide/dev_kit_build_system.rst
+++ b/doc/guides/prog_guide/dev_kit_build_system.rst
@@ -216,8 +216,6 @@ Objects
Misc
^^^^
-* rte.doc.mk: Documentation in the development kit framework
-
* rte.gnuconfigure.mk: Build an application that is configure-based.
* rte.subdir.mk: Build several directories in the development kit framework.
@@ -249,7 +247,7 @@ Creates the following symbol:
Which ``dpdk-pmdinfogen`` scans for. Using this information other relevant
bits of data can be exported from the object file and used to produce a
hardware support description, that ``dpdk-pmdinfogen`` then encodes into a
-json formatted string in the following format:
+JSON formatted string in the following format:
.. code-block:: c
diff --git a/doc/guides/prog_guide/efd_lib.rst b/doc/guides/prog_guide/efd_lib.rst
index cb1a1df8..2b355ff2 100644
--- a/doc/guides/prog_guide/efd_lib.rst
+++ b/doc/guides/prog_guide/efd_lib.rst
@@ -423,6 +423,6 @@ References
1- EFD is based on collaborative research work between Intel and
Carnegie Mellon University (CMU), interested readers can refer to the paper
-“Scaling Up Clustered Network Appliances with ScaleBricks;” Dong Zhou et al.
+"Scaling Up Clustered Network Appliances with ScaleBricks" Dong Zhou et al.
at SIGCOMM 2015 (`http://conferences.sigcomm.org/sigcomm/2015/pdf/papers/p241.pdf`)
for more information.
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 426acfc2..2bb77b01 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -147,6 +147,14 @@ A default validator callback is provided by EAL, which can be enabled with a
``--socket-limit`` command-line option, for a simple way to limit maximum amount
of memory that can be used by DPDK application.
+.. warning::
+ Memory subsystem uses DPDK IPC internally, so memory allocations/callbacks
+ and IPC must not be mixed: it is not safe to allocate/free memory inside
+ memory-related or IPC callbacks, and it is not safe to use IPC inside
+ memory-related callbacks. See chapter
+ :ref:`Multi-process Support <Multi-process_Support>` for more details about
+ DPDK IPC.
+
+ Legacy memory mode
This mode is enabled by specifying ``--legacy-mem`` command-line switch to the
@@ -441,6 +449,28 @@ Those TLS include *_cpuset* and *_socket_id*:
* *_socket_id* stores the NUMA node of the CPU set. If the CPUs in CPU set belong to different NUMA node, the *_socket_id* will be set to SOCKET_ID_ANY.
+Control Thread API
+~~~~~~~~~~~~~~~~~~
+
+It is possible to create Control Threads using the public API
+``rte_ctrl_thread_create()``.
+Those threads can be used for management/infrastructure tasks and are used
+internally by DPDK for multi process support and interrupt handling.
+
+Those threads will be scheduled on CPUs part of the original process CPU
+affinity from which the dataplane and service lcores are excluded.
+
+For example, on a 8 CPUs system, starting a dpdk application with -l 2,3
+(dataplane cores), then depending on the affinity configuration which can be
+controlled with tools like taskset (Linux) or cpuset (FreeBSD),
+
+- with no affinity configuration, the Control Threads will end up on
+ 0-1,4-7 CPUs.
+- with affinity restricted to 2-4, the Control Threads will end up on
+ CPU 4.
+- with affinity restricted to 2-3, the Control Threads will end up on
+ CPU 2 (master lcore, which is the default when no CPU is available).
+
.. _known_issue_label:
Known Issues
@@ -626,7 +656,7 @@ The most important fields in the structure and how they are used are described b
Malloc heap is a doubly-linked list, where each element keeps track of its
previous and next elements. Due to the fact that hugepage memory can come and
-go, neighbouring malloc elements may not necessarily be adjacent in memory.
+go, neighboring malloc elements may not necessarily be adjacent in memory.
Also, since a malloc element may span multiple pages, its contents may not
necessarily be IOVA-contiguous either - each malloc element is only guaranteed
to be virtually contiguous.
diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 0166bb45..7bc559c8 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -157,7 +157,7 @@ The servicing_weight member of struct rte_event_eth_rx_adapter_queue_conf
is applicable when the adapter uses a service core function. The application
has to enable Rx queue interrupts when configuring the ethernet device
using the ``rte_eth_dev_configure()`` function and then use a servicing_weight
-of zero when addding the Rx queue to the adapter.
+of zero when adding the Rx queue to the adapter.
The adapter creates a thread blocked on the interrupt, on an interrupt this
thread enqueues the port id and the queue id to a ring buffer. The adapter
@@ -175,9 +175,9 @@ Rx Callback for SW Rx Adapter
For SW based packet transfers, i.e., when the
``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` is not set in the adapter's
capabilities flags for a particular ethernet device, the service function
-temporarily enqueues mbufs to an event buffer before batch enqueueing these
+temporarily enqueues mbufs to an event buffer before batch enqueuing these
to the event device. If the buffer fills up, the service function stops
-dequeueing packets from the ethernet device. The application may want to
+dequeuing packets from the ethernet device. The application may want to
monitor the buffer fill level and instruct the service function to selectively
enqueue packets to the event device. The application may also use some other
criteria to decide which packets should enter the event device even when
diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
index 8fcae546..27c51d25 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -42,7 +42,7 @@ The rte_event structure contains the following metadata fields, which the
application fills in to have the event scheduled as required:
* ``flow_id`` - The targeted flow identifier for the enq/deq operation.
-* ``event_type`` - The source of this event, eg RTE_EVENT_TYPE_ETHDEV or CPU.
+* ``event_type`` - The source of this event, e.g. RTE_EVENT_TYPE_ETHDEV or CPU.
* ``sub_event_type`` - Distinguishes events inside the application, that have
the same event_type (see above)
* ``op`` - This field takes one of the RTE_EVENT_OP_* values, and tells the
@@ -265,7 +265,7 @@ Linking Queues and Ports
The final step is to "wire up" the ports to the queues. After this, the
eventdev is capable of scheduling events, and when cores request work to do,
the correct events are provided to that core. Note that the RX core takes input
-from eg: a NIC so it is not linked to any eventdev queues.
+from e.g.: a NIC so it is not linked to any eventdev queues.
Linking all workers to atomic queues, and the TX core to the single-link queue
can be achieved like this:
@@ -276,7 +276,7 @@ can be achieved like this:
uint8_t tx_port_id = 5;
uint8_t atomic_qs[] = {0, 1};
uint8_t single_link_q = 2;
- uin8t_t priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ uint8_t priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
for(int worker_port_id = 1; worker_port_id <= 4; worker_port_id++) {
int links_made = rte_event_port_link(dev_id, worker_port_id, atomic_qs, NULL, 2);
diff --git a/doc/guides/prog_guide/kernel_nic_interface.rst b/doc/guides/prog_guide/kernel_nic_interface.rst
index 33ea980e..7b8a481a 100644
--- a/doc/guides/prog_guide/kernel_nic_interface.rst
+++ b/doc/guides/prog_guide/kernel_nic_interface.rst
@@ -225,7 +225,7 @@ application functions:
``config_promiscusity``:
- Called when the user changes the promiscusity state of the KNI
+ Called when the user changes the promiscuity state of the KNI
interface. For example, when the user runs ``ip link set promisc
[on|off] dev <ifaceX>``. If the user sets this callback function to
NULL, but sets the ``port_id`` field to a value other than -1, a default
diff --git a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
index 56abee54..2459fd24 100644
--- a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
+++ b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
@@ -477,22 +477,22 @@ Create a bonded device in round robin mode with two slaves specified by their PC
.. code-block:: console
- $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0, slave=0000:00a:00.01,slave=0000:004:00.00' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0,slave=0000:0a:00.01,slave=0000:04:00.00' -- --port-topology=chained
Create a bonded device in round robin mode with two slaves specified by their PCI address and an overriding MAC address:
.. code-block:: console
- $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0, slave=0000:00a:00.01,slave=0000:004:00.00,mac=00:1e:67:1d:fd:1d' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0,slave=0000:0a:00.01,slave=0000:04:00.00,mac=00:1e:67:1d:fd:1d' -- --port-topology=chained
Create a bonded device in active backup mode with two slaves specified, and a primary slave specified by their PCI addresses:
.. code-block:: console
- $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=1, slave=0000:00a:00.01,slave=0000:004:00.00,primary=0000:00a:00.01' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=1,slave=0000:0a:00.01,slave=0000:04:00.00,primary=0000:0a:00.01' -- --port-topology=chained
Create a bonded device in balance mode with two slaves specified by their PCI addresses, and a transmission policy of layer 3 + 4 forwarding:
.. code-block:: console
- $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=2, slave=0000:00a:00.01,slave=0000:004:00.00,xmit_policy=l34' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=2,slave=0000:0a:00.01,slave=0000:04:00.00,xmit_policy=l34' -- --port-topology=chained
diff --git a/doc/guides/prog_guide/lpm_lib.rst b/doc/guides/prog_guide/lpm_lib.rst
index 99563a4a..1609a57d 100644
--- a/doc/guides/prog_guide/lpm_lib.rst
+++ b/doc/guides/prog_guide/lpm_lib.rst
@@ -195,4 +195,4 @@ References
`http://www.ietf.org/rfc/rfc1519 <http://www.ietf.org/rfc/rfc1519>`_
* Pankaj Gupta, Algorithms for Routing Lookups and Packet Classification, PhD Thesis, Stanford University,
- 2000 (`http://klamath.stanford.edu/~pankaj/thesis/ thesis_1sided.pdf <http://klamath.stanford.edu/~pankaj/thesis/%20thesis_1sided.pdf>`_ )
+ 2000 (`http://klamath.stanford.edu/~pankaj/thesis/thesis_1sided.pdf <http://klamath.stanford.edu/~pankaj/thesis/thesis_1sided.pdf>`_ )
diff --git a/doc/guides/prog_guide/metrics_lib.rst b/doc/guides/prog_guide/metrics_lib.rst
index e68e4e74..89bc7d68 100644
--- a/doc/guides/prog_guide/metrics_lib.rst
+++ b/doc/guides/prog_guide/metrics_lib.rst
@@ -25,7 +25,7 @@ individual device. Since the metrics library is self-contained, the only
restriction on port numbers is that they are less than ``RTE_MAX_ETHPORTS``
- there is no requirement for the ports to actually exist.
-Initialising the library
+Initializing the library
------------------------
Before the library can be used, it has to be initialized by calling
diff --git a/doc/guides/prog_guide/multi_proc_support.rst b/doc/guides/prog_guide/multi_proc_support.rst
index 1384fe33..a84083b9 100644
--- a/doc/guides/prog_guide/multi_proc_support.rst
+++ b/doc/guides/prog_guide/multi_proc_support.rst
@@ -176,7 +176,7 @@ Some of these are documented below:
* The use of function pointers between multiple processes running based of different compiled binaries is not supported,
since the location of a given function in one process may be different to its location in a second.
- This prevents the librte_hash library from behaving properly as in a multi-threaded instance,
+ This prevents the librte_hash library from behaving properly as in a multi-process instance,
since it uses a pointer to the hash function internally.
To work around this issue, it is recommended that multi-process applications perform the hash calculations by directly calling
@@ -263,9 +263,9 @@ To send a request, a message descriptor ``rte_mp_msg`` must be populated.
Additionally, a ``timespec`` value must be specified as a timeout, after which
IPC will stop waiting and return.
-For synchronous synchronous requests, the ``rte_mp_reply`` descriptor must also
-be created. This is where the responses will be stored. The list of fields that
-will be populated by IPC are as follows:
+For synchronous requests, the ``rte_mp_reply`` descriptor must also be created.
+This is where the responses will be stored.
+The list of fields that will be populated by IPC are as follows:
* ``nb_sent`` - number indicating how many requests were sent (i.e. how many
peer processes were active at the time of the request).
@@ -273,7 +273,7 @@ will be populated by IPC are as follows:
those peer processes that were active at the time of request, how many have
replied)
* ``msgs`` - pointer to where all of the responses are stored. The order in
- which responses appear is undefined. Whendoing sycnrhonous requests, this
+ which responses appear is undefined. When doing synchronous requests, this
memory must be freed by the requestor after request completes!
For asynchronous requests, a function pointer to the callback function must be
@@ -309,6 +309,13 @@ If a response is required, a new ``rte_mp_msg`` message descriptor must be
constructed and sent via ``rte_mp_reply()`` function, along with ``peer``
pointer. The resulting response will then be delivered to the correct requestor.
+.. warning::
+ Simply returning a value when processing a request callback will not send a
+ response to the request - it must always be explicitly sent even in case
+ of errors. Implementation of error signalling rests with the application,
+ there is no built-in way to indicate success or error for a request. Failing
+ to do so will cause the requestor to time out while waiting on a response.
+
Misc considerations
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -318,6 +325,11 @@ supported. However, since sending messages (not requests) does not involve an
IPC thread, sending messages while processing another message or request is
supported.
+Since the memory subsystem uses IPC internally, memory allocations and IPC must
+not be mixed: it is not safe to use IPC inside a memory-related callback, nor is
+it safe to allocate/free memory inside IPC callbacks. Attempting to do so may
+lead to a deadlock.
+
Asynchronous request callbacks may be triggered either from IPC thread or from
interrupt thread, depending on whether the request has timed out. It is
therefore suggested to avoid waiting for interrupt-based events (such as alarms)
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index b2cf4835..6fae39f9 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -374,9 +374,9 @@ parameters to those ports.
this argument allows user to specify which switch ports to enable port
representors for.::
- -w BDBF,representor=0
- -w BDBF,representor=[0,4,6,9]
- -w BDBF,representor=[0-31]
+ -w DBDF,representor=0
+ -w DBDF,representor=[0,4,6,9]
+ -w DBDF,representor=[0-31]
Note: PMDs are not required to support the standard device arguments and users
should consult the relevant PMD documentation to see support devargs.
diff --git a/doc/guides/prog_guide/profile_app.rst b/doc/guides/prog_guide/profile_app.rst
index 02f05614..c1b29f93 100644
--- a/doc/guides/prog_guide/profile_app.rst
+++ b/doc/guides/prog_guide/profile_app.rst
@@ -64,7 +64,7 @@ The default ``cntvct_el0`` based ``rte_rdtsc()`` provides a portable means to
get a wall clock counter in user space. Typically it runs at <= 100MHz.
The alternative method to enable ``rte_rdtsc()`` for a high resolution wall
-clock counter is through the armv8 PMU subsystem. The PMU cycle counter runs
+clock counter is through the ARMv8 PMU subsystem. The PMU cycle counter runs
at CPU frequency. However, access to the PMU cycle counter from user space is
not enabled by default in the arm64 linux kernel. It is possible to enable
cycle counter for user space access by configuring the PMU from the privileged
@@ -75,7 +75,7 @@ scheme. Application can choose the PMU based implementation with
``CONFIG_RTE_ARM_EAL_RDTSC_USE_PMU``.
The example below shows the steps to configure the PMU based cycle counter on
-an armv8 machine.
+an ARMv8 machine.
.. code-block:: console
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index dbf4999a..4e9b4440 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -2131,7 +2131,7 @@ as defined in the ``rte_flow_action_raw_decap``
This action modifies the payload of matched flows. The data supplied must
be a valid header, either holding layer 2 data in case of removing layer 2
-before eincapsulation of layer 3 tunnel (for example MPLSoGRE) or complete
+before encapsulation of layer 3 tunnel (for example MPLSoGRE) or complete
tunnel definition starting from layer 2 and moving to the tunnel item itself.
When applied to the original packet the resulting packet must be a
valid packet.
@@ -2281,7 +2281,7 @@ Action: ``DEC_TTL``
Decrease TTL value.
If there is no valid RTE_FLOW_ITEM_TYPE_IPV4 or RTE_FLOW_ITEM_TYPE_IPV6
-in pattern, Some PMDs will reject rule because behaviour will be undefined.
+in pattern, Some PMDs will reject rule because behavior will be undefined.
.. _table_rte_flow_action_dec_ttl:
@@ -2299,7 +2299,7 @@ Action: ``SET_TTL``
Assigns a new TTL value.
If there is no valid RTE_FLOW_ITEM_TYPE_IPV4 or RTE_FLOW_ITEM_TYPE_IPV6
-in pattern, Some PMDs will reject rule because behaviour will be undefined.
+in pattern, Some PMDs will reject rule because behavior will be undefined.
.. _table_rte_flow_action_set_ttl:
@@ -2725,7 +2725,7 @@ Caveats
- API operations are synchronous and blocking (``EAGAIN`` cannot be
returned).
-- There is no provision for reentrancy/multi-thread safety, although nothing
+- There is no provision for re-entrancy/multi-thread safety, although nothing
should prevent different devices from being configured at the same
time. PMDs may protect their control path functions accordingly.
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
index cb70caa7..7d0734a3 100644
--- a/doc/guides/prog_guide/rte_security.rst
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -40,7 +40,7 @@ Inline Crypto
~~~~~~~~~~~~~
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
-The crypto processing for security protocol (e.g. IPSec) is processed
+The crypto processing for security protocol (e.g. IPsec) is processed
inline during receive and transmission on NIC port. The flow based
security action should be configured on the port.
@@ -48,7 +48,7 @@ Ingress Data path - The packet is decrypted in RX path and relevant
crypto status is set in Rx descriptors. After the successful inline
crypto processing the packet is presented to host as a regular Rx packet
however all security protocol related headers are still attached to the
-packet. e.g. In case of IPSec, the IPSec tunnel headers (if any),
+packet. e.g. In case of IPsec, the IPsec tunnel headers (if any),
ESP/AH headers will remain in the packet but the received packet
contains the decrypted data where the encrypted data was when the packet
arrived. The driver Rx path check the descriptors and and based on the
@@ -111,7 +111,7 @@ Inline protocol offload
~~~~~~~~~~~~~~~~~~~~~~~
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
-The crypto and protocol processing for security protocol (e.g. IPSec)
+The crypto and protocol processing for security protocol (e.g. IPsec)
is processed inline during receive and transmission. The flow based
security action should be configured on the port.
@@ -119,7 +119,7 @@ Ingress Data path - The packet is decrypted in the RX path and relevant
crypto status is set in the Rx descriptors. After the successful inline
crypto processing the packet is presented to the host as a regular Rx packet
but all security protocol related headers are optionally removed from the
-packet. e.g. in the case of IPSec, the IPSec tunnel headers (if any),
+packet. e.g. in the case of IPsec, the IPsec tunnel headers (if any),
ESP/AH headers will be removed from the packet and the received packet
will contains the decrypted packet only. The driver Rx path checks the
descriptors and based on the crypto status sets additional flags in
@@ -132,7 +132,7 @@ to identify the security processing done on the packet.
The underlying device in this case is stateful. It is expected that
the device shall support crypto processing for all kind of packets matching
to a given flow, this includes fragmented packets (post reassembly).
- E.g. in case of IPSec the device may internally manage anti-replay etc.
+ E.g. in case of IPsec the device may internally manage anti-replay etc.
It will provide a configuration option for anti-replay behavior i.e. to drop
the packets or pass them to driver with error flags set in the descriptor.
@@ -150,7 +150,7 @@ to cross the MTU size.
.. note::
The underlying device will manage state information required for egress
- processing. E.g. in case of IPSec, the seq number will be added to the
+ processing. E.g. in case of IPsec, the seq number will be added to the
packet, however the device shall provide indication when the sequence number
is about to overflow. The underlying device may support post encryption TSO.
@@ -199,13 +199,13 @@ crypto device.
Decryption: The packet is sent to the crypto device for security
protocol processing. The device will decrypt the packet and it will also
optionally remove additional security headers from the packet.
-E.g. in case of IPSec, IPSec tunnel headers (if any), ESP/AH headers
+E.g. in case of IPsec, IPsec tunnel headers (if any), ESP/AH headers
will be removed from the packet and the decrypted packet may contain
plain data only.
.. note::
- In case of IPSec the device may internally manage anti-replay etc.
+ In case of IPsec the device may internally manage anti-replay etc.
It will provide a configuration option for anti-replay behavior i.e. to drop
the packets or pass them to driver with error flags set in descriptor.
@@ -217,7 +217,7 @@ for any protocol header addition.
.. note::
- In the case of IPSec, the seq number will be added to the packet,
+ In the case of IPsec, the seq number will be added to the packet,
It shall provide an indication when the sequence number is about to
overflow.
@@ -549,7 +549,7 @@ IPsec related configuration parameters are defined in ``rte_security_ipsec_xform
struct rte_security_ipsec_sa_options options;
/**< various SA options */
enum rte_security_ipsec_sa_direction direction;
- /**< IPSec SA Direction - Egress/Ingress */
+ /**< IPsec SA Direction - Egress/Ingress */
enum rte_security_ipsec_sa_protocol proto;
/**< IPsec SA Protocol - AH/ESP */
enum rte_security_ipsec_sa_mode mode;
diff --git a/doc/guides/prog_guide/traffic_management.rst b/doc/guides/prog_guide/traffic_management.rst
index 98ac4310..05b34d93 100644
--- a/doc/guides/prog_guide/traffic_management.rst
+++ b/doc/guides/prog_guide/traffic_management.rst
@@ -39,7 +39,7 @@ whether a specific implementation does meet the needs to the user application.
At the TM level, users can get high level idea with the help of various
parameters such as maximum number of nodes, maximum number of hierarchical
levels, maximum number of shapers, maximum number of private shapers, type of
-scheduling algorithm (Strict Priority, Weighted Fair Queueing , etc.), etc.,
+scheduling algorithm (Strict Priority, Weighted Fair Queuing , etc.), etc.,
supported by the implementation.
Likewise, users can query the capability of the TM at the hierarchical level to
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index c77df338..27c3af8f 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -63,7 +63,7 @@ The following is an overview of some key Vhost API functions:
512).
* zero copy is really good for VM2VM case. For iperf between two VMs, the
- boost could be above 70% (when TSO is enableld).
+ boost could be above 70% (when TSO is enabled).
* For zero copy in VM2NIC case, guest Tx used vring may be starved if the
PMD driver consume the mbuf but not release them timely.
diff --git a/doc/guides/rawdevs/ifpga_rawdev.rst b/doc/guides/rawdevs/ifpga_rawdev.rst
index d400db6e..a3d92a62 100644
--- a/doc/guides/rawdevs/ifpga_rawdev.rst
+++ b/doc/guides/rawdevs/ifpga_rawdev.rst
@@ -91,7 +91,7 @@ Run-time parameters
-------------------
This driver is invoked automatically in systems added with Intel FPGA,
-but PR and IFPGA Bus scan is trigged by command line using
+but PR and IFPGA Bus scan is triggered by command line using
``--vdev 'ifpga_rawdev_cfg`` EAL option.
The following device parameters are supported:
diff --git a/doc/guides/rel_notes/known_issues.rst b/doc/guides/rel_notes/known_issues.rst
index 358dfa32..276327c1 100644
--- a/doc/guides/rel_notes/known_issues.rst
+++ b/doc/guides/rel_notes/known_issues.rst
@@ -676,7 +676,7 @@ igb uio legacy mode can not be used in X710/XL710/XXV710
**Description**:
X710/XL710/XXV710 NICs lack support for indicating INTx is asserted via the interrupt
- bit in the PCI status register. Linux delected them from INTx support table. The related
+ bit in the PCI status register. Linux deleted them from INTx support table. The related
`commit <https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/commit/drivers/pci/quirks.c?id=8bcf4525c5d43306c5fd07e132bc8650e3491aec>`_.
**Implication**:
@@ -722,9 +722,9 @@ Linux kernel 4.10.0 iommu attribute read error
**Description**:
When VT-d is enabled (``iommu=pt intel_iommu=on``), reading IOMMU attributes from
/sys/devices/virtual/iommu/dmarXXX/intel-iommu/cap on Linux kernel 4.10.0 error.
- This bug is fixed in `Linux commmit a7fdb6e648fb
+ This bug is fixed in `Linux commit a7fdb6e648fb
<https://patchwork.kernel.org/patch/9595727/>`_,
- This bug is introduced in `Linux commmit 39ab9555c241
+ This bug is introduced in `Linux commit 39ab9555c241
<https://patchwork.kernel.org/patch/9554403/>`_,
**Implication**:
@@ -842,7 +842,7 @@ AVX-512 support disabled
drop.
On DPDK v19.02 ``AVX-512`` disable scope is reduced to ``GCC`` and ``binutils version 2.30`` based
- on information accured from the GCC community defect.
+ on information accrued from the GCC community defect.
**Reason**:
Generated ``AVX-512`` code cause crash:
diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
index 2a93af32..6448b6cb 100644
--- a/doc/guides/rel_notes/release_17_11.rst
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -168,7 +168,7 @@ New Features
* The DES CBC algorithm.
* The DES DOCSIS BPI algorithm.
- This change requires version 0.47 of the IPSec Multi-buffer library. For
+ This change requires version 0.47 of the IPsec Multi-buffer library. For
more details see the :doc:`../cryptodevs/aesni_mb` documentation.
* **Updated the OpenSSL PMD.**
@@ -198,7 +198,7 @@ New Features
* **Added the Security Offload Library.**
Added an experimental library - ``rte_security``. This provide security APIs
- for protocols like IPSec using inline ipsec offload to ethernet devices or
+ for protocols like IPsec using inline ipsec offload to ethernet devices or
full protocol offload with lookaside crypto devices.
See the :doc:`../prog_guide/rte_security` section of the DPDK Programmers
@@ -207,11 +207,11 @@ New Features
* **Updated the DPAA2_SEC crypto driver to support rte_security.**
Updated the ``dpaa2_sec`` crypto PMD to support ``rte_security`` lookaside
- protocol offload for IPSec.
+ protocol offload for IPsec.
* **Updated the IXGBE ethernet driver to support rte_security.**
- Updated ixgbe ethernet PMD to support ``rte_security`` inline IPSec offload.
+ Updated ixgbe ethernet PMD to support ``rte_security`` inline IPsec offload.
* **Updated i40e driver to support GTP-C/GTP-U.**
@@ -509,7 +509,7 @@ ABI Changes
* **New parameter added to rte_eth_dev.**
A new parameter ``security_ctx`` has been added to ``rte_eth_dev`` to
- support security operations like IPSec inline.
+ support security operations like IPsec inline.
* **New parameter added to rte_cryptodev.**
diff --git a/doc/guides/rel_notes/release_18_11.rst b/doc/guides/rel_notes/release_18_11.rst
index 2f386fc5..071e010d 100644
--- a/doc/guides/rel_notes/release_18_11.rst
+++ b/doc/guides/rel_notes/release_18_11.rst
@@ -862,8 +862,8 @@ Tested Platforms
* Ubuntu 18.04.1 LTS with NXP QorIQ LSDK 1809 support packages
* Ubuntu 16.04.3 LTS with NXP QorIQ LSDK 1803 support packages
-Fixes and Validation in 18.11 Stable Release
---------------------------------------------
+18.11.1 Release Notes
+----------------------
18.11.1 Fixes
~~~~~~~~~~~~~
@@ -1177,3 +1177,514 @@ Fixes and Validation in 18.11 Stable Release
* vhost single/multi queues and cross-NUMA
* vhostclient reconnect
* vhost live migration with single/multi queues and cross-NUMA
+
+18.11.2 Release Notes
+----------------------
+
+18.11.2 Fixes
+~~~~~~~~~~~~~
+
+* acl: fix compiler flags with meson and AVX2 runtime
+* app/bbdev: replace sprintf with snprintf or strlcpy
+* app/crypto-perf: check range of socket id
+* app/pdump: remove only created vdevs
+* app/test: fix build with musl libc
+* app/test: fix flags with meson
+* app/test: fix sprintf with strlcat
+* app/testpmd: add missing newline when showing statistics
+* app/testpmd: extend forwarding statistics to 64 bits
+* app/testpmd: fix a typo in log message
+* app/testpmd: fix help info for interactive commands
+* app/testpmd: fix hex string parser support for flow API
+* app/testpmd: fix mempool free on exit
+* app/testpmd: fix offload flags after port config
+* app/testpmd: fix return value check
+* app/testpmd: fix stdout flush after printing stats
+* app/testpmd: fix Tx QinQ set
+* app/testpmd: fix Tx VLAN and QinQ dependency
+* app/testpmd: fix typo in comment
+* app/testpmd: fix unintentional integer overflow
+* app/testpmd: fix variable use before null check
+* app/testpmd: remove unused field from port struct
+* app/testpmd: remove useless casts on statistics
+* bitrate: fix unchecked return value
+* build: fix crash by disabling AVX512 with binutils 2.31
+* build: fix meson binutils workaround
+* build: fix ninja install on FreeBSD
+* build: remove meson warning for Arm
+* build: use default flags for default Arm machine
+* bus/dpaa: fix Rx discard register mask
+* bus/fslmc: decrease log level for unsupported devices
+* bus/fslmc: fix build with musl libc
+* bus/fslmc: fix warning with GCC 9
+* bus/fslmc: fix warning with GCC 9
+* bus/fslmc: remove unused include of error.h
+* bus/vdev: fix debug message on probing
+* bus/vdev: fix hotplug twice
+* bus/vmbus: fix check for mmap failure
+* bus/vmbus: fix resource leak on error
+* bus/vmbus: fix secondary process setup
+* bus/vmbus: map ring in secondary process
+* bus/vmbus: stop mapping if empty resource found
+* cfgfile: replace strcat with strlcat
+* ci: add a distinguisher to the extra Travis builds
+* ci: enable ccache in Travis
+* ci: introduce Travis builds for GitHub repositories
+* common/cpt: fix null auth only
+* compress/isal: fix compression stream initialization
+* compress/isal: fix getting information about CPU
+* compress/qat: fix setup inter buffers
+* crypto/caam_jr: fix memory leak and illegal access
+* crypto/caam_jr: fix shared descriptor endianness
+* crypto/caam_jr: fix total length in auth only s/g
+* cryptodev: fix driver name comparison
+* crypto/dpaa2_sec: fix offset calculation for GCM
+* crypto/dpaa2_sec: fix session clearing
+* crypto/dpaa: fix session destroy
+* crypto/kasumi: fix dependency check
+* crypto/openssl: fix big numbers after computations
+* crypto/openssl: fix modexp
+* crypto/qat: fix null cipher algo for non 8-byte multiple
+* crypto/snow3g: add to meson build
+* crypto/virtio: fix IV offset
+* crypto/virtio: use local log type
+* crypto/zuc: fix dependency check
+* devtools: accept experimental symbol promotion
+* devtools: add libelf dependency to build test
+* devtools: fix build test on FreeBSD
+* devtools: fix check of symbol added as stable API
+* devtools: fix result of svg include check
+* devtools: fix symbol name in check log
+* devtools: fix test of some build options
+* devtools: skip meson build for missing compilers
+* devtools: support older compilers with meson test
+* devtools: test build of zlib PMD
+* doc: add flow API to qede NIC features
+* doc: add missing algorithms for AESNI-MB PMD
+* doc: fix ABI check script examples
+* doc: fix a minor typo in testpmd guide
+* doc: fix broken link in LPM guide
+* doc: fix examples in bonding guide
+* doc: fix formatting in testpmd guide
+* doc: fix heading levels in bbdev test guide
+* doc: fix interactive commands in testpmd guide
+* doc: fix JSON interface for power sample
+* doc: fix link in Linux getting started guide
+* doc: fix links to doxygen and sphinx sites
+* doc: fix missing asymmetric crypto table
+* doc: fix PCI whitelist typo in prog guide
+* doc: fix spelling in testpmd guide
+* doc: fix spelling reported by aspell in comments
+* doc: fix spelling reported by aspell in guides
+* doc: fix tag for inner RSS feature
+* doc: fix two typos in contributing guide
+* doc: fix typo in IPC guide
+* doc: fix typo in mlx5 guide
+* doc: fix typos in mlx5 guide
+* doc: fix typos in testpmd user guide
+* doc: remove reference to rte.doc.mk in programmers guide
+* doc: update cross Arm toolchain in Linux guide
+* drivers/event: disable OcteonTx for buggy Arm compilers
+* drivers: fix SPDX license id consistency
+* drivers/net: fix possible overflow using strlcat
+* drivers/net: fix shifting 32-bit signed variable 31 times
+* drivers/qat: fix queue pair NUMA node
+* eal: fix check when retrieving current CPU affinity
+* eal: fix cleanup in no-shconf mode
+* eal: fix control threads pinnning
+* eal: fix core list validation with disabled cores
+* eal: fix formatting of hotplug error message
+* eal: fix typo in comment of vector function
+* eal: initialize alarms early
+* eal/linux: fix log levels for pagemap reading failure
+* eal/linux: remove thread ID from debug message
+* eal/ppc: fix global memory barrier
+* eal: remove dead code in core list parsing
+* eal: restrict control threads to startup CPU affinity
+* eal: support strlcat function
+* eal: tighten permissions on shared memory files
+* ethdev: fix a typo
+* ethdev: fix method name in doxygen comment
+* ethdev: fix typo in error messages
+* ethdev: remove unused variable
+* eventdev: fix crypto adapter
+* eventdev: fix Rx adapter event flush
+* eventdev: update references to removed function
+* event/dsw: fix capability flags
+* event/dsw: ignore scheduling type for single-link queues
+* event/opdl: replace sprintf with snprintf
+* event/sw: fix enqueue checks in self-test
+* examples/ethtool: fix two typos
+* examples/fips_validation: fix CMAC test
+* examples/ip_pipeline: disable build when no epoll
+* examples/ipsec-secgw: fix AES-CTR block size
+* examples/ipsec-secgw: fix build error log
+* examples/ipsec-secgw: fix debug logs
+* examples/ipsec-secgw: fix SPD no-match case
+* examples/l2fwd-cat: fix build on FreeBSD
+* examples/multi_process: fix buffer underrun
+* examples/power: fix buffer overrun
+* examples/power: fix build with some disabled PMDs
+* examples/power: fix json null termination
+* examples/power: fix overflowed value
+* examples/power: fix resource leak
+* examples/power: fix string null termination
+* examples/power: fix string overflow
+* examples/power: fix unreachable VF MAC init
+* examples/vhost_crypto: fix dependency on vhost library
+* examples/vhost_scsi: fix null-check for parameter
+* hash: fix doc about thread/process safety
+* hash: fix position returned in free slots
+* hash: fix total entries count
+* ipc: add warnings about correct API usage
+* ipc: add warnings about not using IPC with memory API
+* ipc: fix memory leak on request failure
+* ipc: fix send error handling
+* ipc: handle more invalid parameter cases
+* ipc: harden message receive
+* ipc: unlock on failure
+* kni: fix build with Linux 5.1
+* kni: fix type for MAC address
+* maintainers: update for IBM POWER
+* malloc: fix documentation of realloc function
+* malloc: fix IPC message initialization
+* mbuf: fix a typo
+* mbuf: update Tx VLAN and QinQ flags documentation
+* mem: limit use of address hint
+* mempool/dpaa2: fix continuous print on empty pool
+* mem: warn user when running without NUMA support
+* mk: disable packed member pointer warning for telemetry
+* mk: disable warning for packed member pointer
+* mk: fix AVX512 disabled warning on non x86
+* mk: fix build of shared library with libbsd
+* net/atlantic: bad logic with offsets talking with firmware
+* net/atlantic: eeprom get/set should consider offset
+* net/atlantic: eliminate excessive log levels on Rx/Tx
+* net/atlantic: enable broadcast traffic
+* net/atlantic: error handling for mailbox access
+* net/atlantic: extra line at eof
+* net/atlantic: fix buffer overflow
+* net/atlantic: fix EEPROM get for small and uneven lengths
+* net/atlantic: fix link configuration
+* net/atlantic: fix max eeprom size
+* net/atlantic: fix missing VLAN filter offload
+* net/atlantic: fix negative error codes
+* net/atlantic: fix xstats return
+* net/atlantic: flow control settings synchronization on rx
+* net/atlantic: remove extra checks for error codes
+* net/atlantic: remove unused variable
+* net/atlantic: use capability bits to detect eeprom access
+* net/atlantic: validity check for eeprom dev address
+* net/avf: fix admin queue interrupt for ICE
+* net/bnx2x: fix DMAE timeout
+* net/bnx2x: fix memory leak
+* net/bnx2x: fix MTU for jumbo frame
+* net/bnx2x: fix optic module verification
+* net/bnx2x: fix race for periodic flags
+* net/bnx2x: fix ramrod timeout
+* net/bnxt: fix big endian build
+* net/bnxt: fix Rx VLAN offload flags
+* net/bnxt: silence IOVA warnings
+* net/bnxt: support IOVA VA mode
+* net/bnxt: suppress spurious error log
+* net/bonding: avoid warning for invalid port
+* net/bonding: fix buffer length when printing strings
+* net/bonding: fix LACP negotiation
+* net/bonding: fix link status
+* net/bonding: fix packet count type for LACP
+* net/bonding: fix port id types
+* net/bonding: fix queue index types
+* net/bonding: fix slave id types
+* net/bonding: fix slave Tx burst for mode 4
+* net/bonding: fix Tx in 802.3ad mode
+* net/bonding: fix values of descriptor limits
+* net/cxgbe: fix colliding function names
+* net/cxgbe: fix missing checksum flags and packet type
+* net/cxgbe: update Chelsio T5/T6 NIC device ids
+* net/enetc: fix big endian build and buffer allocation
+* net/enetc: fix crash at high speed traffic
+* net/enetc: fix SMMU unhandled context fault
+* net/enic: allow flow mark ID 0
+* net/enic: check for unsupported flow item types
+* net/enic: fix endianness in VLAN match
+* net/enic: fix flow director SCTP matching
+* net/enic: fix inner packet matching
+* net/enic: fix max MTU calculation
+* net/enic: fix SCTP match for flow API
+* net/enic: fix VLAN inner type matching for old hardware
+* net/enic: fix VXLAN match
+* net/enic: move arguments into struct
+* net/enic: reset VXLAN port regardless of overlay offload
+* net: fix Tx VLAN flag for offload emulation
+* net/fm10k: fix VLAN strip offload flag
+* net/i40e: fix dereference before check when getting EEPROM
+* net/i40e: fix dereference before null check in mbuf release
+* net/i40e: fix link speed for X722
+* net/i40e: fix logging on VF close
+* net/i40e: fix queue number check
+* net/i40e: fix scattered Rx enabling
+* net/i40e: fix time sync for 25G
+* net/i40e: forbid two RSS flow rules
+* net/i40e: log when provided RSS key is not valid
+* net/iavf: fix info get
+* net/ixgbe: fix warning with GCC 9
+* net/ixgbe: restore VLAN filter for VF
+* net/kni: fix return value check
+* net/mlx4: change device reference for secondary process
+* net/mlx4: fix memory region cleanup
+* net/mlx5: check Tx queue size overflow
+* net/mlx5: fix comments mixing Rx and Tx
+* net/mlx5: fix errno typos in comments
+* net/mlx5: fix external memory registration
+* net/mlx5: fix flow priorities probing error path
+* net/mlx5: fix hex dump of error completion
+* net/mlx5: fix instruction hotspot on replenishing Rx buffer
+* net/mlx5: fix max number of queues for NEON Tx
+* net/mlx5: fix memory event on secondary process
+* net/mlx5: fix memory region cleanup
+* net/mlx5: fix Multi-Packet RQ mempool name
+* net/mlx5: fix packet inline on Tx queue wraparound
+* net/mlx5: fix release of Rx queue object
+* net/mlx5: fix RSS validation function
+* net/mlx5: fix sync when handling Tx completions
+* net/mlx5: fix Tx metadata for multi-segment packet
+* net/mlx: prefix private structure
+* net/mlx: remove debug messages on datapath
+* net/netvsc: fix include of fcntl.h
+* net/netvsc: fix VF support with secondary process
+* net/netvsc: remove useless condition
+* net/netvsc: reset mbuf port on VF Rx
+* net/nfp: check return value
+* net/nfp: fix build with musl libc
+* net/nfp: fix file descriptor check
+* net/nfp: fix memory leak
+* net/nfp: fix possible buffer overflow
+* net/nfp: fix potential integer overflow
+* net/nfp: fix RSS query
+* net/nfp: fix setting MAC address
+* net/octeontx: fix vdev name
+* net/pcap: fix memory leak
+* net/qede: fix Rx packet drop
+* net/qede: fix Tx packet prepare for tunnel packets
+* net/qede: support IOVA VA mode
+* net/ring: avoid hard-coded length
+* net/ring: check length of ring name
+* net/ring: fix coding style
+* net/ring: fix return value check
+* net/ring: use calloc style where appropriate
+* net/sfc: fix logging from secondary process
+* net/sfc: fix MTU change to check Rx scatter consistency
+* net/sfc: fix speed capabilities reported in device info
+* net/sfc: improve TSO header length check in EF10 datapath
+* net/sfc: improve TSO header length check in EFX datapath
+* net/sfc: log port ID as 16-bit unsigned integer on panic
+* net/sfc: remove control path logging from Rx queue count
+* net/softnic: fix possible buffer overflow
+* net/tap: fix getting max iovec
+* net/tap: fix memory leak on IPC request
+* net/tap: fix multi process reply buffer
+* net/tap: fix multi-process request
+* net/tap: fix potential IPC buffer overrun
+* net/vdev_netvsc: fix device cast
+* net/virtio: add barrier in interrupt enable
+* net/virtio: add barriers for extra descriptors on Rx split
+* net/virtio: fix buffer leak on VLAN insert
+* net/virtio: fix dangling pointer on failure
+* net/virtio: fix duplicate naming of include guard
+* net/virtio: fix in-order Tx path for split ring
+* net/virtio: remove forward declaration
+* net/virtio: remove useless condition
+* net/virtio: set offload flag for jumbo frames
+* net/virtio-user: fix multi-process support
+* net/virtio-user: fix multiqueue with vhost kernel
+* net/virtio-user: fix return value check
+* net/vmxnet3: add VLAN filter capability
+* power: fix cache line alignment
+* power: fix governor storage to trim newlines
+* power: fix thread-safety environment modification
+* power: remove unused variable
+* raw/dpaa2_cmdif: fix warnings with GCC 9
+* raw/dpaa2_qdma: fix spin lock release
+* raw/dpaa2_qdma: fix to support multiprocess execution
+* raw/ifpga: fix file descriptor leak in error path
+* raw/ifpga: modify log output
+* raw/skeleton: fix warnings with GCC 9
+* Revert "app/testpmd: fix offload flags after port config"
+* ring: enforce reading tail before slots
+* ring: fix an error message
+* ring: fix namesize macro documentation block
+* rwlock: reimplement with atomic builtins
+* spinlock: reimplement with atomic one-way barrier
+* table: fix arm64 hash function selection
+* telemetry: fix mapping of statistics
+* test/barrier: fix allocation check
+* test/barrier: fix for Power CPUs
+* test/barrier: fix typo in log
+* test/bonding: fix MAC assignment for re-run
+* test: clean remaining trace of devargs autotest
+* test/compress: fix missing header include
+* test/crypto: fix duplicate id used by CCP device
+* test/crypto: fix possible overflow using strlcat
+* test/distributor: replace sprintf with strlcpy
+* test/event: replace sprintf with snprintf
+* test/hash: replace sprintf with snprintf
+* test/pmd_perf: fix the way to drain the port
+* test/spinlock: amortize the cost of getting time
+* test/spinlock: remove delay for correct benchmarking
+* version: 18.11.2-rc1
+* vfio: document multiprocess limitation for container API
+* vhost/crypto: fix parens
+* vhost: fix device leak on connection add failure
+* vhost: fix interrupt suppression for the split ring
+* vhost: fix null pointer checking
+* vhost: fix passing destroyed device to destroy callback
+* vhost: fix potential use-after-free for memory region
+* vhost: fix potential use-after-free for zero copy mbuf
+* vhost: fix silent queue enabling with legacy guests
+* vhost: fix sprintf with snprintf
+* vhost: prevent disabled rings to be processed with zero-copy
+* vhost: restore mbuf first when freeing zmbuf
+
+18.11.2 Validation
+~~~~~~~~~~~~~~~~~~
+
+* IBM(R) Testing
+
+ * Tests run
+
+ * Single port stability test using l3fwd (16 cpus) and TRex
+ * 64 and 1500 byte packets at a 0.0% drop rate for 4 hours each
+
+ * System tested
+
+ * IBM Power9 Model 8335-101 CPU: 2.3 (pvr 004e 1203)
+
+ * Tested NICs
+
+ * ConnectX-5 (fw 16.23.1020).
+
+ * OS Tested
+
+ * Ubuntu 18.04.2 LTS (kernel 4.15.0-47-generic)
+
+* Intel(R) Openvswitch Testing
+
+ * OVS testing against head OVS Master and OVS 2.11.0 with VSPERF
+
+ * Tested NICs
+
+ * i40e (X710) and i40eVF
+ * ixgbe (82599ES) and ixgbeVF
+ * igb (I350) and igbVF
+
+ * Functionality
+
+ * P2P
+ * PVP
+ * PVVP
+ * PV-PV in parallel
+ * Hotplug
+ * Multiqueue
+ * Vhostuserclient reconnect
+ * Vhost cross-NUMA awareness
+ * Jumbo frames
+ * Rate limiting
+ * QoS policer
+
+* Mellanox(R) Testing
+
+ * Basic functionality
+
+ * Send and receive multiple types of traffic
+ * testpmd xstats counter test
+ * testpmd timestamp test
+ * Changing/checking link status through testpmd
+ * RTE flow and flow_director tests
+ * Some RSS tests
+ * VLAN stripping and insertion tests
+ * Checksum and TSO tests
+ * ptype tests
+ * Port interrupt testing
+ * Multi-process testing
+
+ * OFED versions tested
+
+ * MLNX_OFED_LINUX-4.5-1.0.1.0
+ * MLNX_OFED_LINUX-4.6-1.0.1.1
+
+ * Tested NICs
+
+ * ConnectX-4 Lx (fw 14.25.1010).
+ * ConnectX-5 (fw 16.25.1010).
+
+ * OS tested
+
+ * RHEL7.4 (kernel 3.10.0-693.el7.x86_64).
+
+* Microsoft(R) Azure Testing
+
+ * Images
+
+ * Canonical UbuntuServer 16.04-LTS latest
+ * Canonical UbuntuServer 18.04-DAILY-LTS latest
+ * RedHat RHEL 7-RAW latest
+ * RedHat RHEL 7.5 latest
+ * Openlogic CentOS 7.5 latest
+ * SUSE SLES 15 latest
+
+ * Drivers
+
+ * Mellanox and netvsc poll-mode drivers
+
+ * Functionality
+
+ * VM to VM traffic
+ * SRIOV/Failsafe
+ * Single and multicore performance
+
+* Red Hat(R) Testing
+
+ * RHEL 7 and RHEL 8
+ * Functionality
+
+ * PF
+ * VF
+ * vhost single/multi queues and cross-NUMA
+ * vhostclient reconnect
+ * vhost live migration with single/multi queues and cross-NUMA
+ * OVS PVP
+
+ * Tested NICs
+
+ * X540-AT2 NIC(ixgbe, 10G)
+
+* Intel(R) Testing
+
+ * Basic Intel(R) NIC(ixgbe and i40e) testing
+
+ * vlan
+ * vxlan
+ * Jumbo frames
+ * Generic filter
+ * Flow director
+ * PF and VF
+ * Intel NIC single core/NIC performance
+
+ * Basic cryptodev and virtio testing
+
+ * cryptodev
+ * vhost/virtio basic loopback, PVP and performance test
+
+18.11.2 Known Issues
+~~~~~~~~~~~~~~~~~~~~
+
+* DPDK 18.11.2 contains fixes up to DPDK v19.05. Issues identified/fixed in DPDK master branch after DPDK v19.05 may be present in DPDK 18.11.2
+* testpmd: queue specific offloads may be over-written by the default config. This is not a regression from earlier DPDK 18.11 releases.
+
+Fixes skipped and status unresolved
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* dcfbc594f net/iavf: fix queue interrupt for ice (18.02)
+* 281bd1aa3 net/iavf: fix stats reset (18.02)
+* fe252fb69 test/rwlock: benchmark on all available cores (1.2.3r0)
+* 6fef1ae4f test/rwlock: amortize the cost of getting time (1.2.3r0)
diff --git a/doc/guides/sample_app_ug/bbdev_app.rst b/doc/guides/sample_app_ug/bbdev_app.rst
index 653f972f..6ade9c6e 100644
--- a/doc/guides/sample_app_ug/bbdev_app.rst
+++ b/doc/guides/sample_app_ug/bbdev_app.rst
@@ -68,7 +68,7 @@ The application accepts a number of command line options:
where:
-* ``e ENCODING_CORES``: hexmask for encoding lcored (default = 0x2)
+* ``e ENCODING_CORES``: hexmask for encoding lcores (default = 0x2)
* ``d DECODING_CORES``: hexmask for decoding lcores (default = 0x4)
* ``p ETH_PORT_ID``: ethernet port ID (default = 0)
* ``b BBDEV_ID``: BBDev ID (default = 0)
@@ -87,7 +87,7 @@ issue the command:
$ ./build/bbdev --vdev='baseband_turbo_sw' -w <NIC0PCIADDR> -c 0x38 --socket-mem=2,2 \
--file-prefix=bbdev -- -e 0x10 -d 0x20
-where, NIC0PCIADDR is the PCI addresse of the Rx port
+where, NIC0PCIADDR is the PCI address of the Rx port
This command creates one virtual bbdev devices ``baseband_turbo_sw`` where the
device gets linked to a corresponding ethernet port as whitelisted by
diff --git a/doc/guides/sample_app_ug/eventdev_pipeline.rst b/doc/guides/sample_app_ug/eventdev_pipeline.rst
index 0ec02907..dc7972aa 100644
--- a/doc/guides/sample_app_ug/eventdev_pipeline.rst
+++ b/doc/guides/sample_app_ug/eventdev_pipeline.rst
@@ -49,7 +49,7 @@ these settings is shown below:
./build/eventdev_pipeline --vdev event_sw0 -- -r1 -t1 -e4 -w FF00 -s4 -n0 -c32 -W1000 -D
The application has some sanity checking built-in, so if there is a function
-(eg; the RX core) which doesn't have a cpu core mask assigned, the application
+(e.g., the RX core) which doesn't have a cpu core mask assigned, the application
will print an error message:
.. code-block:: console
diff --git a/doc/guides/sample_app_ug/intro.rst b/doc/guides/sample_app_ug/intro.rst
index 159bcf73..90704194 100644
--- a/doc/guides/sample_app_ug/intro.rst
+++ b/doc/guides/sample_app_ug/intro.rst
@@ -106,7 +106,7 @@ examples are highlighted below.
(packet arrival) and TX (packet transmission) by adding callbacks to the RX
and TX packet processing functions.
-* :doc:`IPSec Security Gateway<ipsec_secgw>`: The IPSec Security
+* :doc:`IPsec Security Gateway<ipsec_secgw>`: The IPsec Security
Gateway application is minimal example of something closer to a real world
example. This is also a good example of an application using the DPDK
Cryptodev framework.
diff --git a/doc/guides/sample_app_ug/ip_pipeline.rst b/doc/guides/sample_app_ug/ip_pipeline.rst
index 447a544d..4da0fcf8 100644
--- a/doc/guides/sample_app_ug/ip_pipeline.rst
+++ b/doc/guides/sample_app_ug/ip_pipeline.rst
@@ -113,7 +113,7 @@ Application stages
Initialization
~~~~~~~~~~~~~~
-During this stage, EAL layer is initialised and application specific arguments are parsed. Furthermore, the data strcutures
+During this stage, EAL layer is initialised and application specific arguments are parsed. Furthermore, the data structures
(i.e. linked lists) for application objects are initialized. In case of any initialization error, an error message
is displayed and the application is terminated.
@@ -185,7 +185,7 @@ Examples
+-----------------------+----------------------+----------------+------------------------------------+
| IP routing | LPM (IPv4) | Forward | 1. Mempool Create |
| | | | 2. Link create |
- | | * Key = IP dest addr | | 3. Pipeline creat |
+ | | * Key = IP dest addr | | 3. Pipeline create |
| | * Offset = 286 | | 4. Pipeline port in/out |
| | * Table size = 4K | | 5. Pipeline table |
| | | | 6. Pipeline port in table |
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 4869a011..5f7d3973 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -25,8 +25,8 @@ The application classifies the ports as *Protected* and *Unprotected*.
Thus, traffic received on an Unprotected or Protected port is consider
Inbound or Outbound respectively.
-The application also supports complete IPSec protocol offload to hardware
-(Look aside crypto accelarator or using ethernet device). It also support
+The application also supports complete IPsec protocol offload to hardware
+(Look aside crypto accelerator or using ethernet device). It also supports
inline ipsec processing by the supported ethernet device during transmission.
These modes can be selected during the SA creation configuration.
diff --git a/doc/guides/sample_app_ug/performance_thread.rst b/doc/guides/sample_app_ug/performance_thread.rst
index fbd30a5e..96f0fc6f 100644
--- a/doc/guides/sample_app_ug/performance_thread.rst
+++ b/doc/guides/sample_app_ug/performance_thread.rst
@@ -500,8 +500,8 @@ An application may save and retrieve a single pointer to application data in
the L-thread struct.
For legacy and backward compatibility reasons two alternative methods are also
-offered, the first is modelled directly on the pthread get/set specific APIs,
-the second approach is modelled on the ``RTE_PER_LCORE`` macros, whereby
+offered, the first is modeled directly on the pthread get/set specific APIs,
+the second approach is modeled on the ``RTE_PER_LCORE`` macros, whereby
``PER_LTHREAD`` macros are introduced, in both cases the storage is local to
the L-thread.
diff --git a/doc/guides/sample_app_ug/qos_metering.rst b/doc/guides/sample_app_ug/qos_metering.rst
index 2e8e0175..d75f7da5 100644
--- a/doc/guides/sample_app_ug/qos_metering.rst
+++ b/doc/guides/sample_app_ug/qos_metering.rst
@@ -151,5 +151,5 @@ In this particular case:
* For the rest of the cases, the color is changed to red.
.. note::
- * In color blind mode, first row GREEN colour is only valid.
+ * In color blind mode, only the first row GREEN color is valid.
* To drop the packet, policer_table action has to be set to DROP.
diff --git a/doc/guides/sample_app_ug/test_pipeline.rst b/doc/guides/sample_app_ug/test_pipeline.rst
index a9370c80..af9665b2 100644
--- a/doc/guides/sample_app_ug/test_pipeline.rst
+++ b/doc/guides/sample_app_ug/test_pipeline.rst
@@ -32,7 +32,7 @@ Compiling the Application
-------------------------
To compile the sample application see :doc:`compiling`
-The application is located in the ``$RTE_SDK/test/test-pipline`` directory.
+The application is located in the ``$RTE_SDK/test/test-pipeline`` directory.
Running the Application
diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index df4d6f9a..a71ada65 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -116,7 +116,7 @@ will create it. Put simply, it's the server to create the socket file.
The vm2vm parameter sets the mode of packet switching between guests in
the host.
-- 0 disables vm2vm, impling that VM's packets will always go to the NIC port.
+- 0 disables vm2vm, implying that VM's packets will always go to the NIC port.
- 1 means a normal mac lookup packet routing.
- 2 means hardware mode packet forwarding between guests, it allows packets
go to the NIC port, hardware L2 switch will determine which guest the
@@ -148,7 +148,7 @@ default value is 15.
**--dequeue-zero-copy**
Dequeue zero copy will be enabled when this option is given. it is worth to
-note that if NIC is binded to driver with iommu enabled, dequeue zero copy
+note that if NIC is bound to driver with iommu enabled, dequeue zero copy
cannot work at VM2NIC mode (vm2vm=0) due to currently we don't setup iommu
dma mapping for guest memory.
diff --git a/doc/guides/sample_app_ug/vhost_scsi.rst b/doc/guides/sample_app_ug/vhost_scsi.rst
index 7ab7d0d5..6b9bf62e 100644
--- a/doc/guides/sample_app_ug/vhost_scsi.rst
+++ b/doc/guides/sample_app_ug/vhost_scsi.rst
@@ -63,7 +63,7 @@ Vhost_scsi Common Issues
* vhost_scsi can not start with block size 512 Bytes:
- Currently DPDK vhost library was designed for NET device(althrough the APIs
+ Currently DPDK vhost library was designed for NET device (although the APIs
are generic now), for 512 Bytes block device, Qemu BIOS(x86 BIOS Enhanced
Disk Device) will enumerate all block device and do some IOs to those block
devices with 512 Bytes sector size. DPDK vhost library can not process such
diff --git a/doc/guides/sample_app_ug/vm_power_management.rst b/doc/guides/sample_app_ug/vm_power_management.rst
index 5be9f24d..a7b7a2c7 100644
--- a/doc/guides/sample_app_ug/vm_power_management.rst
+++ b/doc/guides/sample_app_ug/vm_power_management.rst
@@ -339,7 +339,7 @@ monitoring of branch ratio on cores doing busy polling via PMDs.
When this parameter is used, the list of cores specified will monitor the ratio
between branch hits and branch misses. A tightly polling PMD thread will have a
- very low branch ratio, so the core frequency will be scaled down to the minimim
+ very low branch ratio, so the core frequency will be scaled down to the minimum
allowed value. When packets are received, the code path will alter, causing the
branch ratio to increase. When the ratio goes above the ratio threshold, the
core frequency will be scaled up to the maximum allowed value.
@@ -379,7 +379,7 @@ the file.
The fifo is at /tmp/powermonitor/fifo
-The jason string can be a policy or instruction, and takes the following
+The JSON string can be a policy or instruction, and takes the following
format:
.. code-block:: javascript
@@ -592,7 +592,7 @@ Profile destroy example:
.. code-block:: javascript
- {"profile": {
+ {"policy": {
"name": "ubuntu",
"command": "destroy",
}}
@@ -601,8 +601,9 @@ Power command example:
.. code-block:: javascript
- {"command": {
+ {"instruction": {
"name": "ubuntu",
+ "command": "power",
"unit": "SCALE_MAX",
"resource_id": 10
}}
@@ -729,7 +730,7 @@ policy down to the host application. These parameters are as follows:
A comma-separated list of cores in the VM that the user wants the host application to
monitor. The list of cores in any vm starts at zero, and these are mapped to the
physical cores by the host application once the policy is passed down.
- Valid syntax includes individial cores '2,3,4', or a range of cores '2-4', or a
+ Valid syntax includes individual cores '2,3,4', or a range of cores '2-4', or a
combination of both '1,3,5-7'
.. code-block:: console
@@ -737,7 +738,7 @@ policy down to the host application. These parameters are as follows:
--busy-hours {list of busy hours}
A comma-separated list of hours within which to set the core frequency to maximum.
- Valid syntax includes individial hours '2,3,4', or a range of hours '2-4', or a
+ Valid syntax includes individual hours '2,3,4', or a range of hours '2-4', or a
combination of both '1,3,5-7'. Valid hours are 0 to 23.
.. code-block:: console
@@ -745,7 +746,7 @@ policy down to the host application. These parameters are as follows:
--quiet-hours {list of quiet hours}
A comma-separated list of hours within which to set the core frequency to minimum.
- Valid syntax includes individial hours '2,3,4', or a range of hours '2-4', or a
+ Valid syntax includes individual hours '2,3,4', or a range of hours '2-4', or a
combination of both '1,3,5-7'. Valid hours are 0 to 23.
.. code-block:: console
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 4495ed03..89df0316 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -22,7 +22,7 @@ They must be separated from the EAL options, shown in the previous section, with
sudo ./testpmd -l 0-3 -n 4 -- -i --portmask=0x1 --nb-cores=2
-The commandline options are:
+The command line options are:
* ``-i, --interactive``
@@ -365,7 +365,7 @@ The commandline options are:
* ``--hot-plug``
- Enable device event monitor machenism for hotplug.
+ Enable device event monitor mechanism for hotplug.
* ``--vxlan-gpe-port=N``
@@ -405,21 +405,21 @@ The commandline options are:
* ``--noisy-lkup-memory=N``
- Set the size of the noisy neighbour simulation memory buffer in MB to N.
+ Set the size of the noisy neighbor simulation memory buffer in MB to N.
Only available with the noisy forwarding mode. The default value is 0.
* ``--noisy-lkup-num-reads=N``
- Set the number of reads to be done in noisy neighbour simulation memory buffer to N.
+ Set the number of reads to be done in noisy neighbor simulation memory buffer to N.
Only available with the noisy forwarding mode. The default value is 0.
* ``--noisy-lkup-num-writes=N``
- Set the number of writes to be done in noisy neighbour simulation memory buffer to N.
+ Set the number of writes to be done in noisy neighbor simulation memory buffer to N.
Only available with the noisy forwarding mode. The default value is 0.
* ``--noisy-lkup-num-reads-writes=N``
- Set the number of r/w accesses to be done in noisy neighbour simulation memory buffer to N.
+ Set the number of r/w accesses to be done in noisy neighbor simulation memory buffer to N.
Only available with the noisy forwarding mode. The default value is 0.
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 854af2d5..81f10727 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -302,7 +302,7 @@ The available information categories are:
This is the default mode.
* ``mac``: Changes the source and the destination Ethernet addresses of packets before forwarding them.
- Default application behaviour is to set source Ethernet address to that of the transmitting interface, and destination
+ Default application behavior is to set source Ethernet address to that of the transmitting interface, and destination
address to a dummy value (set during init). The user may specify a target destination Ethernet address via the 'eth-peer' or
'eth-peers-configfile' command-line options. It is not currently possible to specify a specific source Ethernet address.
@@ -325,7 +325,7 @@ The available information categories are:
* ``softnic``: Demonstrates the softnic forwarding operation. In this mode, packet forwarding is
similar to I/O mode except for the fact that packets are loopback to the softnic ports only. Therefore, portmask parameter should be set to softnic port only. The various software based custom NIC pipelines specified through the softnic firmware (DPDK packet framework script) can be tested in this mode. Furthermore, it allows to build 5-level hierarchical QoS scheduler as a default option that can be enabled through CLI once testpmd application is initialised. The user can modify the default scheduler hierarchy or can specify the new QoS Scheduler hierarchy through CLI. Requires ``CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y``.
-* ``noisy``: Noisy neighbour simulation.
+* ``noisy``: Noisy neighbor simulation.
Simulate more realistic behavior of a guest machine engaged in receiving
and sending packets performing Virtual Network Function (VNF).
@@ -430,6 +430,56 @@ Show Tx metadata value set for a specific port::
testpmd> show port (port_id) tx_metadata
+dump physmem
+~~~~~~~~~~~~
+
+Dumps all physical memory segment layouts::
+
+ testpmd> dump_physmem
+
+dump memzone
+~~~~~~~~~~~~
+
+Dumps the layout of all memory zones::
+
+ testpmd> dump_memzone
+
+
+dump struct size
+~~~~~~~~~~~~~~~~
+
+Dumps the size of all memory structures::
+
+ testpmd> dump_struct_sizes
+
+dump ring
+~~~~~~~~~
+
+Dumps the status of all or specific element in DPDK rings::
+
+ testpmd> dump_ring [ring_name]
+
+dump mempool
+~~~~~~~~~~~~
+
+Dumps the statistics of all or specific memory pool::
+
+ testpmd> dump_mempool [mempool_name]
+
+dump devargs
+~~~~~~~~~~~~
+
+Dumps the user device list::
+
+ testpmd> dump_devargs
+
+dump log types
+~~~~~~~~~~~~~~
+
+Dumps the log level for all the dpdk modules::
+
+ testpmd> dump_log_types
+
Configuration Functions
-----------------------
@@ -476,9 +526,11 @@ Where:
* ``level`` is the log level.
For example, to change the global log level::
+
testpmd> set log global (level)
Regexes can also be used for type. To change log level of user1, user2 and user3::
+
testpmd> set log user[1-3] (level)
set nbport
@@ -930,11 +982,12 @@ Flush all queue region related configuration on a port::
where:
-* "on"is just an enable function which server for other configuration,
+* ``on``: is just an enable function which serves for other configuration,
it is for all configuration about queue region from up layer,
- at first will only keep in DPDK softwarestored in driver,
+ at first will only keep in DPDK software stored in driver,
only after "flush on", it commit all configuration to HW.
- "off" is just clean all configuration about queue region just now,
+
+* ``off``: just cleans all configuration about queue region,
and restore all to DPDK i40e driver default config when start up.
Show all queue region related configuration info on a port::
@@ -1002,6 +1055,20 @@ Display the status of TCP Segmentation Offload::
testpmd> tso show (port_id)
+tunnel tso set
+~~~~~~~~~~~~~~
+
+Set tso segment size of tunneled packets for a port in csum engine::
+
+ testpmd> tunnel_tso set (tso_segsz) (port_id)
+
+tunnel tso show
+~~~~~~~~~~~~~~~
+
+Display the status of tunneled TCP Segmentation Offload for a port::
+
+ testpmd> tunnel_tso show (port_id)
+
set port - gro
~~~~~~~~~~~~~~
@@ -1123,6 +1190,22 @@ Remove a MAC address from a port::
testpmd> mac_addr remove (port_id) (XX:XX:XX:XX:XX:XX)
+mcast_addr add
+~~~~~~~~~~~~~~
+
+To add a multicast MAC address to the set of multicast addresses
+filtered by port::
+
+ testpmd> mcast_addr add (port_id) (mcast_addr)
+
+mcast_addr remove
+~~~~~~~~~~~~~~~~~
+
+To remove a multicast MAC address from the set of multicast addresses
+filtered by port::
+
+ testpmd> mcast_addr remove (port_id) (mcast_addr)
+
mac_addr add (for VF)
~~~~~~~~~~~~~~~~~~~~~
@@ -1583,7 +1666,7 @@ Configure the outer layer to encapsulate a packet inside a VXLAN tunnel::
udp-dst (udp-dst) ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci) \
eth-src (eth-src) eth-dst (eth-dst)
-Those command will set an internal configuration inside testpmd, any following
+These commands will set an internal configuration inside testpmd, any following
flow rule using the action vxlan_encap will use the last configuration set.
To have a different encapsulation header, one of those commands must be called
before the flow rule creation.
@@ -1598,7 +1681,7 @@ Configure the outer layer to encapsulate a packet inside a NVGRE tunnel::
set nvgre-with-vlan ip-version (ipv4|ipv6) tni (tni) ip-src (ip-src) \
ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src) eth-dst (eth-dst)
-Those command will set an internal configuration inside testpmd, any following
+These commands will set an internal configuration inside testpmd, any following
flow rule using the action nvgre_encap will use the last configuration set.
To have a different encapsulation header, one of those commands must be called
before the flow rule creation.
@@ -1641,7 +1724,7 @@ Configure the outer layer to encapsulate a packet inside a MPLSoGRE tunnel::
ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci) \
eth-src (eth-src) eth-dst (eth-dst)
-Those command will set an internal configuration inside testpmd, any following
+These commands will set an internal configuration inside testpmd, any following
flow rule using the action mplsogre_encap will use the last configuration set.
To have a different encapsulation header, one of those commands must be called
before the flow rule creation.
@@ -1654,7 +1737,7 @@ Configure the outer layer to decapsulate MPLSoGRE packet::
set mplsogre_decap ip-version (ipv4|ipv6)
set mplsogre_decap-with-vlan ip-version (ipv4|ipv6)
-Those command will set an internal configuration inside testpmd, any following
+These commands will set an internal configuration inside testpmd, any following
flow rule using the action mplsogre_decap will use the last configuration set.
To have a different decapsulation header, one of those commands must be called
before the flow rule creation.
@@ -1671,7 +1754,7 @@ Configure the outer layer to encapsulate a packet inside a MPLSoUDP tunnel::
udp-src (udp-src) udp-dst (udp-dst) ip-src (ip-src) ip-dst (ip-dst) \
vlan-tci (vlan-tci) eth-src (eth-src) eth-dst (eth-dst)
-Those command will set an internal configuration inside testpmd, any following
+These commands will set an internal configuration inside testpmd, any following
flow rule using the action mplsoudp_encap will use the last configuration set.
To have a different encapsulation header, one of those commands must be called
before the flow rule creation.
@@ -1684,7 +1767,7 @@ Configure the outer layer to decapsulate MPLSoUDP packet::
set mplsoudp_decap ip-version (ipv4|ipv6)
set mplsoudp_decap-with-vlan ip-version (ipv4|ipv6)
-Those command will set an internal configuration inside testpmd, any following
+These commands will set an internal configuration inside testpmd, any following
flow rule using the action mplsoudp_decap will use the last configuration set.
To have a different decapsulation header, one of those commands must be called
before the flow rule creation.
@@ -2113,11 +2196,13 @@ port config input set
~~~~~~~~~~~~~~~~~~~~~
Config RSS/FDIR/FDIR flexible payload input set for some pctype::
+
testpmd> port config (port_id) pctype (pctype_id) \
(hash_inset|fdir_inset|fdir_flx_inset) \
(get|set|clear) field (field_idx)
Clear RSS/FDIR/FDIR flexible payload input set for some pctype::
+
testpmd> port config (port_id) pctype (pctype_id) \
(hash_inset|fdir_inset|fdir_flx_inset) clear all
@@ -2130,6 +2215,7 @@ port config udp_tunnel_port
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add/remove UDP tunnel port for VXLAN/GENEVE tunneling protocols::
+
testpmd> port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)
port config tx_metadata
@@ -2140,6 +2226,26 @@ testpmd will add this value to any Tx packet sent from this port::
testpmd> port config (port_id) tx_metadata (value)
+port config mtu
+~~~~~~~~~~~~~~~
+
+To configure MTU(Maximum Transmission Unit) on devices using testpmd::
+
+ testpmd> port config mtu (port_id) (value)
+
+port config rss hash key
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+To configure the RSS hash key used to compute the RSS
+hash of input [IP] packets received on port::
+
+ testpmd> port config <port_id> rss-hash-key (ipv4|ipv4-frag|\
+ ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|\
+ ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|\
+ ipv6-other|l2-payload|ipv6-ex|ipv6-tcp-ex|\
+ ipv6-udp-ex <string of hex digits \
+ (variable length, NIC dependent)>)
+
Link Bonding Functions
----------------------
@@ -2246,7 +2352,7 @@ set bonding lacp dedicated_queue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Enable dedicated tx/rx queues on bonding devices slaves to handle LACP control plane traffic
-when in mode 4 (link-aggregration-802.3ad)::
+when in mode 4 (link-aggregation-802.3ad)::
testpmd> set bonding lacp dedicated_queues (port_id) (enable|disable)
@@ -2254,7 +2360,7 @@ when in mode 4 (link-aggregration-802.3ad)::
set bonding agg_mode
~~~~~~~~~~~~~~~~~~~~
-Enable one of the specific aggregators mode when in mode 4 (link-aggregration-802.3ad)::
+Enable one of the specific aggregators mode when in mode 4 (link-aggregation-802.3ad)::
testpmd> set bonding agg_mode (port_id) (bandwidth|count|stable)
@@ -2648,8 +2754,8 @@ where:
* ``shared_shaper_id``: Shared shaper ID to be deleted.
-Set port traffic management hiearchy node private shaper
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Set port traffic management hierarchy node private shaper
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
set the port traffic management hierarchy node private shaper::
@@ -2700,7 +2806,7 @@ Delete the WRED profile::
Add port traffic management hierarchy nonleaf node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Add nonleaf node to port traffic management hiearchy::
+Add nonleaf node to port traffic management hierarchy::
testpmd> add port tm nonleaf node (port_id) (node_id) (parent_node_id) \
(priority) (weight) (level_id) (shaper_profile_id) \
@@ -2715,7 +2821,7 @@ where:
* ``weight``: Node weight (lowest weight is one). The node weight is relative
to the weight sum of all siblings that have the same priority. It is used by
the WFQ algorithm running on the parent node for scheduling this node.
-* ``level_id``: Hiearchy level of the node.
+* ``level_id``: Hierarchy level of the node.
* ``shaper_profile_id``: Shaper profile ID of the private shaper to be used by
the node.
* ``n_sp_priorities``: Number of strict priorities.
@@ -2726,7 +2832,7 @@ where:
Add port traffic management hierarchy leaf node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Add leaf node to port traffic management hiearchy::
+Add leaf node to port traffic management hierarchy::
testpmd> add port tm leaf node (port_id) (node_id) (parent_node_id) \
(priority) (weight) (level_id) (shaper_profile_id) \
@@ -2741,7 +2847,7 @@ where:
* ``weight``: Node weight (lowest weight is one). The node weight is relative
to the weight sum of all siblings that have the same priority. It is used by
the WFQ algorithm running on the parent node for scheduling this node.
-* ``level_id``: Hiearchy level of the node.
+* ``level_id``: Hierarchy level of the node.
* ``shaper_profile_id``: Shaper profile ID of the private shaper to be used by
the node.
* ``cman_mode``: Congestion management mode to be enabled for this node.
@@ -2753,7 +2859,7 @@ where:
Delete port traffic management hierarchy node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Delete node from port traffic management hiearchy::
+Delete node from port traffic management hierarchy::
testpmd> del port tm node (port_id) (node_id)
@@ -3705,7 +3811,7 @@ This section lists supported pattern items and their attributes, if any.
- ``sla {MAC-48}``: source Ethernet LLA.
-- ``icmp6_nd_opt_sla_eth``: match ICMPv6 neighbor discovery target Ethernet
+- ``icmp6_nd_opt_tla_eth``: match ICMPv6 neighbor discovery target Ethernet
link-layer address option.
- ``tla {MAC-48}``: target Ethernet LLA.
@@ -3946,7 +4052,7 @@ This section lists supported actions and their attributes, if any.
- ``dec_ttl``: Performs a decrease TTL value action
-- ``set_ttl``: Set TTL value with specificed value
+- ``set_ttl``: Set TTL value with specified value
- ``ttl_value {unsigned}``: The new TTL value to be set
- ``set_mac_src``: set source MAC address
@@ -4467,7 +4573,7 @@ The following sections show functions to load/unload eBPF based filters.
bpf-load
~~~~~~~~
-Load an eBPF program as a callback for partciular RX/TX queue::
+Load an eBPF program as a callback for particular RX/TX queue::
testpmd> bpf-load rx|tx (portid) (queueid) (load-flags) (bpf-prog-filename)
@@ -4490,13 +4596,13 @@ For example:
cd test/bpf
clang -O2 -target bpf -c t1.c
-Then to load (and JIT compile) t1.o at RX queue 0, port 1::
+Then to load (and JIT compile) t1.o at RX queue 0, port 1:
.. code-block:: console
testpmd> bpf-load rx 1 0 J ./dpdk.org/test/bpf/t1.o
-To load (not JITed) t1.o at TX queue 0, port 0::
+To load (not JITed) t1.o at TX queue 0, port 0:
.. code-block:: console
@@ -4505,7 +4611,7 @@ To load (not JITed) t1.o at TX queue 0, port 0::
bpf-unload
~~~~~~~~~~
-Unload previously loaded eBPF program for partciular RX/TX queue::
+Unload previously loaded eBPF program for particular RX/TX queue::
testpmd> bpf-unload rx|tx (portid) (queueid)
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index c366af4e..2fc65441 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -59,7 +59,7 @@ To set on the linearization options add below definition to the
**Step 3: Build the application**
Execute the ``dpdk-setup.sh`` script to build the DPDK library together with the
-``dpdk-test-crypto-perf`` applcation.
+``dpdk-test-crypto-perf`` application.
Initially, the user must select a DPDK target to choose the correct target type
and compiler options to use when building the libraries.
@@ -80,7 +80,7 @@ EAL Options
~~~~~~~~~~~
The following are the EAL command-line options that can be used in conjunction
-with the ``dpdk-test-crypto-perf`` applcation.
+with the ``dpdk-test-crypto-perf`` application.
See the DPDK Getting Started Guides for more information on these options.
* ``-c <COREMASK>`` or ``-l <CORELIST>``
@@ -96,10 +96,10 @@ See the DPDK Getting Started Guides for more information on these options.
Add a virtual device.
-Appication Options
-~~~~~~~~~~~~~~~~~~
+Application Options
+~~~~~~~~~~~~~~~~~~~
-The following are the appication command-line options:
+The following are the application command-line options:
* ``--ptest type``
@@ -338,13 +338,13 @@ Test Vector File
The test vector file is a text file contain information about test vectors.
The file is made of the sections. The first section doesn't have header.
It contain global information used in each test variant vectors -
-typically information about plaintext, ciphertext, cipher key, aut key,
+typically information about plaintext, ciphertext, cipher key, auth key,
initial vector. All other sections begin header.
The sections contain particular information typically digest.
**Format of the file:**
-Each line beginig with sign '#' contain comment and it is ignored by parser::
+Each line beginning with the sign '#' contains a comment and is ignored by the parser::
# <comment>
@@ -352,16 +352,16 @@ Header line is just name in square bracket::
[<section name>]
-Data line contain information tocken then sign '=' and
+A data line contains an information token, then the sign '=' and
a string of bytes in C byte array format::
- <tocken> = <C byte array>
+ <token> = <C byte array>
-**Tockens list:**
+**Tokens list:**
* ``plaintext``
- Original plaintext to be crypted.
+ Original plaintext to be encrypted.
* ``ciphertext``
diff --git a/doc/guides/tools/proc_info.rst b/doc/guides/tools/proc_info.rst
index d5b5ed6a..e9a0b79d 100644
--- a/doc/guides/tools/proc_info.rst
+++ b/doc/guides/tools/proc_info.rst
@@ -48,7 +48,7 @@ Limitations
* When running ``dpdk-procinfo`` with shared library mode, it is required to
pass the same NIC PMD libraries as used for the primary application. Any
- mismatch in PMD library arguments can lead to undefined behaviour and results
+ mismatch in PMD library arguments can lead to undefined behavior and results
affecting primary application too.
* Stats retrieval using ``dpdk-procinfo`` is not supported for virtual devices like PCAP and TAP.
diff --git a/doc/guides/tools/testbbdev.rst b/doc/guides/tools/testbbdev.rst
index 5caa9023..a8cb6fbb 100644
--- a/doc/guides/tools/testbbdev.rst
+++ b/doc/guides/tools/testbbdev.rst
@@ -139,7 +139,7 @@ There are 6 main test cases that can be executed using testbbdev tool:
* Latency measurement [-c latency]
- Measures the time consumed from the first enqueue until the first
appearance of a dequeued result
- - This measurment represents the full latency of a bbdev operation
+ - This measurement represents the full latency of a bbdev operation
(encode or decode) to execute
* Poll-mode Throughput measurement [-c throughput]
@@ -307,7 +307,7 @@ baseband_null device does not have to be defined explicitly as it is created by
Test Vector files
-=================
+-----------------
Test Vector files contain the data which is used to set turbo decoder/encoder
parameters and buffers for validation purpose. New test vector files should be
@@ -316,7 +316,7 @@ the syntax of the test vector files is in the following section.
Basic principles for test vector files
---------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Line started with ``#`` is treated as a comment and is ignored.
If variable is a chain of values, values should be separated by a comma. If
@@ -351,7 +351,7 @@ documented in *rte_bbdev_op.h*
Turbo decoder test vectors template
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For turbo decoder it has to be always set to ``RTE_BBDEV_OP_TURBO_DEC``
@@ -528,7 +528,7 @@ Following statuses can be used:
Turbo encoder test vectors template
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For turbo encoder it has to be always set to ``RTE_BBDEV_OP_TURBO_ENC``
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 4ebbc3d3..9ab8e835 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -603,9 +603,9 @@ fman_if_discard_rx_errors(struct fman_if *fm_if)
out_be32(fmbm_rfsem, 0);
/* Configure the discard mask to discard the error packets which have
- * DMA errors, Frame size error, Header error etc. The mask 0x010CE3F0
+ * DMA errors, Frame size error, Header error etc. The mask 0x010EE3F0
* is to configured discard all the errors which come in the FD[STATUS]
*/
fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
- out_be32(fmbm_rfsdm, 0x010CE3F0);
+ out_be32(fmbm_rfsdm, 0x010EE3F0);
}
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 2dcdca01..776b7caa 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -195,7 +195,7 @@ scan_one_fslmc_device(char *dev_name)
t_ptr = strtok(NULL, ".");
if (!t_ptr) {
- DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ DPAA2_BUS_ERR("Incorrect device string observed (null)");
goto cleanup;
}
@@ -246,13 +246,13 @@ rte_fslmc_parse(const char *name, void *addr)
strncmp("dpci", t_ptr, 4) &&
strncmp("dpmcp", t_ptr, 5) &&
strncmp("dpdmai", t_ptr, 6)) {
- DPAA2_BUS_ERR("Unknown or unsupported device");
+ DPAA2_BUS_DEBUG("Unknown or unsupported device (%s)", name);
goto err_out;
}
t_ptr = strchr(name, '.');
if (!t_ptr) {
- DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ DPAA2_BUS_ERR("Incorrect device string observed (null)");
goto err_out;
}
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 655bff4b..1ddd69e1 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -11,13 +11,13 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
+#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <malloc.h>
#include <unistd.h>
-#include <error.h>
#include <linux/types.h>
#include <rte_atomic.h>
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
index 072ad551..e010b1b6 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
@@ -1,7 +1,7 @@
-/* Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
*/
+
struct qbman_swp;
struct qbman_fq_query_np_rslt {
diff --git a/drivers/bus/fslmc/qbman/qbman_debug.c b/drivers/bus/fslmc/qbman/qbman_debug.c
index 591673ab..0bb2ce88 100644
--- a/drivers/bus/fslmc/qbman/qbman_debug.c
+++ b/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -1,6 +1,5 @@
-/* Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
*/
#include "compat.h"
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index 2c03ca41..87f0e2b6 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -143,10 +143,11 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
struct rte_vdev_driver *driver;
int ret;
- name = rte_vdev_device_name(dev);
+ if (rte_dev_is_probed(&dev->device))
+ return -EEXIST;
- VDEV_LOG(DEBUG, "Search driver %s to probe device %s", name,
- rte_vdev_device_name(dev));
+ name = rte_vdev_device_name(dev);
+ VDEV_LOG(DEBUG, "Search driver to probe device %s", name);
if (vdev_parse(name, &driver))
return -1;
@@ -482,7 +483,7 @@ static int
vdev_probe(void)
{
struct rte_vdev_device *dev;
- int ret = 0;
+ int r, ret = 0;
/* call the init function for each virtual device */
TAILQ_FOREACH(dev, &vdev_device_list, next) {
@@ -491,10 +492,10 @@ vdev_probe(void)
* we call each driver probe.
*/
- if (rte_dev_is_probed(&dev->device))
- continue;
-
- if (vdev_probe_all_drivers(dev)) {
+ r = vdev_probe_all_drivers(dev);
+ if (r != 0) {
+ if (r == -EEXIST)
+ continue;
VDEV_LOG(ERR, "failed to initialize %s device",
rte_vdev_device_name(dev));
ret = -1;
diff --git a/drivers/bus/vmbus/linux/vmbus_uio.c b/drivers/bus/vmbus/linux/vmbus_uio.c
index 38df4d72..7cab5c19 100644
--- a/drivers/bus/vmbus/linux/vmbus_uio.c
+++ b/drivers/bus/vmbus/linux/vmbus_uio.c
@@ -202,6 +202,7 @@ static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev,
char ring_path[PATH_MAX];
size_t file_size;
struct stat sb;
+ void *mapaddr;
int fd;
snprintf(ring_path, sizeof(ring_path),
@@ -232,18 +233,65 @@ static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev,
return -EINVAL;
}
- *ring_size = file_size / 2;
- *ring_buf = vmbus_map_resource(vmbus_map_addr, fd,
- 0, sb.st_size, 0);
+ mapaddr = vmbus_map_resource(vmbus_map_addr, fd,
+ 0, file_size, 0);
close(fd);
- if (ring_buf == MAP_FAILED)
+ if (mapaddr == MAP_FAILED)
return -EIO;
+ *ring_size = file_size / 2;
+ *ring_buf = mapaddr;
+
vmbus_map_addr = RTE_PTR_ADD(ring_buf, file_size);
return 0;
}
+int
+vmbus_uio_map_secondary_subchan(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ const struct vmbus_br *br = &chan->txbr;
+ char ring_path[PATH_MAX];
+ void *mapaddr, *ring_buf;
+ uint32_t ring_size;
+ int fd;
+
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name,
+ chan->relid);
+
+ ring_buf = br->vbr;
+ ring_size = br->dsize + sizeof(struct vmbus_bufring);
+ VMBUS_LOG(INFO, "secondary ring_buf %p size %u",
+ ring_buf, ring_size);
+
+ fd = open(ring_path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ ring_path, strerror(errno));
+ return -errno;
+ }
+
+ mapaddr = vmbus_map_resource(ring_buf, fd, 0, 2 * ring_size, 0);
+ close(fd);
+
+ if (mapaddr == ring_buf)
+ return 0;
+
+ if (mapaddr == MAP_FAILED)
+ VMBUS_LOG(ERR,
+ "mmap subchan %u in secondary failed", chan->relid);
+ else {
+ VMBUS_LOG(ERR,
+ "mmap subchan %u in secondary address mismatch",
+ chan->relid);
+ vmbus_unmap_resource(mapaddr, 2 * ring_size);
+ }
+ return -1;
+}
+
int vmbus_uio_map_rings(struct vmbus_channel *chan)
{
const struct rte_vmbus_device *dev = chan->device;
diff --git a/drivers/bus/vmbus/private.h b/drivers/bus/vmbus/private.h
index 211127dd..f19b14e4 100644
--- a/drivers/bus/vmbus/private.h
+++ b/drivers/bus/vmbus/private.h
@@ -45,6 +45,7 @@ struct mapped_vmbus_resource {
rte_uuid_t id;
int nb_maps;
+ struct vmbus_channel *primary;
struct vmbus_map maps[VMBUS_MAX_RESOURCE];
char path[PATH_MAX];
};
@@ -107,6 +108,8 @@ bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
int vmbus_uio_get_subchan(struct vmbus_channel *primary,
struct vmbus_channel **subchan);
int vmbus_uio_map_rings(struct vmbus_channel *chan);
+int vmbus_uio_map_secondary_subchan(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan);
void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen);
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index bd14c066..46b3ba3f 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -352,12 +352,21 @@ int vmbus_chan_create(const struct rte_vmbus_device *device,
int rte_vmbus_chan_open(struct rte_vmbus_device *device,
struct vmbus_channel **new_chan)
{
+ struct mapped_vmbus_resource *uio_res;
int err;
+ uio_res = vmbus_uio_find_resource(device);
+ if (!uio_res) {
+ VMBUS_LOG(ERR, "can't find uio resource");
+ return -EINVAL;
+ }
+
err = vmbus_chan_create(device, device->relid, 0,
device->monitor_id, new_chan);
- if (!err)
+ if (!err) {
device->primary = *new_chan;
+ uio_res->primary = *new_chan;
+ }
return err;
}
@@ -396,11 +405,16 @@ void rte_vmbus_chan_close(struct vmbus_channel *chan)
const struct rte_vmbus_device *device = chan->device;
struct vmbus_channel *primary = device->primary;
- if (chan != primary)
+ /*
+ * intentionally leak primary channel because
+ * secondary may still reference it
+ */
+ if (chan != primary) {
STAILQ_REMOVE(&primary->subchannel_list, chan,
vmbus_channel, next);
+ rte_free(chan);
+ }
- rte_free(chan);
}
static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br)
diff --git a/drivers/bus/vmbus/vmbus_common_uio.c b/drivers/bus/vmbus/vmbus_common_uio.c
index 5ddd36ab..9947f82a 100644
--- a/drivers/bus/vmbus/vmbus_common_uio.c
+++ b/drivers/bus/vmbus/vmbus_common_uio.c
@@ -27,6 +27,7 @@ static int
vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
{
int fd, i;
+ struct vmbus_channel *chan;
struct mapped_vmbus_resource *uio_res;
struct mapped_vmbus_res_list *uio_res_list
= RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
@@ -47,9 +48,10 @@ vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
for (i = 0; i != uio_res->nb_maps; i++) {
void *mapaddr;
+ off_t offset = i * PAGE_SIZE;
mapaddr = vmbus_map_resource(uio_res->maps[i].addr,
- fd, 0,
+ fd, offset,
uio_res->maps[i].size, 0);
if (mapaddr == uio_res->maps[i].addr)
@@ -75,6 +77,20 @@ vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
/* fd is not needed in slave process, close it */
close(fd);
+
+ dev->primary = uio_res->primary;
+ if (!dev->primary) {
+ VMBUS_LOG(ERR, "missing primary channel");
+ return -1;
+ }
+
+ STAILQ_FOREACH(chan, &dev->primary->subchannel_list, next) {
+ if (vmbus_uio_map_secondary_subchan(dev, chan) != 0) {
+ VMBUS_LOG(ERR, "cannot map secondary subchan");
+ return -1;
+ }
+ }
+
return 0;
}
@@ -97,9 +113,9 @@ vmbus_uio_map_primary(struct rte_vmbus_device *dev)
/* Map the resources */
for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
- /* skip empty BAR */
+ /* stop at empty BAR */
if (dev->resource[i].len == 0)
- continue;
+ break;
ret = vmbus_uio_map_resource_by_index(dev, i, uio_res, 0);
if (ret)
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 5933ea77..d408d50e 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -549,7 +549,7 @@ cpt_digest_gen_prep(uint32_t flags,
/* Minor op is passthrough */
opcode.s.minor = 0x03;
/* Send out completion code only */
- vq_cmd_w0.s.param2 = 0x1;
+ vq_cmd_w0.s.param2 = rte_cpu_to_be_16(0x1);
}
vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 4e66c583..935ec3fb 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -211,16 +211,17 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
}
/* Allocate the queue pair data structure. */
- qp = rte_zmalloc("qat PMD qp metadata",
- sizeof(*qp), RTE_CACHE_LINE_SIZE);
+ qp = rte_zmalloc_socket("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE,
+ qat_qp_conf->socket_id);
if (qp == NULL) {
QAT_LOG(ERR, "Failed to alloc mem for qp struct");
return -ENOMEM;
}
qp->nb_descriptors = qat_qp_conf->nb_descriptors;
- qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+ qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer",
qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
- RTE_CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id);
if (qp->op_cookies == NULL) {
QAT_LOG(ERR, "Failed to alloc mem for cookie");
rte_free(qp);
@@ -260,7 +261,8 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
qp->nb_descriptors,
qat_qp_conf->cookie_size, 64, 0,
- NULL, NULL, NULL, NULL, qat_qp_conf->socket_id,
+ NULL, NULL, NULL, NULL,
+ qat_dev->pci_dev->device.numa_node,
0);
if (!qp->op_cookie_pool) {
QAT_LOG(ERR, "QAT PMD Cannot create"
@@ -388,7 +390,7 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
qp_conf->service_str, "qp_mem",
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
- qp_conf->socket_id);
+ qat_dev->pci_dev->device.numa_node);
if (qp_mz == NULL) {
QAT_LOG(ERR, "Failed to allocate ring memzone");
return -ENOMEM;
diff --git a/drivers/compress/isal/isal_compress_pmd.c b/drivers/compress/isal/isal_compress_pmd.c
index 9f1e9688..f208b650 100644
--- a/drivers/compress/isal/isal_compress_pmd.c
+++ b/drivers/compress/isal/isal_compress_pmd.c
@@ -372,7 +372,7 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
uint8_t *temp_level_buf = qp->stream->level_buf;
/* Initialize compression stream */
- isal_deflate_stateless_init(qp->stream);
+ isal_deflate_init(qp->stream);
qp->stream->level_buf = temp_level_buf;
@@ -461,8 +461,6 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
}
op->consumed = qp->stream->total_in;
op->produced = qp->stream->total_out;
- isal_deflate_reset(qp->stream);
-
return ret;
}
@@ -538,7 +536,6 @@ process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
op->consumed = op->src.length - qp->state->avail_in;
}
op->produced = qp->state->total_out;
- isal_inflate_reset(qp->state);
return ret;
}
diff --git a/drivers/compress/isal/isal_compress_pmd_ops.c b/drivers/compress/isal/isal_compress_pmd_ops.c
index 41cade87..472e54e8 100644
--- a/drivers/compress/isal/isal_compress_pmd_ops.c
+++ b/drivers/compress/isal/isal_compress_pmd_ops.c
@@ -133,10 +133,18 @@ isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
{
if (dev_info != NULL) {
dev_info->capabilities = isal_pmd_capabilities;
- dev_info->feature_flags = RTE_COMPDEV_FF_CPU_AVX512 |
- RTE_COMPDEV_FF_CPU_AVX2 |
- RTE_COMPDEV_FF_CPU_AVX |
- RTE_COMPDEV_FF_CPU_SSE;
+
+ /* Check CPU for supported vector instruction and set
+ * feature_flags
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_AVX;
+ else
+ dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_SSE;
}
}
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index ea930772..139298ef 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -185,7 +185,7 @@ qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
memzone = rte_memzone_reserve_aligned(inter_buff_mz_name, full_size,
comp_dev->compressdev->data->socket_id,
- RTE_MEMZONE_2MB, QAT_64_BYTE_ALIGN);
+ RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
if (memzone == NULL) {
QAT_LOG(ERR, "Can't allocate intermediate buffers"
" for device %s", comp_dev->qat_dev->name);
@@ -239,7 +239,8 @@ qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
- uint32_t num_elements)
+ struct rte_compressdev_config *config,
+ uint32_t num_elements)
{
char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *mp;
@@ -264,7 +265,7 @@ qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
mp = rte_mempool_create(xform_pool_name,
num_elements,
qat_comp_xform_size(), 0, 0,
- NULL, NULL, NULL, NULL, rte_socket_id(),
+ NULL, NULL, NULL, NULL, config->socket_id,
0);
if (mp == NULL) {
QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
@@ -320,7 +321,7 @@ qat_comp_dev_config(struct rte_compressdev *dev,
}
}
- comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+ comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, config,
config->max_nb_priv_xforms);
if (comp_dev->xformpool == NULL) {
diff --git a/drivers/crypto/caam_jr/caam_jr.c b/drivers/crypto/caam_jr/caam_jr.c
index 190053ee..efc69b6e 100644
--- a/drivers/crypto/caam_jr/caam_jr.c
+++ b/drivers/crypto/caam_jr/caam_jr.c
@@ -311,7 +311,7 @@ caam_jr_prep_cdb(struct caam_jr_session *ses)
int32_t shared_desc_len = 0;
struct sec_cdb *cdb;
int err;
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
int swap = false;
#else
int swap = true;
@@ -798,7 +798,7 @@ build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
sg->len = cpu_to_caam32(ses->digest_length);
length += ses->digest_length;
} else {
- length -= ses->digest_length;
+ sg->len -= ses->digest_length;
}
/* last element*/
diff --git a/drivers/crypto/caam_jr/caam_jr_uio.c b/drivers/crypto/caam_jr/caam_jr_uio.c
index bf872a22..afd75c9a 100644
--- a/drivers/crypto/caam_jr/caam_jr_uio.c
+++ b/drivers/crypto/caam_jr/caam_jr_uio.c
@@ -362,8 +362,8 @@ free_job_ring(uint32_t uio_fd)
job_ring->register_base_addr,
(unsigned long)job_ring->map_size, strerror(errno));
} else
- CAAM_JR_DEBUG(" JR UIO memory unmapped at %p",
- job_ring->register_base_addr);
+ CAAM_JR_DEBUG("JR UIO memory is unmapped");
+
job_ring->register_base_addr = NULL;
}
@@ -445,7 +445,11 @@ sec_configure(void)
ret = file_read_first_line(SEC_UIO_DEVICE_SYS_ATTR_PATH,
dir->d_name, "name", uio_name);
CAAM_JR_INFO("sec device uio name: %s", uio_name);
- SEC_ASSERT(ret == 0, -1, "file_read_first_line failed");
+ if (ret != 0) {
+ CAAM_JR_ERR("file_read_first_line failed\n");
+ closedir(d);
+ return -1;
+ }
if (file_name_match_extract(uio_name,
SEC_UIO_DEVICE_NAME,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 34c14f77..a7973cc0 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -236,8 +236,8 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
/* Configure Output SGE for Encap/Decap */
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
- DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
- auth_only_len);
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
+ RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
mbuf = mbuf->next;
@@ -400,8 +400,8 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
/* Configure Output SGE for Encap/Decap */
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
- DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
- dst->data_off - auth_only_len);
+ DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
+ RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
sge->length = sym_op->aead.data.length + auth_only_len;
if (sess->dir == DIR_ENC) {
@@ -2864,7 +2864,7 @@ dpaa2_sec_security_session_destroy(void *dev __rte_unused,
rte_free(s->ctxt);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
- memset(sess, 0, sizeof(dpaa2_sec_session));
+ memset(s, 0, sizeof(dpaa2_sec_session));
set_sec_session_private_data(sess, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
@@ -2913,7 +2913,7 @@ dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
rte_free(s->ctxt);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
- memset(sess, 0, sizeof(dpaa2_sec_session));
+ memset(s, 0, sizeof(dpaa2_sec_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
index 719ef605..fee84410 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
@@ -1,7 +1,5 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+
* Copyright 2008-2013 Freescale Semiconductor, Inc.
- *
- * SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+
*/
#ifndef __DESC_PDCP_H__
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 8958fd06..10201c58 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -2166,7 +2166,7 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused,
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
- memset(sess, 0, sizeof(dpaa_sec_session));
+ memset(s, 0, sizeof(dpaa_sec_session));
set_sec_session_private_data(sess, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
diff --git a/drivers/crypto/kasumi/meson.build b/drivers/crypto/kasumi/meson.build
index a09b0e25..9a8956a0 100644
--- a/drivers/crypto/kasumi/meson.build
+++ b/drivers/crypto/kasumi/meson.build
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
-lib = cc.find_library('libsso_kasumi', required: false)
-if not lib.found()
+lib = cc.find_library('sso_kasumi', required: false)
+if not lib.found() or not cc.has_header('sso_kasumi.h')
build = false
-else
- ext_deps += lib
+ subdir_done()
endif
+ext_deps += lib
sources = files('rte_kasumi_pmd.c', 'rte_kasumi_pmd_ops.c')
deps += ['bus_vdev']
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index bf1bd928..83e78860 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -3,7 +3,7 @@
drivers = ['aesni_gcm', 'aesni_mb', 'caam_jr', 'ccp', 'dpaa_sec', 'dpaa2_sec',
'kasumi', 'mvsam', 'null', 'octeontx', 'openssl', 'qat', 'scheduler',
- 'virtio', 'zuc']
+ 'snow3g', 'virtio', 'zuc']
std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 11ea0d19..5b27bb91 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1799,6 +1799,9 @@ process_openssl_modinv_op(struct rte_crypto_op *cop,
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
+ BN_clear(res);
+ BN_clear(base);
+
return 0;
}
@@ -1820,17 +1823,20 @@ process_openssl_modexp_op(struct rte_crypto_op *cop,
return -1;
}
- base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
- op->modinv.base.length, base);
+ base = BN_bin2bn((const unsigned char *)op->modex.base.data,
+ op->modex.base.length, base);
if (BN_mod_exp(res, base, sess->u.e.exp,
sess->u.e.mod, sess->u.e.ctx)) {
- op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ op->modex.base.length = BN_bn2bin(res, op->modex.base.data);
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
+ BN_clear(res);
+ BN_clear(base);
+
return 0;
}
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 4d7ec01d..e147572e 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -242,7 +242,8 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
break;
case RTE_CRYPTO_CIPHER_NULL:
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
break;
case RTE_CRYPTO_CIPHER_KASUMI_F8:
if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
diff --git a/drivers/crypto/snow3g/meson.build b/drivers/crypto/snow3g/meson.build
new file mode 100644
index 00000000..c566a5f6
--- /dev/null
+++ b/drivers/crypto/snow3g/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2019 Intel Corporation
+
+lib = cc.find_library('sso_snow3g', required: false)
+if not lib.found() or not cc.has_header('sso_snow3g.h')
+ build = false
+ subdir_done()
+endif
+
+allow_experimental_apis = true
+ext_deps += lib
+sources = files('rte_snow3g_pmd.c', 'rte_snow3g_pmd_ops.c')
+deps += ['bus_vdev', 'cryptodev']
diff --git a/drivers/crypto/virtio/virtio_logs.h b/drivers/crypto/virtio/virtio_logs.h
index 26a286cf..1ee38193 100644
--- a/drivers/crypto/virtio/virtio_logs.h
+++ b/drivers/crypto/virtio/virtio_logs.h
@@ -7,8 +7,10 @@
#include <rte_log.h>
+extern int virtio_crypto_logtype_init;
+
#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \
"PMD: %s(): " fmt "\n", __func__, ##args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
index e32a1ecd..e9a63cb5 100644
--- a/drivers/crypto/virtio/virtio_rxtx.c
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -203,8 +203,8 @@ virtqueue_crypto_sym_enqueue_xmit(
uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
uint32_t indirect_vring_addr_offset = req_data_len +
sizeof(struct virtio_crypto_inhdr);
- uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset +
- sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP;
+ uint32_t indirect_iv_addr_offset =
+ offsetof(struct virtio_crypto_op_cookie, iv);
struct rte_crypto_sym_op *sym_op = cop->sym;
struct virtio_crypto_session *session =
(struct virtio_crypto_session *)get_sym_session_private_data(
diff --git a/drivers/crypto/zuc/meson.build b/drivers/crypto/zuc/meson.build
index b8ca7107..2b48072c 100644
--- a/drivers/crypto/zuc/meson.build
+++ b/drivers/crypto/zuc/meson.build
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
-lib = cc.find_library('libsso_zuc', required: false)
-if not lib.found()
+lib = cc.find_library('sso_zuc', required: false)
+if not lib.found() or not cc.has_header('sso_zuc.h')
build = false
-else
- ext_deps += lib
+ subdir_done()
endif
+ext_deps += lib
sources = files('rte_zuc_pmd.c', 'rte_zuc_pmd_ops.c')
deps += ['bus_vdev']
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index c847b3ea..bdac1aa5 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -1,8 +1,5 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright 2017 NXP
- *
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
*/
#ifndef __DPAA2_EVENTDEV_H__
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index a2c2060c..86f2e539 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -1,5 +1,4 @@
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 NXP
*/
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index 33ba1364..9387d414 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -102,9 +102,6 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
return -ENOTSUP;
- if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
- return -ENOTSUP;
-
/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
* avoid the "fake" TYPE_PARALLEL flow_id assignment. Since
* the queue will only have a single serving port, no
@@ -113,8 +110,12 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
*/
if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
- else /* atomic or parallel */
+ else {
+ if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
+ return -ENOTSUP;
+ /* atomic or parallel */
queue->schedule_type = conf->schedule_type;
+ }
queue->num_serving_ports = 0;
@@ -217,7 +218,9 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
.max_num_events = DSW_MAX_EVENTS,
.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
- RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE|
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
};
}
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
index 836ecbb7..fb723f72 100644
--- a/drivers/event/meson.build
+++ b/drivers/event/meson.build
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['dpaa', 'dpaa2', 'octeontx', 'opdl', 'skeleton', 'sw', 'dsw']
+drivers = ['dpaa', 'dpaa2', 'opdl', 'skeleton', 'sw', 'dsw']
+if not (toolchain == 'gcc' and cc.version().version_compare('<4.8.6') and
+ dpdk_conf.has('RTE_ARCH_ARM64'))
+ drivers += 'octeontx'
+endif
std_deps = ['eventdev', 'kvargs']
config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index a4f0bc8b..d2d2be44 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -422,16 +422,17 @@ opdl_dump(struct rte_eventdev *dev, FILE *f)
else
p_type = "????";
- sprintf(queue_id, "%02u", port->external_qid);
+ snprintf(queue_id, sizeof(queue_id), "%02u",
+ port->external_qid);
if (port->p_type == OPDL_REGULAR_PORT ||
port->p_type == OPDL_ASYNC_PORT)
- sprintf(total_cyc,
+ snprintf(total_cyc, sizeof(total_cyc),
" %'16"PRIu64"",
(cpg != 0 ?
port->port_stat[total_cycles] / cpg
: 0));
else
- sprintf(total_cyc,
+ snprintf(total_cyc, sizeof(total_cyc),
" ----");
fprintf(f,
"%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
diff --git a/drivers/event/opdl/opdl_evdev_xstats.c b/drivers/event/opdl/opdl_evdev_xstats.c
index 0e6c6bd5..27b3d880 100644
--- a/drivers/event/opdl/opdl_evdev_xstats.c
+++ b/drivers/event/opdl/opdl_evdev_xstats.c
@@ -32,10 +32,9 @@ opdl_xstats_init(struct rte_eventdev *dev)
uint32_t index = (i * max_num_port_xstat) + j;
/* Name */
- sprintf(device->port_xstat[index].stat.name,
- "port_%02u_%s",
- i,
- port_xstat_str[j]);
+ snprintf(device->port_xstat[index].stat.name,
+ sizeof(device->port_xstat[index].stat.name),
+ "port_%02u_%s", i, port_xstat_str[j]);
/* ID */
device->port_xstat[index].id = index;
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
index 751a59db..14ababe0 100644
--- a/drivers/event/opdl/opdl_ring.h
+++ b/drivers/event/opdl/opdl_ring.h
@@ -24,7 +24,7 @@
* packets.
*
* A opdl_ring can be used as the basis for pipeline based applications. Instead
- * of each stage in a pipeline dequeueing from a ring, processing and enqueueing
+ * of each stage in a pipeline dequeuing from a ring, processing and enqueuing
* to another ring, it can process entries in-place on the ring. If stages do
* not depend on each other, they can run in parallel.
*
@@ -152,7 +152,7 @@ opdl_ring_get_name(const struct opdl_ring *t);
* Enabling this may have a negative impact on performance if only one thread
* will be processing this stage.
* @param is_input
- * Indication to nitialise the stage with all slots available or none
+ * Indication to initialise the stage with all slots available or none
*
* @return
* A pointer to the new stage, or NULL on error.
@@ -589,7 +589,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
* Compare the event descriptor with original version in the ring.
* if key field event descriptor is changed by application, then
* update the slot in the ring otherwise do nothing with it.
- * the key field is flow_id, prioirty, mbuf, impl_opaque
+ * the key field is flow_id, priority, mbuf, impl_opaque
*
* @param s
* The opdl_stage.
@@ -600,7 +600,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
* @param atomic
* queue type associate with the stage.
* @return
- * if the evevnt key field is changed compare with previous record.
+ * if the event key field is changed compare with previous record.
*/
bool
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index d00d5de6..38c21fa0 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -385,7 +385,7 @@ run_prio_packet_test(struct test *t)
.mbuf = arp
};
err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: error failed to enqueue\n", __LINE__);
return -1;
}
@@ -477,7 +477,7 @@ test_single_directed_packet(struct test *t)
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: error failed to enqueue\n", __LINE__);
return -1;
}
@@ -546,7 +546,7 @@ test_directed_forward_credits(struct test *t)
for (i = 0; i < 1000; i++) {
err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: error failed to enqueue\n", __LINE__);
return -1;
}
@@ -707,7 +707,7 @@ burst_packets(struct test *t)
};
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
@@ -795,7 +795,7 @@ abuse_inflights(struct test *t)
/* Enqueue op only */
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
@@ -2026,7 +2026,7 @@ load_balancing(struct test *t)
};
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
@@ -2125,7 +2125,7 @@ load_balancing_history(struct test *t)
}
arp->hash.rss = flows1[i];
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
@@ -2175,7 +2175,7 @@ load_balancing_history(struct test *t)
arp->hash.rss = flows2[i];
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
@@ -2285,7 +2285,7 @@ invalid_qid(struct test *t)
};
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
@@ -2372,7 +2372,7 @@ single_packet(struct test *t)
arp->seqn = MAGIC_SEQN;
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
@@ -2416,7 +2416,7 @@ single_packet(struct test *t)
rte_pktmbuf_free(ev.mbuf);
err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
- if (err < 0) {
+ if (err != 1) {
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 790cded8..e26b0ba1 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -316,8 +316,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
* in pool, qbman_swp_acquire returns 0
*/
if (ret <= 0) {
- DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
- " err code: %d", ret);
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Buffer acquire failed with err code: %d", ret);
/* The API expect the exact number of requested bufs */
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
index c79b3d1c..98626431 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
@@ -1,5 +1,4 @@
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 NXP
*/
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index 5bc04f55..2d05bb4c 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -165,7 +165,8 @@ static struct rte_pci_driver rte_atl_pmd = {
| DEV_RX_OFFLOAD_IPV4_CKSUM \
| DEV_RX_OFFLOAD_UDP_CKSUM \
| DEV_RX_OFFLOAD_TCP_CKSUM \
- | DEV_RX_OFFLOAD_JUMBO_FRAME)
+ | DEV_RX_OFFLOAD_JUMBO_FRAME \
+ | DEV_RX_OFFLOAD_VLAN_FILTER)
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
| DEV_TX_OFFLOAD_IPV4_CKSUM \
@@ -174,6 +175,8 @@ static struct rte_pci_driver rte_atl_pmd = {
| DEV_TX_OFFLOAD_TCP_TSO \
| DEV_TX_OFFLOAD_MULTI_SEGS)
+#define SFP_EEPROM_SIZE 0x100
+
static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = ATL_MAX_RING_DESC,
.nb_min = ATL_MIN_RING_DESC,
@@ -465,8 +468,6 @@ atl_dev_start(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
- uint32_t *link_speeds;
- uint32_t speed = 0;
int status;
int err;
@@ -543,6 +544,8 @@ atl_dev_start(struct rte_eth_dev *dev)
goto error;
}
+ err = atl_dev_set_link_up(dev);
+
err = hw->aq_fw_ops->update_link_status(hw);
if (err)
@@ -550,26 +553,6 @@ atl_dev_start(struct rte_eth_dev *dev)
dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
- link_speeds = &dev->data->dev_conf.link_speeds;
-
- speed = 0x0;
-
- if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
- speed = hw->aq_nic_cfg->link_speed_msk;
- } else {
- if (*link_speeds & ETH_LINK_SPEED_10G)
- speed |= AQ_NIC_RATE_10G;
- if (*link_speeds & ETH_LINK_SPEED_5G)
- speed |= AQ_NIC_RATE_5G;
- if (*link_speeds & ETH_LINK_SPEED_1G)
- speed |= AQ_NIC_RATE_1G;
- if (*link_speeds & ETH_LINK_SPEED_2_5G)
- speed |= AQ_NIC_RATE_2G5;
- if (*link_speeds & ETH_LINK_SPEED_100M)
- speed |= AQ_NIC_RATE_100M;
- }
-
- err = hw->aq_fw_ops->set_link_speed(hw, speed);
if (err)
goto error;
@@ -657,9 +640,25 @@ static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t link_speeds = dev->data->dev_conf.link_speeds;
+ uint32_t speed_mask = 0;
+
+ if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ speed_mask = hw->aq_nic_cfg->link_speed_msk;
+ } else {
+ if (link_speeds & ETH_LINK_SPEED_10G)
+ speed_mask |= AQ_NIC_RATE_10G;
+ if (link_speeds & ETH_LINK_SPEED_5G)
+ speed_mask |= AQ_NIC_RATE_5G;
+ if (link_speeds & ETH_LINK_SPEED_1G)
+ speed_mask |= AQ_NIC_RATE_1G;
+ if (link_speeds & ETH_LINK_SPEED_2_5G)
+ speed_mask |= AQ_NIC_RATE_2G5;
+ if (link_speeds & ETH_LINK_SPEED_100M)
+ speed_mask |= AQ_NIC_RATE_100M;
+ }
- return hw->aq_fw_ops->set_link_speed(hw,
- hw->aq_nic_cfg->link_speed_msk);
+ return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}
/*
@@ -761,7 +760,7 @@ atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
atl_xstats_tbl[i].name);
- return size;
+ return i;
}
static int
@@ -781,7 +780,7 @@ atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
atl_xstats_tbl[i].offset);
}
- return n;
+ return i;
}
static int
@@ -879,6 +878,7 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
struct atl_interrupt *intr =
ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct rte_eth_link link, old;
+ u32 fc = AQ_NIC_FC_OFF;
int err = 0;
link.link_status = ETH_LINK_DOWN;
@@ -915,6 +915,15 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
if (link.link_status == old.link_status)
return -1;
+ /* Driver has to update flow control settings on RX block
+ * on any link event.
+ * We should query FW whether it negotiated FC.
+ */
+ if (hw->aq_fw_ops->get_flow_control) {
+ hw->aq_fw_ops->get_flow_control(hw, &fc);
+ hw_atl_b0_set_fc(hw, fc, 0U);
+ }
+
return 0;
}
@@ -1094,8 +1103,6 @@ atl_dev_interrupt_handler(void *param)
atl_dev_interrupt_action(dev, dev->intr_handle);
}
-#define SFP_EEPROM_SIZE 0xff
-
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
@@ -1106,28 +1113,46 @@ static int
atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int dev_addr = SMBUS_DEVICE_ID;
if (hw->aq_fw_ops->get_eeprom == NULL)
return -ENOTSUP;
- if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
+ if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
+ eeprom->data == NULL)
+ return -EINVAL;
+
+ if (eeprom->magic > 0x7F)
return -EINVAL;
- return hw->aq_fw_ops->get_eeprom(hw, eeprom->data, eeprom->length);
+ if (eeprom->magic)
+ dev_addr = eeprom->magic;
+
+ return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
+ eeprom->length, eeprom->offset);
}
static int
atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int dev_addr = SMBUS_DEVICE_ID;
if (hw->aq_fw_ops->set_eeprom == NULL)
return -ENOTSUP;
- if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
+ if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
+ eeprom->data == NULL)
+ return -EINVAL;
+
+ if (eeprom->magic > 0x7F)
return -EINVAL;
- return hw->aq_fw_ops->set_eeprom(hw, eeprom->data, eeprom->length);
+ if (eeprom->magic)
+ dev_addr = eeprom->magic;
+
+ return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
+ eeprom->length, eeprom->offset);
}
static int
@@ -1160,16 +1185,21 @@ static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 fc = AQ_NIC_FC_OFF;
+
+ if (hw->aq_fw_ops->get_flow_control == NULL)
+ return -ENOTSUP;
+
+ hw->aq_fw_ops->get_flow_control(hw, &fc);
- if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
+ if (fc == AQ_NIC_FC_OFF)
fc_conf->mode = RTE_FC_NONE;
- else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
+ else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
fc_conf->mode = RTE_FC_FULL;
- else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ else if (fc & AQ_NIC_FC_RX)
fc_conf->mode = RTE_FC_RX_PAUSE;
- else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ else if (fc & AQ_NIC_FC_TX)
fc_conf->mode = RTE_FC_TX_PAUSE;
-
return 0;
}
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 40c91379..fe007704 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -812,12 +812,12 @@ atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
ol_flags = m->ol_flags;
if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
return i;
}
if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ rte_errno = ENOTSUP;
return i;
}
@@ -946,7 +946,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
}
- PMD_RX_LOG(ERR, "port_id=%u queue_id=%u tail=%u "
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
"eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
(unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id,
@@ -981,7 +981,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
while (true) {
new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (new_mbuf == NULL) {
- PMD_RX_LOG(ERR,
+ PMD_RX_LOG(DEBUG,
"RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id);
@@ -1084,7 +1084,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
adapter->sw_stats.q_ibytes[rxq->queue_id] +=
rx_mbuf_first->pkt_len;
- PMD_RX_LOG(ERR, "add mbuf segs=%d pkt_len=%d",
+ PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
rx_mbuf_first->nb_segs,
rx_mbuf_first->pkt_len);
}
@@ -1104,7 +1104,7 @@ err_stop:
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
if (nb_hold > rxq->rx_free_thresh) {
- PMD_RX_LOG(ERR, "port_id=%u queue_id=%u rx_tail=%u "
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
"nb_hold=%u nb_rx=%u",
(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
(unsigned int)tail, (unsigned int)nb_hold,
@@ -1129,8 +1129,6 @@ atl_xmit_cleanup(struct atl_tx_queue *txq)
struct hw_atl_txd_s *txd;
int to_clean = 0;
- PMD_INIT_FUNC_TRACE();
-
if (txq != NULL) {
sw_ring = txq->sw_ring;
int head = txq->tx_head;
@@ -1181,11 +1179,7 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
uint32_t tx_cmd = 0;
uint64_t ol_flags = tx_pkt->ol_flags;
- PMD_INIT_FUNC_TRACE();
-
if (ol_flags & PKT_TX_TCP_SEG) {
- PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
-
tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
txc->cmd = 0x4;
@@ -1240,8 +1234,6 @@ atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
u32 tx_cmd = 0U;
int desc_count = 0;
- PMD_INIT_FUNC_TRACE();
-
tail = txq->tx_tail;
txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
@@ -1356,4 +1348,3 @@ atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
-
diff --git a/drivers/net/atlantic/atl_types.h b/drivers/net/atlantic/atl_types.h
index 3d90f6ca..c53d5896 100644
--- a/drivers/net/atlantic/atl_types.h
+++ b/drivers/net/atlantic/atl_types.h
@@ -94,6 +94,8 @@ struct aq_hw_s {
struct hw_atl_stats_s last_stats;
struct aq_stats_s curr_stats;
+ u32 caps_lo;
+
u64 speed;
unsigned int chip_features;
u32 fw_ver_actual;
@@ -133,13 +135,16 @@ struct aq_fw_ops {
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
u32 *supported_rates);
+ int (*get_flow_control)(struct aq_hw_s *self, u32 *fc);
int (*set_flow_control)(struct aq_hw_s *self);
int (*led_control)(struct aq_hw_s *self, u32 mode);
- int (*get_eeprom)(struct aq_hw_s *self, u32 *data, u32 len);
+ int (*get_eeprom)(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset);
- int (*set_eeprom)(struct aq_hw_s *self, u32 *data, u32 len);
+ int (*set_eeprom)(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset);
};
struct atl_sw_stats {
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
index 9400e0ed..a76268e9 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
@@ -26,12 +26,17 @@ int hw_atl_b0_hw_reset(struct aq_hw_s *self)
return err;
}
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+ return 0;
+}
+
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
- bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -64,7 +69,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = 0;
buff_size = HW_ATL_B0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -76,9 +80,7 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_rpb_rx_xoff_en_per_tc_set(self,
- is_rx_flow_control ? 1U : 0U,
- tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@@ -290,6 +292,8 @@ int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_en_set(self, 1U);
+
hw_atl_rdm_rx_dca_en_set(self, 0U);
hw_atl_rdm_rx_dca_mode_set(self, 0U);
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
index 06feb56c..d1ba2ace 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
@@ -11,6 +11,8 @@
int hw_atl_b0_hw_reset(struct aq_hw_s *self);
int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr);
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
+
int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
int index, int size, int cpu, int vec);
int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr,
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
index f11093a5..26260194 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
@@ -306,6 +306,11 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
HW_ATL_MIF_CMD)),
1, 1000U);
+ if (err) {
+ err = -ETIMEDOUT;
+ goto err_exit;
+ }
+
*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
a += 4;
}
@@ -328,12 +333,13 @@ int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
goto err_exit;
}
if (IS_CHIP_FEATURE(REVISION_B1)) {
- u32 offset = 0;
+ u32 mbox_offset = (a - self->rpc_addr) / sizeof(u32);
+ u32 data_offset = 0;
- for (; offset < cnt; ++offset) {
- aq_hw_write_reg(self, 0x328, p[offset]);
+ for (; data_offset < cnt; ++mbox_offset, ++data_offset) {
+ aq_hw_write_reg(self, 0x328, p[data_offset]);
aq_hw_write_reg(self, 0x32C,
- (0x80000000 | (0xFFFF & (offset * 4))));
+ (0x80000000 | (0xFFFF & (mbox_offset * 4))));
hw_atl_mcp_up_force_intr_set(self, 1);
/* 1000 times by 10us = 10ms */
AQ_HW_WAIT_FOR((aq_hw_read_reg(self,
@@ -462,8 +468,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
- if (err < 0)
- goto err_exit;
if (rpc) {
if (fw.len) {
@@ -875,8 +879,7 @@ static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
}
err = hw_atl_utils_fw_rpc_call(self, rpc_size);
- if (err < 0)
- goto err_exit;
+
err_exit:
return err;
}
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
index 5f3f7084..b1f03f42 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
@@ -336,15 +336,8 @@ struct offload_info {
u8 buf[0];
} __attribute__((__packed__));
-struct smbus_read_request {
- u32 offset; /* not used */
- u32 device_id;
- u32 address;
- u32 length;
-} __attribute__((__packed__));
-
-struct smbus_write_request {
- u32 offset; /* not used */
+struct smbus_request {
+ u32 msg_id; /* not used */
u32 device_id;
u32 address;
u32 length;
@@ -389,8 +382,6 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 10U
#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 13U // 0xd
-#define SMBUS_READ_REQUEST BIT(13)
-#define SMBUS_WRITE_REQUEST BIT(14)
#define SMBUS_DEVICE_ID 0x50
enum hw_atl_fw2x_rate {
@@ -414,6 +405,9 @@ enum hw_atl_fw2x_caps_lo {
CAPS_LO_2P5GBASET_FD,
CAPS_LO_5GBASET_FD,
CAPS_LO_10GBASET_FD,
+ CAPS_LO_AUTONEG,
+ CAPS_LO_SMBUS_READ,
+ CAPS_LO_SMBUS_WRITE,
};
enum hw_atl_fw2x_caps_hi {
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 6841d9bc..11f14d1a 100644
--- a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -34,7 +34,6 @@
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
-#define HW_ATL_FW_FEATURE_EEPROM 0x03010025
#define HW_ATL_FW_FEATURE_LED 0x03010026
struct fw2x_msg_wol_pattern {
@@ -62,6 +61,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static int aq_fw2x_init(struct aq_hw_s *self)
{
int err = 0;
+ struct hw_aq_atl_utils_mbox mbox;
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
@@ -70,6 +70,12 @@ static int aq_fw2x_init(struct aq_hw_s *self)
AQ_HW_WAIT_FOR(0U != (self->rpc_addr =
aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)),
1000U, 100U);
+
+ /* Read caps */
+ hw_atl_utils_mpi_read_stats(self, &mbox);
+
+ self->caps_lo = mbox.info.caps_lo;
+
return err;
}
@@ -462,7 +468,15 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
return err;
}
+static int aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fc)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ *fc = ((mpi_state & BIT(CAPS_HI_PAUSE)) ? AQ_NIC_FC_RX : 0) |
+ ((mpi_state & BIT(CAPS_HI_ASYMMETRIC_PAUSE)) ? AQ_NIC_FC_TX : 0);
+
+ return 0;
+}
static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
{
@@ -484,38 +498,42 @@ static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
return 0;
}
-static int aq_fw2x_get_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
+static int aq_fw2x_get_eeprom(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset)
{
- int err = 0;
- struct smbus_read_request request;
- u32 mpi_opts;
+ u32 bytes_remains = len % sizeof(u32);
+ u32 num_dwords = len / sizeof(u32);
+ struct smbus_request request;
u32 result = 0;
+ u32 mpi_opts;
+ int err = 0;
- if (self->fw_ver_actual < HW_ATL_FW_FEATURE_EEPROM)
+ if ((self->caps_lo & BIT(CAPS_LO_SMBUS_READ)) == 0)
return -EOPNOTSUPP;
- request.device_id = SMBUS_DEVICE_ID;
- request.address = 0;
+ request.msg_id = 0;
+ request.device_id = dev_addr;
+ request.address = offset;
request.length = len;
/* Write SMBUS request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
(u32 *)(void *)&request,
- RTE_ALIGN(sizeof(request), sizeof(u32)));
+ sizeof(request) / sizeof(u32));
if (err < 0)
return err;
- /* Toggle 0x368.SMBUS_READ_REQUEST bit */
+ /* Toggle 0x368.CAPS_LO_SMBUS_READ bit */
mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR);
- mpi_opts ^= SMBUS_READ_REQUEST;
+ mpi_opts ^= BIT(CAPS_LO_SMBUS_READ);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts);
/* Wait until REQUEST_BIT matched in 0x370 */
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) &
- SMBUS_READ_REQUEST) == (mpi_opts & SMBUS_READ_REQUEST),
+ BIT(CAPS_LO_SMBUS_READ)) == (mpi_opts & BIT(CAPS_LO_SMBUS_READ)),
10U, 10000U);
if (err < 0)
@@ -523,64 +541,106 @@ static int aq_fw2x_get_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32),
&result,
- RTE_ALIGN(sizeof(result), sizeof(u32)));
+ sizeof(result) / sizeof(u32));
if (err < 0)
return err;
- if (result == 0) {
+ if (result)
+ return -EIO;
+
+ if (num_dwords) {
err = hw_atl_utils_fw_downld_dwords(self,
- self->rpc_addr + sizeof(u32) * 2,
- data,
- RTE_ALIGN(len, sizeof(u32)));
+ self->rpc_addr + sizeof(u32) * 2,
+ data,
+ num_dwords);
if (err < 0)
return err;
}
+ if (bytes_remains) {
+ u32 val = 0;
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->rpc_addr + (sizeof(u32) * 2) +
+ (num_dwords * sizeof(u32)),
+ &val,
+ 1);
+
+ if (err < 0)
+ return err;
+
+ rte_memcpy((u8 *)data + len - bytes_remains,
+ &val, bytes_remains);
+ }
+
return 0;
}
-static int aq_fw2x_set_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
+static int aq_fw2x_set_eeprom(struct aq_hw_s *self, int dev_addr,
+ u32 *data, u32 len, u32 offset)
{
- struct smbus_write_request request;
+ struct smbus_request request;
u32 mpi_opts, result = 0;
int err = 0;
- if (self->fw_ver_actual < HW_ATL_FW_FEATURE_EEPROM)
+ if ((self->caps_lo & BIT(CAPS_LO_SMBUS_WRITE)) == 0)
return -EOPNOTSUPP;
- request.device_id = SMBUS_DEVICE_ID;
- request.address = 0;
+ request.msg_id = 0;
+ request.device_id = dev_addr;
+ request.address = offset;
request.length = len;
/* Write SMBUS request to cfg memory */
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
(u32 *)(void *)&request,
- RTE_ALIGN(sizeof(request), sizeof(u32)));
+ sizeof(request) / sizeof(u32));
if (err < 0)
return err;
/* Write SMBUS data to cfg memory */
- err = hw_atl_utils_fw_upload_dwords(self,
- self->rpc_addr + sizeof(request),
- (u32 *)(void *)data,
- RTE_ALIGN(len, sizeof(u32)));
+ u32 num_dwords = len / sizeof(u32);
+ u32 bytes_remains = len % sizeof(u32);
- if (err < 0)
- return err;
+ if (num_dwords) {
+ err = hw_atl_utils_fw_upload_dwords(self,
+ self->rpc_addr + sizeof(request),
+ (u32 *)(void *)data,
+ num_dwords);
+
+ if (err < 0)
+ return err;
+ }
- /* Toggle 0x368.SMBUS_WRITE_REQUEST bit */
+ if (bytes_remains) {
+ u32 val = 0;
+
+ rte_memcpy(&val, (u8 *)data + (sizeof(u32) * num_dwords),
+ bytes_remains);
+
+ err = hw_atl_utils_fw_upload_dwords(self,
+ self->rpc_addr + sizeof(request) +
+ (num_dwords * sizeof(u32)),
+ &val,
+ 1);
+
+ if (err < 0)
+ return err;
+ }
+
+ /* Toggle 0x368.CAPS_LO_SMBUS_WRITE bit */
mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR);
- mpi_opts ^= SMBUS_WRITE_REQUEST;
+ mpi_opts ^= BIT(CAPS_LO_SMBUS_WRITE);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts);
/* Wait until REQUEST_BIT matched in 0x370 */
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) &
- SMBUS_WRITE_REQUEST) == (mpi_opts & SMBUS_WRITE_REQUEST),
+ BIT(CAPS_LO_SMBUS_WRITE)) == (mpi_opts & BIT(CAPS_LO_SMBUS_WRITE)),
10U, 10000U);
if (err < 0)
@@ -589,11 +649,14 @@ static int aq_fw2x_set_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
/* Read status of write operation */
err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32),
&result,
- RTE_ALIGN(sizeof(result), sizeof(u32)));
+ sizeof(result) / sizeof(u32));
if (err < 0)
return err;
+ if (result)
+ return -EIO;
+
return 0;
}
@@ -611,6 +674,7 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.get_cable_len = aq_fw2x_get_cable_len,
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
+ .get_flow_control = aq_fw2x_get_flow_control,
.set_flow_control = aq_fw2x_set_flow_control,
.led_control = aq_fw2x_led_control,
.get_eeprom = aq_fw2x_get_eeprom,
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index 13eec1b4..4dc61d9f 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -498,7 +498,6 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
@@ -1159,7 +1158,7 @@ avf_enable_irq0(struct avf_hw *hw)
AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);
AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
- AVFINT_DYN_CTL01_ITR_INDX_MASK);
+ AVFINT_DYN_CTL01_CLEARPBA_MASK | AVFINT_DYN_CTL01_ITR_INDX_MASK);
AVF_WRITE_FLUSH(hw);
}
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index d25d54ca..34f60f15 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -1351,9 +1351,9 @@ do { \
#define SET_BITS_LE(_var, _index, _width, _val) \
do { \
- (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\
+ (_var) &= rte_cpu_to_le_32(~(((0x1U << (_width)) - 1) << (_index)));\
(_var) |= rte_cpu_to_le_32((((_val) & \
- ((0x1 << (_width)) - 1)) << (_index))); \
+ ((0x1U << (_width)) - 1)) << (_index))); \
} while (0)
/* Bit setting and getting macros based on register fields
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 26b3828e..3e705c7a 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -25,6 +25,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <zlib.h>
+#include <rte_string_fns.h>
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
@@ -123,7 +124,7 @@ static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
@@ -184,6 +185,7 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
}
dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
+ dma->mzone = (const void *)z;
PMD_DRV_LOG(DEBUG, sc,
"%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
@@ -191,6 +193,19 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
return 0;
}
+void bnx2x_dma_free(struct bnx2x_dma *dma)
+{
+ if (dma->mzone == NULL)
+ return;
+
+ rte_memzone_free((const struct rte_memzone *)dma->mzone);
+ dma->sc = NULL;
+ dma->paddr = 0;
+ dma->vaddr = NULL;
+ dma->nseg = 0;
+ dma->mzone = NULL;
+}
+
static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
{
uint32_t lock_status;
@@ -1099,6 +1114,12 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
atomic_load_acq_long(&sc->cq_spq_left),
atomic_load_acq_long(&sc->eq_spq_left));
+ /* RAMROD completion is processed in bnx2x_intr_legacy()
+ * which can run from different contexts.
+ * Ask bnx2x_intr_intr() to process RAMROD
+ * completion whenever it gets scheduled.
+ */
+ rte_atomic32_set(&sc->scan_fp, 1);
bnx2x_sp_prod_update(sc);
return 0;
@@ -2435,6 +2456,7 @@ static int bnx2x_alloc_mem(struct bnx2x_softc *sc)
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc)
{
+ bnx2x_dma_free(&sc->fw_stats_dma);
sc->fw_stats_num = 0;
sc->fw_stats_req_size = 0;
@@ -4523,7 +4545,7 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
return rc;
}
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp)
{
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
@@ -4538,14 +4560,14 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
- if (scan_fp) {
+ if (rte_atomic32_read(&sc->scan_fp) == 1) {
if (bnx2x_has_rx_work(fp)) {
more_rx = bnx2x_rxeof(sc, fp);
}
if (more_rx) {
/* still more work to do */
- bnx2x_handle_fp_tq(fp, scan_fp);
+ bnx2x_handle_fp_tq(fp);
return;
}
}
@@ -4561,7 +4583,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
* then calls a separate routine to handle the various
* interrupt causes: link, RX, and TX.
*/
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
+int bnx2x_intr_legacy(struct bnx2x_softc *sc)
{
struct bnx2x_fastpath *fp;
uint32_t status, mask;
@@ -4593,7 +4615,7 @@ int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
/* acknowledge and disable further fastpath interrupts */
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
0, IGU_INT_DISABLE, 0);
- bnx2x_handle_fp_tq(fp, scan_fp);
+ bnx2x_handle_fp_tq(fp);
status &= ~mask;
}
}
@@ -8081,6 +8103,27 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
}
+ val = sc->devinfo.bc_ver >> 8;
+ if (val < BNX2X_BC_VER) {
+ /* for now only warn later we might need to enforce this */
+ PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC\n",
+ BNX2X_BC_VER, val);
+ }
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY :
+ 0;
+
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
+ sc->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
/* get the initial value of the link params */
sc->link_params.multi_phy_config =
SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
@@ -11741,13 +11784,13 @@ static const char *get_bnx2x_flags(uint32_t flags)
for (i = 0; i < 5; i++)
if (flags & (1 << i)) {
- strcat(flag_str, flag[i]);
+ strlcat(flag_str, flag[i], sizeof(flag_str));
flags ^= (1 << i);
}
if (flags) {
static char unknown[BNX2X_INFO_STR_MAX];
snprintf(unknown, 32, "Unknown flag mask %x", flags);
- strcat(flag_str, unknown);
+ strlcat(flag_str, unknown, sizeof(flag_str));
}
return flag_str;
}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 32a12294..ef1688ff 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -119,6 +119,8 @@ int bnx2x_ilog2(int x)
#define ilog2(x) bnx2x_ilog2(x)
#endif
+#define BNX2X_BC_VER 0x040200
+
#include "ecore_sp.h"
struct bnx2x_device_type {
@@ -319,6 +321,7 @@ struct bnx2x_dma {
rte_iova_t paddr;
void *vaddr;
int nseg;
+ const void *mzone;
char msg[RTE_MEMZONE_NAMESIZE - 6];
};
@@ -1089,7 +1092,7 @@ struct bnx2x_softc {
#define PERIODIC_STOP 0
#define PERIODIC_GO 1
volatile unsigned long periodic_flags;
-
+ rte_atomic32_t scan_fp;
struct bnx2x_fastpath fp[MAX_RSS_CHAINS];
struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS];
@@ -1753,7 +1756,7 @@ int bnx2x_cmpxchg(volatile int *addr, int old, int new);
int bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size,
struct bnx2x_dma *dma, const char *msg, uint32_t align);
-
+void bnx2x_dma_free(struct bnx2x_dma *dma);
uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type);
uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode);
uint32_t bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type,
@@ -1938,7 +1941,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0);
uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
void bnx2x_print_device_info(struct bnx2x_softc *sc);
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
+int bnx2x_intr_legacy(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
int bnx2x_complete_sp(struct bnx2x_softc *sc);
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index cc7816dd..c628cdc0 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -107,14 +107,15 @@ bnx2x_link_update(struct rte_eth_dev *dev)
}
static void
-bnx2x_interrupt_action(struct rte_eth_dev *dev)
+bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt)
{
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
- bnx2x_intr_legacy(sc, 0);
+ bnx2x_intr_legacy(sc);
- if (sc->periodic_flags & PERIODIC_GO)
+ if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) &&
+ !intr_cxt)
bnx2x_periodic_callout(sc);
link_status = REG_RD(sc, sc->link_params.shmem_base +
offsetof(struct shmem_region,
@@ -131,9 +132,7 @@ bnx2x_interrupt_handler(void *param)
PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
- bnx2x_interrupt_action(dev);
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
+ bnx2x_interrupt_action(dev, 1);
rte_intr_enable(&sc->pci_dev->intr_handle);
}
@@ -144,7 +143,7 @@ static void bnx2x_periodic_start(void *param)
int ret = 0;
atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
- bnx2x_interrupt_action(dev);
+ bnx2x_interrupt_action(dev, 0);
if (IS_PF(sc)) {
ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
bnx2x_periodic_start, (void *)dev);
@@ -164,6 +163,8 @@ void bnx2x_periodic_stop(void *param)
atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
+
+ PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped");
}
/*
@@ -180,8 +181,10 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ dev->data->mtu = sc->mtu;
+ }
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
@@ -210,6 +213,7 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
return -ENXIO;
}
+ bnx2x_dev_rxtx_init_dummy(dev);
return 0;
}
@@ -222,8 +226,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
/* start the periodic callout */
- if (sc->periodic_flags & PERIODIC_STOP)
+ if (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP) {
bnx2x_periodic_start(dev);
+ PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
+ }
ret = bnx2x_init(sc);
if (ret) {
@@ -239,11 +245,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
}
- ret = bnx2x_dev_rx_init(dev);
- if (ret != 0) {
- PMD_DRV_LOG(DEBUG, sc, "bnx2x_dev_rx_init returned error code");
- return -3;
- }
+ bnx2x_dev_rxtx_init(dev);
bnx2x_print_device_info(sc);
@@ -258,6 +260,8 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
+ bnx2x_dev_rxtx_init_dummy(dev);
+
if (IS_PF(sc)) {
rte_intr_disable(&sc->pci_dev->intr_handle);
rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index ca28aacc..e5a2b25b 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -311,7 +311,6 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_bd_tail = 0;
txq->tx_bd_head = 0;
txq->nb_tx_avail = txq->nb_tx_desc;
- dev->tx_pkt_burst = bnx2x_xmit_pkts;
dev->data->tx_queues[queue_idx] = txq;
if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;
@@ -441,14 +440,26 @@ next_rx:
return nb_rx;
}
-int
-bnx2x_dev_rx_init(struct rte_eth_dev *dev)
+static uint16_t
+bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
{
- dev->rx_pkt_burst = bnx2x_recv_pkts;
-
return 0;
}
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev)
+{
+ dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+}
+
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+ dev->rx_pkt_burst = bnx2x_recv_pkts;
+ dev->tx_pkt_burst = bnx2x_xmit_pkts;
+}
+
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index 6ad4928c..3f4692b4 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -74,7 +74,8 @@ int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
void bnx2x_dev_rx_queue_release(void *rxq);
void bnx2x_dev_tx_queue_release(void *txq);
-int bnx2x_dev_rx_init(struct rte_eth_dev *dev);
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev);
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev);
void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
#endif /* _BNX2X_RXTX_H_ */
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 1192e5dd..74189eed 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -3545,7 +3545,7 @@ struct igu_regular
#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
#define IGU_REGULAR_CLEANUP_SET (0x1<<30) /* BitField sb_id_and_flags */
#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
-#define IGU_REGULAR_BCLEANUP (0x1<<31) /* BitField sb_id_and_flags */
+#define IGU_REGULAR_BCLEANUP (0x1U<<31) /* BitField sb_id_and_flags */
#define IGU_REGULAR_BCLEANUP_SHIFT 31
uint32_t reserved_2;
};
diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h
index d69e857b..7af9a2d8 100644
--- a/drivers/net/bnx2x/ecore_reg.h
+++ b/drivers/net/bnx2x/ecore_reg.h
@@ -1981,7 +1981,7 @@
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
-#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1U<<31)
#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 6d2bb815..43194095 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -291,25 +291,33 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
cnt *= 20;
ECORE_MSG(sc, "waiting for state to become %d", state);
+ /* being over protective to remind bnx2x_intr_legacy() to
+ * process RAMROD
+ */
+ rte_atomic32_set(&sc->scan_fp, 1);
ECORE_MIGHT_SLEEP();
while (cnt--) {
- bnx2x_intr_legacy(sc, 1);
+ bnx2x_intr_legacy(sc);
if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
+ rte_atomic32_set(&sc->scan_fp, 0);
return ECORE_SUCCESS;
}
ECORE_WAIT(sc, delay_us);
- if (sc->panic)
+ if (sc->panic) {
+ rte_atomic32_set(&sc->scan_fp, 0);
return ECORE_IO;
+ }
}
/* timeout! */
PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
+ rte_atomic32_set(&sc->scan_fp, 0);
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index f295bf5a..7126097d 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -151,14 +151,15 @@ typedef rte_spinlock_t ECORE_MUTEX_SPIN;
} \
} while (0)
-#define ECORE_ILT_FREE(x, y, size) \
- do { \
- if (x) { \
- rte_free(x); \
- x = NULL; \
- y = 0; \
- } \
- } while (0)
+#define ECORE_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ bnx2x_dma_free((struct bnx2x_dma *)x); \
+ rte_free(x); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index c8b08bc3..dd70ac6c 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -440,7 +440,7 @@ struct elink_params {
#define ELINK_EEE_MODE_OUTPUT_TIME (1 << 28)
#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1 << 29)
#define ELINK_EEE_MODE_ENABLE_LPI (1 << 30)
-#define ELINK_EEE_MODE_ADV_LPI (1 << 31)
+#define ELINK_EEE_MODE_ADV_LPI (1U << 31)
uint16_t hw_led_mode; /* part of the hw_config read from the shmem */
uint32_t multi_phy_config;
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index f75b0ad3..5535c376 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -263,7 +263,7 @@ struct bnxt {
#define BNXT_FLAG_TRUSTED_VF_EN (1 << 11)
#define BNXT_FLAG_DFLT_VNIC_SET (1 << 12)
#define BNXT_FLAG_NEW_RM (1 << 30)
-#define BNXT_FLAG_INIT_DONE (1 << 31)
+#define BNXT_FLAG_INIT_DONE (1U << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 801c6ffa..e26b9e3c 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -2649,7 +2649,7 @@ static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
return -ERANGE;
}
win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
- rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off));
+ rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
return 0;
}
@@ -2680,10 +2680,10 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
static void bnxt_unmap_ptp_regs(struct bnxt *bp)
{
- rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
- BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16));
- rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
- BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20));
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
}
static uint64_t bnxt_cc_read(struct bnxt *bp)
@@ -2733,8 +2733,8 @@ static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
return -EAGAIN;
port_id = pf->port_id;
- rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
- ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]));
+ rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
@@ -3242,10 +3242,8 @@ skip_init:
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(WARNING,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(WARNING,
- "Using rte_mem_virt2iova()\n");
+ PMD_DRV_LOG(INFO,
+ "Memzone physical address same as virtual using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
PMD_DRV_LOG(ERR,
@@ -3548,7 +3546,7 @@ static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver bnxt_rte_pmd = {
.id_table = bnxt_pci_id_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
- RTE_PCI_DRV_INTR_LSC,
+ RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_IOVA_AS_VA,
.probe = bnxt_pci_probe,
.remove = bnxt_pci_remove,
};
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 5345d393..17e2909a 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -100,7 +100,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
}
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- PMD_DRV_LOG(ERR, "pools = %u nb_q_per_grp = %u\n", pools, nb_q_per_grp);
+ PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
+ pools, nb_q_per_grp);
start_grp_id = 0;
end_grp_id = nb_q_per_grp;
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 1bfc63d9..dc695e17 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -154,7 +154,7 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
if (tpa_start1->flags2 &
rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
if (likely(tpa_start1->flags2 &
rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
@@ -437,7 +437,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
(RX_PKT_CMPL_METADATA_VID_MASK |
RX_PKT_CMPL_METADATA_DE |
RX_PKT_CMPL_METADATA_PRI_MASK);
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index dd847c6f..1e6a3fc7 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -664,7 +664,7 @@ max_index(uint64_t *a, int n)
* @param port_pos Port to assign.
*/
static void
-selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
+selection_logic(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *agg, *port;
uint16_t slaves_count, new_agg_id, i, j = 0;
@@ -781,16 +781,23 @@ link_speed_key(uint16_t speed) {
}
static void
-rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
+rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id,
struct rte_mbuf *lacp_pkt) {
struct lacpdu_header *lacp;
+ struct lacpdu_actor_partner_params *partner;
if (lacp_pkt != NULL) {
lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
- /* This is LACP frame so pass it to rx_machine */
- rx_machine(internals, slave_id, &lacp->lacpdu);
+ partner = &lacp->lacpdu.partner;
+ if (is_same_ether_addr(&partner->port_params.system,
+ &internals->mode4.mac_addr)) {
+ /* This LACP frame is sending to the bonding port
+ * so pass it to rx_machine.
+ */
+ rx_machine(internals, slave_id, &lacp->lacpdu);
+ }
rte_pktmbuf_free(lacp_pkt);
} else
rx_machine(internals, slave_id, NULL);
@@ -805,8 +812,8 @@ bond_mode_8023ad_periodic_cb(void *arg)
struct rte_eth_link link_info;
struct ether_addr slave_addr;
struct rte_mbuf *lacp_pkt = NULL;
-
- uint8_t i, slave_id;
+ uint16_t slave_id;
+ uint16_t i;
/* Update link status on each port */
@@ -1149,7 +1156,7 @@ int
bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
{
struct bond_dev_private *internals = bond_dev->data->dev_private;
- uint8_t i;
+ uint16_t i;
for (i = 0; i < internals->active_slave_count; i++)
bond_mode_8023ad_activate_slave(bond_dev,
@@ -1165,6 +1172,7 @@ bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
struct mode8023ad_private *mode4 = &internals->mode4;
static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
+ rte_eth_macaddr_get(internals->port_id, &mode4->mac_addr);
if (mode4->slowrx_cb)
return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
bond_dev);
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index c51426b8..f91902eb 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -150,6 +150,7 @@ struct mode8023ad_private {
uint64_t update_timeout_us;
rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
uint8_t external_sm;
+ struct ether_addr mac_addr;
struct rte_eth_link slave_link;
/***< slave link properties */
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index c3891c7e..d3e16d4b 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -18,10 +18,10 @@ simple_hash(uint8_t *hash_start, int hash_size)
return hash;
}
-static uint8_t
+static uint16_t
calculate_slave(struct bond_dev_private *internals)
{
- uint8_t idx;
+ uint16_t idx;
idx = (internals->mode6.last_slave + 1) % internals->active_slave_count;
internals->mode6.last_slave = idx;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index ac084c4f..a23988dc 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -76,7 +76,7 @@ void
activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t active_count = internals->active_slave_count;
+ uint16_t active_count = internals->active_slave_count;
if (internals->mode == BONDING_MODE_8023AD)
bond_mode_8023ad_activate_slave(eth_dev, port_id);
@@ -490,10 +490,6 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
}
}
- /* Inherit eth dev link properties from first slave */
- link_properties_set(bonded_eth_dev,
- &(slave_eth_dev->data->dev_link));
-
/* Make primary slave */
internals->primary_port = slave_port_id;
internals->current_primary_port = slave_port_id;
@@ -800,7 +796,7 @@ rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
uint16_t len)
{
struct bond_dev_private *internals;
- uint8_t i;
+ uint16_t i;
if (valid_bonded_port_id(bonded_port_id) != 0)
return -1;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 7ed69b38..154257ff 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -353,7 +353,7 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
for (i = 0; i < nb_bufs; i++) {
/* Populate slave mbuf arrays with mbufs for that slave. */
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
@@ -404,8 +404,10 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
- uint8_t i, j, k;
uint8_t subtype;
+ uint16_t i;
+ uint16_t j;
+ uint16_t k;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
@@ -487,35 +489,31 @@ uint32_t burstnumberTX;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static void
-arp_op_name(uint16_t arp_op, char *buf)
+arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
switch (arp_op) {
case ARP_OP_REQUEST:
- snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
+ snprintf(buf, buf_len, "%s", "ARP Request");
return;
case ARP_OP_REPLY:
- snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
+ snprintf(buf, buf_len, "%s", "ARP Reply");
return;
case ARP_OP_REVREQUEST:
- snprintf(buf, sizeof("Reverse ARP Request"), "%s",
- "Reverse ARP Request");
+ snprintf(buf, buf_len, "%s", "Reverse ARP Request");
return;
case ARP_OP_REVREPLY:
- snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
- "Reverse ARP Reply");
+ snprintf(buf, buf_len, "%s", "Reverse ARP Reply");
return;
case ARP_OP_INVREQUEST:
- snprintf(buf, sizeof("Peer Identify Request"), "%s",
- "Peer Identify Request");
+ snprintf(buf, buf_len, "%s", "Peer Identify Request");
return;
case ARP_OP_INVREPLY:
- snprintf(buf, sizeof("Peer Identify Reply"), "%s",
- "Peer Identify Reply");
+ snprintf(buf, buf_len, "%s", "Peer Identify Reply");
return;
default:
break;
}
- snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
+ snprintf(buf, buf_len, "%s", "Unknown");
return;
}
#endif
@@ -619,7 +617,8 @@ mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
- arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
+ arp_op_name(rte_be_to_cpu_16(arp_h->arp_op),
+ ArpOp, sizeof(ArpOp));
MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
}
#endif
@@ -774,7 +773,7 @@ ipv6_hash(struct ipv6_hdr *ipv6_hdr)
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
struct ether_hdr *eth_hdr;
uint32_t hash;
@@ -791,7 +790,7 @@ burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
uint16_t i;
struct ether_hdr *eth_hdr;
@@ -829,7 +828,7 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
struct ether_hdr *eth_hdr;
uint16_t proto;
@@ -899,7 +898,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
struct bwg_slave {
uint64_t bwg_left_int;
uint64_t bwg_left_remainder;
- uint8_t slave;
+ uint16_t slave;
};
void
@@ -952,11 +951,12 @@ bond_ethdev_update_tlb_slave_cb(void *arg)
struct bond_dev_private *internals = arg;
struct rte_eth_stats slave_stats;
struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
+ uint16_t slave_count;
uint64_t tx_bytes;
uint8_t update_stats = 0;
- uint8_t i, slave_id;
+ uint16_t slave_id;
+ uint16_t i;
internals->slave_update_idx++;
@@ -1243,7 +1243,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
for (i = 0; i < nb_bufs; i++) {
/* Populate slave mbuf arrays with mbufs for that slave. */
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
@@ -1298,9 +1298,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint16_t i;
- if (unlikely(nb_bufs == 0))
- return 0;
-
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
@@ -1310,6 +1307,30 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
+ /* Check for LACP control packets and send if available */
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
+ struct rte_mbuf *ctrl_pkt = NULL;
+
+ if (likely(rte_ring_empty(port->tx_ring)))
+ continue;
+
+ if (rte_ring_dequeue(port->tx_ring,
+ (void **)&ctrl_pkt) != -ENOENT) {
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, &ctrl_pkt, 1);
+ /*
+ * re-enqueue LAG control plane packets to buffering
+ * ring if transmission fails so the packet isn't lost.
+ */
+ if (slave_tx_count != 1)
+ rte_ring_enqueue(port->tx_ring, ctrl_pkt);
+ }
+ }
+
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
@@ -1319,7 +1340,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
slave_port_ids[i];
}
- if (likely(dist_slave_count > 1)) {
+ if (likely(dist_slave_count > 0)) {
/*
* Populate slaves mbuf with the packets which are to be sent
@@ -1333,7 +1354,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
* Populate slave mbuf arrays with mbufs for that
* slave
*/
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
bufs[i];
@@ -1365,27 +1386,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
}
}
- /* Check for LACP control packets and send if available */
- for (i = 0; i < slave_count; i++) {
- struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
- struct rte_mbuf *ctrl_pkt = NULL;
-
- if (likely(rte_ring_empty(port->tx_ring)))
- continue;
-
- if (rte_ring_dequeue(port->tx_ring,
- (void **)&ctrl_pkt) != -ENOENT) {
- slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
- bd_tx_q->queue_id, &ctrl_pkt, 1);
- /*
- * re-enqueue LAG control plane packets to buffering
- * ring if transmission fails so the packet isn't lost.
- */
- if (slave_tx_count != 1)
- rte_ring_enqueue(port->tx_ring, ctrl_pkt);
- }
- }
-
return total_tx_count;
}
@@ -1396,8 +1396,9 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t tx_failed_flag = 0, num_of_slaves;
uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint8_t tx_failed_flag = 0;
+ uint16_t num_of_slaves;
uint16_t max_nb_of_tx_pkts = 0;
@@ -1449,7 +1450,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
return max_nb_of_tx_pkts;
}
-void
+static void
link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
@@ -1474,7 +1475,7 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
}
}
-int
+static int
link_properties_valid(struct rte_eth_dev *ethdev,
struct rte_eth_link *slave_link)
{
@@ -1948,7 +1949,7 @@ void
slave_remove(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev)
{
- uint8_t i;
+ uint16_t i;
for (i = 0; i < internals->slave_count; i++)
if (internals->slaves[i].port_id ==
@@ -2124,7 +2125,7 @@ out_err:
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
- uint8_t i;
+ uint16_t i;
if (dev->data->rx_queues != NULL) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -2147,7 +2148,7 @@ void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t i;
+ uint16_t i;
if (internals->mode == BONDING_MODE_8023AD) {
struct port *port;
@@ -2196,7 +2197,7 @@ void
bond_ethdev_close(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
- uint8_t bond_port_id = internals->port_id;
+ uint16_t bond_port_id = internals->port_id;
int skipped = 0;
struct rte_flow_error ferror;
@@ -2228,6 +2229,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
uint16_t max_nb_rx_queues = UINT16_MAX;
uint16_t max_nb_tx_queues = UINT16_MAX;
+ uint16_t max_rx_desc_lim = UINT16_MAX;
+ uint16_t max_tx_desc_lim = UINT16_MAX;
dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
@@ -2241,7 +2244,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
*/
if (internals->slave_count > 0) {
struct rte_eth_dev_info slave_info;
- uint8_t idx;
+ uint16_t idx;
for (idx = 0; idx < internals->slave_count; idx++) {
rte_eth_dev_info_get(internals->slaves[idx].port_id,
@@ -2252,6 +2255,12 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (slave_info.max_tx_queues < max_nb_tx_queues)
max_nb_tx_queues = slave_info.max_tx_queues;
+
+ if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
+ max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
+
+ if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
+ max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
}
}
@@ -2263,10 +2272,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
memcpy(&dev_info->default_txconf, &internals->default_txconf,
sizeof(dev_info->default_txconf));
- memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim,
- sizeof(dev_info->rx_desc_lim));
- memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim,
- sizeof(dev_info->tx_desc_lim));
+ dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
+ dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
/**
* If dedicated hw queues enabled for link bonding device in LACP mode
@@ -2593,6 +2600,9 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
case BONDING_MODE_TLB:
case BONDING_MODE_ALB:
default:
+ /* Do not touch promisc when there cannot be primary ports */
+ if (internals->slave_count == 0)
+ break;
rte_eth_promiscuous_enable(internals->current_primary_port);
}
}
@@ -2621,6 +2631,9 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
case BONDING_MODE_TLB:
case BONDING_MODE_ALB:
default:
+ /* Do not touch promisc when there cannot be primary ports */
+ if (internals->slave_count == 0)
+ break;
rte_eth_promiscuous_disable(internals->current_primary_port);
}
}
@@ -2644,14 +2657,15 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
struct rte_eth_link link;
int rc = -1;
- int i, valid_slave = 0;
- uint8_t active_pos;
uint8_t lsc_flag = 0;
+ int valid_slave = 0;
+ uint16_t active_pos;
+ uint16_t i;
if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
return rc;
- bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
+ bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
if (check_for_bonded_ethdev(bonded_eth_dev))
return rc;
@@ -2687,16 +2701,6 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (active_pos < internals->active_slave_count)
goto link_update;
- /* if no active slave ports then set this port to be primary port */
- if (internals->active_slave_count < 1) {
- /* If first active slave, then change link status */
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
- internals->current_primary_port = port_id;
- lsc_flag = 1;
-
- mac_address_slaves_update(bonded_eth_dev);
- }
-
/* check link state properties if bonded link is up*/
if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
if (link_properties_valid(bonded_eth_dev, &link) != 0)
@@ -2708,9 +2712,24 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
link_properties_set(bonded_eth_dev, &link);
}
+ /* If no active slave ports then set this port to be
+ * the primary port.
+ */
+ if (internals->active_slave_count < 1) {
+ /* If first active slave, then change link status */
+ bonded_eth_dev->data->dev_link.link_status =
+ ETH_LINK_UP;
+ internals->current_primary_port = port_id;
+ lsc_flag = 1;
+
+ mac_address_slaves_update(bonded_eth_dev);
+ }
+
activate_slave(bonded_eth_dev, port_id);
- /* If user has defined the primary port then default to using it */
+ /* If the user has defined the primary port then default to
+ * using it.
+ */
if (internals->user_defined_primary_port &&
internals->primary_port == port_id)
bond_ethdev_primary_set(internals, port_id);
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 3ea5d686..8afef39b 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -100,7 +100,7 @@ struct rte_flow {
};
typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
/** Link Bonding PMD device private configuration Structure */
struct bond_dev_private {
@@ -222,13 +222,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
void
activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
-void
-link_properties_set(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_link *slave_dev_link);
-int
-link_properties_valid(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_link *slave_dev_link);
-
int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr);
@@ -263,15 +256,15 @@ slave_add(struct bond_dev_private *internals,
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves);
+ uint16_t slave_count, uint16_t *slaves);
void
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 701e0b1f..774dd082 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -4246,7 +4246,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
V_FW_CMD_EXEC(0) |
V_FW_VI_MAC_CMD_VIID(viid));
raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
- c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
+ c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |
raw |
V_FW_CMD_LEN16(1));
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
index 5f5cbe04..f5f027a2 100644
--- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Chelsio Communications.
+ * Copyright(c) 2014-2019 Chelsio Communications.
* All rights reserved.
*/
@@ -103,6 +103,12 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -116,19 +122,63 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x509A), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509B), /* Custom T540-CR LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR SFP+ LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR SFP+ */
+ CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a5), /* Custom T522-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50a6), /* Custom T522-BT-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */
/* T6 adapter */
CH_PCI_ID_TABLE_FENTRY(0x6001), /* T6225-CR */
CH_PCI_ID_TABLE_FENTRY(0x6002), /* T6225-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x6003), /* T6425-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6004), /* T6425-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x6005), /* T6225-OCP */
+ CH_PCI_ID_TABLE_FENTRY(0x6006), /* T62100-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x6007), /* T62100-LP-CR */
CH_PCI_ID_TABLE_FENTRY(0x6008), /* T62100-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6009), /* T6210-BT */
CH_PCI_ID_TABLE_FENTRY(0x600d), /* T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6011), /* T6225-LL-CR */
CH_PCI_ID_TABLE_FENTRY(0x6014), /* T61100-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6015), /* T6201-BT */
CH_PCI_ID_TABLE_FENTRY(0x6080), /* Custom T6225-CR SFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6081), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index eb58f880..cf262134 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -52,7 +52,7 @@
#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
-bool force_linkup(struct adapter *adap);
+bool cxgbe_force_linkup(struct adapter *adap);
int cxgbe_probe(struct adapter *adapter);
int cxgbevf_probe(struct adapter *adapter);
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
@@ -65,19 +65,17 @@ void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
void cxgbe_stats_reset(struct port_info *pi);
int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
unsigned int cnt, struct t4_completion *c);
-int link_start(struct port_info *pi);
-void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
- unsigned int cnt, unsigned int size, unsigned int iqe_size);
-int setup_sge_fwevtq(struct adapter *adapter);
-int setup_sge_ctrl_txq(struct adapter *adapter);
-void cfg_queues(struct rte_eth_dev *eth_dev);
-int cfg_queue_count(struct rte_eth_dev *eth_dev);
-int init_rss(struct adapter *adap);
-int setup_rss(struct port_info *pi);
+int cxgbe_link_start(struct port_info *pi);
+int cxgbe_setup_sge_fwevtq(struct adapter *adapter);
+int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter);
+void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev);
+int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev);
+int cxgbe_init_rss(struct adapter *adap);
+int cxgbe_setup_rss(struct port_info *pi);
void cxgbe_enable_rx_queues(struct port_info *pi);
-void print_port_info(struct adapter *adap);
-void print_adapter_info(struct adapter *adap);
+void cxgbe_print_port_info(struct adapter *adap);
+void cxgbe_print_adapter_info(struct adapter *adap);
int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
-void configure_max_ethqsets(struct adapter *adapter);
+void cxgbe_configure_max_ethqsets(struct adapter *adapter);
#endif /* _CXGBE_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index b2f83ea3..7babdfb4 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -200,7 +200,8 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
/* Exit if link status changed or always forced up */
- if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
+ if (pi->link_cfg.link_ok != old_link ||
+ cxgbe_force_linkup(adapter))
break;
if (!wait_to_complete)
@@ -209,7 +210,7 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
}
- new_link.link_status = force_linkup(adapter) ?
+ new_link.link_status = cxgbe_force_linkup(adapter) ?
ETH_LINK_UP : pi->link_cfg.link_ok;
new_link.link_autoneg = pi->link_cfg.autoneg;
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -356,7 +357,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
cxgbe_enable_rx_queues(pi);
- err = setup_rss(pi);
+ err = cxgbe_setup_rss(pi);
if (err)
goto out;
@@ -372,7 +373,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
goto out;
}
- err = link_start(pi);
+ err = cxgbe_link_start(pi);
if (err)
goto out;
@@ -412,18 +413,18 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
CXGBE_FUNC_TRACE();
if (!(adapter->flags & FW_QUEUE_BOUND)) {
- err = setup_sge_fwevtq(adapter);
+ err = cxgbe_setup_sge_fwevtq(adapter);
if (err)
return err;
adapter->flags |= FW_QUEUE_BOUND;
if (is_pf4(adapter)) {
- err = setup_sge_ctrl_txq(adapter);
+ err = cxgbe_setup_sge_ctrl_txq(adapter);
if (err)
return err;
}
}
- err = cfg_queue_count(eth_dev);
+ err = cxgbe_cfg_queue_count(eth_dev);
if (err)
return err;
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 5fa6cdd0..6a3cbc1e 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -116,7 +116,7 @@ out:
/**
* Setup sge control queues to pass control information.
*/
-int setup_sge_ctrl_txq(struct adapter *adapter)
+int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int err = 0, i = 0;
@@ -190,7 +190,7 @@ int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms,
return -ETIMEDOUT;
}
-int setup_sge_fwevtq(struct adapter *adapter)
+int cxgbe_setup_sge_fwevtq(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int err = 0;
@@ -465,7 +465,7 @@ static inline bool is_x_10g_port(const struct link_config *lc)
return high_speeds != 0;
}
-inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
{
@@ -475,7 +475,7 @@ inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
q->size = size;
}
-int cfg_queue_count(struct rte_eth_dev *eth_dev)
+int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adap = pi->adapter;
@@ -502,7 +502,7 @@ int cfg_queue_count(struct rte_eth_dev *eth_dev)
return 0;
}
-void cfg_queues(struct rte_eth_dev *eth_dev)
+void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
{
struct rte_config *config = rte_eal_get_configuration();
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -596,7 +596,7 @@ static void setup_memwin(struct adapter *adap)
MEMWIN_NIC));
}
-int init_rss(struct adapter *adap)
+int cxgbe_init_rss(struct adapter *adap)
{
unsigned int i;
@@ -623,7 +623,7 @@ int init_rss(struct adapter *adap)
/**
* Dump basic information about the adapter.
*/
-void print_adapter_info(struct adapter *adap)
+void cxgbe_print_adapter_info(struct adapter *adap)
{
/**
* Hardware/Firmware/etc. Version/Revision IDs.
@@ -631,7 +631,7 @@ void print_adapter_info(struct adapter *adap)
t4_dump_version_info(adap);
}
-void print_port_info(struct adapter *adap)
+void cxgbe_print_port_info(struct adapter *adap)
{
int i;
char buf[80];
@@ -779,7 +779,7 @@ static void configure_pcie_ext_tag(struct adapter *adapter)
}
/* Figure out how many Queue Sets we can support */
-void configure_max_ethqsets(struct adapter *adapter)
+void cxgbe_configure_max_ethqsets(struct adapter *adapter)
{
unsigned int ethqsets;
@@ -1268,7 +1268,7 @@ static int adap_init0(struct adapter *adap)
t4_init_tp_params(adap);
configure_pcie_ext_tag(adap);
configure_vlan_types(adap);
- configure_max_ethqsets(adap);
+ cxgbe_configure_max_ethqsets(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -1322,7 +1322,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
pi->port_id, pi->mod_type);
}
-inline bool force_linkup(struct adapter *adap)
+bool cxgbe_force_linkup(struct adapter *adap)
{
struct rte_pci_device *pdev = adap->pdev;
@@ -1340,7 +1340,7 @@ inline bool force_linkup(struct adapter *adap)
*
* Performs the MAC and PHY actions needed to enable a port.
*/
-int link_start(struct port_info *pi)
+int cxgbe_link_start(struct port_info *pi)
{
struct adapter *adapter = pi->adapter;
u64 conf_offloads;
@@ -1382,7 +1382,7 @@ int link_start(struct port_info *pi)
true, true, false);
}
- if (ret == 0 && force_linkup(adapter))
+ if (ret == 0 && cxgbe_force_linkup(adapter))
pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
return ret;
}
@@ -1490,7 +1490,7 @@ int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
* We always configure the RSS mapping for all ports since the mapping
* table has plenty of entries.
*/
-int setup_rss(struct port_info *pi)
+int cxgbe_setup_rss(struct port_info *pi)
{
int j, err;
struct adapter *adapter = pi->adapter;
@@ -1864,10 +1864,10 @@ allocate_mac:
}
}
- cfg_queues(adapter->eth_dev);
+ cxgbe_cfg_queues(adapter->eth_dev);
- print_adapter_info(adapter);
- print_port_info(adapter);
+ cxgbe_print_adapter_info(adapter);
+ cxgbe_print_port_info(adapter);
adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
adapter->clipt_end);
@@ -1909,7 +1909,7 @@ allocate_mac:
"Maskless filter support disabled. Continuing\n");
}
- err = init_rss(adapter);
+ err = cxgbe_init_rss(adapter);
if (err)
goto out_free;
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
index 61bd8519..c46bc98a 100644
--- a/drivers/net/cxgbe/cxgbevf_main.c
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -50,7 +50,7 @@ static void size_nports_qsets(struct adapter *adapter)
adapter->params.nports = pmask_nports;
}
- configure_max_ethqsets(adapter);
+ cxgbe_configure_max_ethqsets(adapter);
if (adapter->sge.max_ethqsets < adapter->params.nports) {
dev_warn(adapter->pdev_dev, "only using %d of %d available"
" virtual interfaces (too few Queue Sets)\n",
@@ -268,16 +268,16 @@ allocate_mac:
}
}
- cfg_queues(adapter->eth_dev);
- print_adapter_info(adapter);
- print_port_info(adapter);
+ cxgbe_cfg_queues(adapter->eth_dev);
+ cxgbe_print_adapter_info(adapter);
+ cxgbe_print_port_info(adapter);
adapter->mpstcam = t4_init_mpstcam(adapter);
if (!adapter->mpstcam)
dev_warn(adapter,
"VF could not allocate mps tcam table. Continuing\n");
- err = init_rss(adapter);
+ err = cxgbe_init_rss(adapter);
if (err)
goto out_free;
return 0;
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index f9d2d48a..663c0a79 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1604,6 +1604,52 @@ static inline void rspq_next(struct sge_rspq *q)
}
}
+static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
+ uint64_t ol_flags)
+{
+ pkt->packet_type |= ptype;
+ pkt->ol_flags |= ol_flags;
+}
+
+static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
+ const struct cpl_rx_pkt *cpl,
+ struct rte_mbuf *pkt)
+{
+ bool csum_ok;
+ u16 err_vec;
+
+ if (adap->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ if (cpl->vlan_ex)
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ else
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
+
+ if (cpl->l2info & htonl(F_RXF_IP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_IP6))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+
+ if (cpl->l2info & htonl(F_RXF_TCP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_UDP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+}
+
/**
* process_responses - process responses from an SGE response queue
* @q: the ingress queue to process
@@ -1655,8 +1701,6 @@ static int process_responses(struct sge_rspq *q, int budget,
(const void *)&q->cur_desc[1];
struct rte_mbuf *pkt, *npkt;
u32 len, bufsz;
- bool csum_ok;
- u16 err_vec;
rc = (const struct rsp_ctrl *)
((const char *)q->cur_desc +
@@ -1673,16 +1717,6 @@ static int process_responses(struct sge_rspq *q, int budget,
len = G_RSPD_LEN(len);
pkt->pkt_len = len;
- /* Compressed error vector is enabled for
- * T6 only
- */
- if (q->adapter->params.tp.rx_pkt_encap)
- err_vec = G_T6_COMPR_RXERR_VEC(
- ntohs(cpl->err_vec));
- else
- err_vec = ntohs(cpl->err_vec);
- csum_ok = cpl->csum_calc && !err_vec;
-
/* Chain mbufs into len if necessary */
while (len) {
struct rte_mbuf *new_pkt = rsd->buf;
@@ -1700,20 +1734,7 @@ static int process_responses(struct sge_rspq *q, int budget,
npkt->next = NULL;
pkt->nb_segs--;
- if (cpl->l2info & htonl(F_RXF_IP)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV4;
- if (unlikely(!csum_ok))
- pkt->ol_flags |=
- PKT_RX_IP_CKSUM_BAD;
-
- if ((cpl->l2info &
- htonl(F_RXF_UDP | F_RXF_TCP)) &&
- !csum_ok)
- pkt->ol_flags |=
- PKT_RX_L4_CKSUM_BAD;
- } else if (cpl->l2info & htonl(F_RXF_IP6)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV6;
- }
+ cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
if (!rss_hdr->filter_tid &&
rss_hdr->hash_type) {
@@ -1722,11 +1743,8 @@ static int process_responses(struct sge_rspq *q, int budget,
ntohl(rss_hdr->hash_val);
}
- if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED;
+ if (cpl->vlan_ex)
pkt->vlan_tci = ntohs(cpl->vlan);
- }
rte_pktmbuf_adj(pkt, s->pktshift);
rxq->stats.pkts++;
diff --git a/drivers/net/dpaa2/dpaa2_pmd_logs.h b/drivers/net/dpaa2/dpaa2_pmd_logs.h
index c04babdb..c47ba8e1 100644
--- a/drivers/net/dpaa2/dpaa2_pmd_logs.h
+++ b/drivers/net/dpaa2/dpaa2_pmd_logs.h
@@ -1,5 +1,4 @@
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 NXP
*/
diff --git a/drivers/net/e1000/base/e1000_82575.h b/drivers/net/e1000/base/e1000_82575.h
index 4133cdd8..6f2b22c1 100644
--- a/drivers/net/e1000/base/e1000_82575.h
+++ b/drivers/net/e1000/base/e1000_82575.h
@@ -383,7 +383,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_ETQF_FILTER_ENABLE (1 << 26)
#define E1000_ETQF_IMM_INT (1 << 29)
#define E1000_ETQF_1588 (1 << 30)
-#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
+#define E1000_ETQF_QUEUE_ENABLE (1U << 31)
/*
* ETQF filter list: one static filter per filter consumer. This is
* to avoid filter collisions later. Add new filters
@@ -410,7 +410,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
#define E1000_DTXSWC_LLE_SHIFT 16
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1U << 31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c
index 92ab6fc6..2654a18a 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.c
+++ b/drivers/net/e1000/base/e1000_ich8lan.c
@@ -5166,7 +5166,7 @@ STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Device Status */
if (hw->mac.type == e1000_ich8lan) {
reg = E1000_READ_REG(hw, E1000_STATUS);
- reg &= ~(1 << 31);
+ reg &= ~(1U << 31);
E1000_WRITE_REG(hw, E1000_STATUS, reg);
}
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 023fe751..a21205af 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -490,15 +490,15 @@ enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
ENETC_RTBLENR_LEN(rx_ring->bd_count));
rx_ring->mb_pool = mb_pool;
- /* enable ring */
- enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
- enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
rx_ring->rcir = (void *)((size_t)hw->reg +
ENETC_BDR(RX, idx, ENETC_RBCIR));
enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
RTE_PKTMBUF_HEADROOM);
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
+ /* enable ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}
static int
diff --git a/drivers/net/enetc/enetc_rxtx.c b/drivers/net/enetc/enetc_rxtx.c
index 631e2430..ce5a542a 100644
--- a/drivers/net/enetc/enetc_rxtx.c
+++ b/drivers/net/enetc/enetc_rxtx.c
@@ -49,11 +49,16 @@ enetc_xmit_pkts(void *tx_queue,
uint16_t nb_pkts)
{
struct enetc_swbd *tx_swbd;
- int i, start;
+ int i, start, bds_to_use;
struct enetc_tx_bd *txbd;
struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;
i = tx_ring->next_to_use;
+
+ bds_to_use = enetc_bd_unused(tx_ring);
+ if (bds_to_use < nb_pkts)
+ nb_pkts = bds_to_use;
+
start = 0;
while (nb_pkts--) {
enetc_clean_tx_ring(tx_ring);
@@ -88,8 +93,9 @@ enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
rx_swbd = &rx_ring->q_swbd[i];
rxbd = ENETC_RXBD(*rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
- rx_swbd->buffer_addr =
- rte_cpu_to_le_64(rte_mbuf_raw_alloc(rx_ring->mb_pool));
+ rx_swbd->buffer_addr = (void *)(uintptr_t)
+ rte_cpu_to_le_64((uint64_t)(uintptr_t)
+ rte_pktmbuf_alloc(rx_ring->mb_pool));
rxbd->w.addr = (uint64_t)(uintptr_t)
rx_swbd->buffer_addr->buf_addr +
rx_swbd->buffer_addr->data_off;
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 7bca3cad..377f607f 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -78,8 +78,8 @@ struct enic_fdir {
u32 modes;
u32 types_mask;
void (*copy_fltr_fn)(struct filter_v2 *filt,
- struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks);
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
};
struct enic_soft_stats {
@@ -201,8 +201,8 @@ struct enic {
/* Compute ethdev's max packet size from MTU */
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
- /* ethdev max size includes eth and crc whereas NIC MTU does not */
- return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ /* ethdev max size includes eth whereas NIC MTU does not */
+ return mtu + ETHER_HDR_LEN;
}
/* Get the CQ index from a Start of Packet(SOP) RQ index */
@@ -340,9 +340,5 @@ int enic_link_update(struct enic *enic);
bool enic_use_vector_rx_handler(struct enic *enic);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
-void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks);
-void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks);
extern const struct rte_flow_ops enic_flow_ops;
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 9e9e548c..48c8e626 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -36,6 +36,13 @@
#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
+static void copy_fltr_v1(struct filter_v2 *fltr,
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
+static void copy_fltr_v2(struct filter_v2 *fltr,
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
+
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
*stats = enic->fdir.stats;
@@ -79,9 +86,9 @@ enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
* without advanced filter support.
*/
-void
-copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- __rte_unused struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+ __rte_unused const struct rte_eth_fdir_masks *masks)
{
fltr->type = FILTER_IPV4_5TUPLE;
fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
@@ -104,9 +111,9 @@ copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
/* Copy Flow Director filter to a VIC generic filter (requires advanced
* filter support.
*/
-void
-copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks)
{
struct filter_generic_1 *gp = &fltr->u.generic_1;
@@ -163,9 +170,11 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
sctp_val.tag = input->flow.sctp4_flow.verify_tag;
}
- /* v4 proto should be 132, override ip4_flow.proto */
- input->flow.ip4_flow.proto = 132;
-
+ /*
+ * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
+ * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
+ * manually set proto_id=sctp below.
+ */
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
&sctp_val, sizeof(struct sctp_hdr));
}
@@ -189,6 +198,10 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
if (input->flow.ip4_flow.proto) {
ip4_mask.next_proto_id = masks->ipv4_mask.proto;
ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ /* Explicitly match the SCTP protocol number */
+ ip4_mask.next_proto_id = 0xff;
+ ip4_val.next_proto_id = IPPROTO_SCTP;
}
if (input->flow.ip4_flow.src_ip) {
ip4_mask.src_addr = masks->ipv4_mask.src_ip;
@@ -251,9 +264,6 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
sctp_val.tag = input->flow.sctp6_flow.verify_tag;
}
- /* v4 proto should be 132, override ipv6_flow.proto */
- input->flow.ipv6_flow.proto = 132;
-
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
&sctp_val, sizeof(struct sctp_hdr));
}
@@ -269,6 +279,10 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
if (input->flow.ipv6_flow.proto) {
ipv6_mask.proto = masks->ipv6_mask.proto;
ipv6_val.proto = input->flow.ipv6_flow.proto;
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ /* See comments for IPv4 SCTP above. */
+ ipv6_mask.proto = 0xff;
+ ipv6_val.proto = IPPROTO_SCTP;
}
memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
sizeof(ipv6_mask.src_addr));
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index bb9ed037..dbc8de83 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -23,33 +23,54 @@
rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
fmt "\n", ##args)
+/*
+ * Common arguments passed to copy_item functions. Use this structure
+ * so we can easily add new arguments.
+ * item: Item specification.
+ * filter: Partially filled in NIC filter structure.
+ * inner_ofst: If zero, this is an outer header. If non-zero, this is
+ * the offset into L5 where the header begins.
+ * l2_proto_off: offset to EtherType eth or vlan header.
+ * l3_proto_off: offset to next protocol field in IPv4 or 6 header.
+ */
+struct copy_item_args {
+ const struct rte_flow_item *item;
+ struct filter_v2 *filter;
+ uint8_t *inner_ofst;
+ uint8_t l2_proto_off;
+ uint8_t l3_proto_off;
+ struct enic *enic;
+};
+
+/* functions for copying items into enic filters */
+typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
+
/** Info about how to copy items into enic filters. */
struct enic_items {
/** Function for copying and validating an item. */
- int (*copy_item)(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst);
+ enic_copy_item_fn *copy_item;
/** List of valid previous items. */
const enum rte_flow_item_type * const prev_items;
/** True if it's OK for this item to be the first item. For some NIC
* versions, it's invalid to start the stack above layer 3.
*/
const u8 valid_start_item;
+ /* Inner packet version of copy_item. */
+ enic_copy_item_fn *inner_copy_item;
};
/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
/** list of valid items and their handlers and attributes. */
const struct enic_items *item_info;
+ /* Max type in the above list, used to detect unsupported types */
+ enum rte_flow_item_type max_item_type;
};
/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action);
-/* functions for copying items into enic filters */
-typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst);
-
/** Action capabilities for various NICs. */
struct enic_action_cap {
/** list of valid actions */
@@ -70,8 +91,13 @@ static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
-static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
+static enic_copy_item_fn enic_copy_item_inner_eth_v2;
+static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
+static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
+static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
+static enic_copy_item_fn enic_copy_item_inner_udp_v2;
+static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;
@@ -86,6 +112,7 @@ static const struct enic_items enic_items_v1[] = {
.prev_items = (const enum rte_flow_item_type[]) {
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.copy_item = enic_copy_item_udp_v1,
@@ -94,6 +121,7 @@ static const struct enic_items enic_items_v1[] = {
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.copy_item = enic_copy_item_tcp_v1,
@@ -102,6 +130,7 @@ static const struct enic_items enic_items_v1[] = {
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
};
@@ -117,6 +146,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_VXLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_eth_v2,
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.copy_item = enic_copy_item_vlan_v2,
@@ -125,6 +155,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_vlan_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV4] = {
.copy_item = enic_copy_item_ipv4_v2,
@@ -134,6 +165,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv4_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.copy_item = enic_copy_item_ipv6_v2,
@@ -143,6 +175,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv6_v2,
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.copy_item = enic_copy_item_udp_v2,
@@ -152,6 +185,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_udp_v2,
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.copy_item = enic_copy_item_tcp_v2,
@@ -161,6 +195,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_tcp_v2,
},
[RTE_FLOW_ITEM_TYPE_SCTP] = {
.copy_item = enic_copy_item_sctp_v2,
@@ -170,6 +205,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
.copy_item = enic_copy_item_vxlan_v2,
@@ -178,6 +214,7 @@ static const struct enic_items enic_items_v2[] = {
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
};
@@ -190,6 +227,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_VXLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_eth_v2,
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.copy_item = enic_copy_item_vlan_v2,
@@ -198,6 +236,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_vlan_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV4] = {
.copy_item = enic_copy_item_ipv4_v2,
@@ -207,6 +246,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv4_v2,
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.copy_item = enic_copy_item_ipv6_v2,
@@ -216,6 +256,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_ipv6_v2,
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.copy_item = enic_copy_item_udp_v2,
@@ -225,6 +266,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_udp_v2,
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.copy_item = enic_copy_item_tcp_v2,
@@ -234,15 +276,17 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = enic_copy_item_inner_tcp_v2,
},
[RTE_FLOW_ITEM_TYPE_SCTP] = {
.copy_item = enic_copy_item_sctp_v2,
- .valid_start_item = 1,
+ .valid_start_item = 0,
.prev_items = (const enum rte_flow_item_type[]) {
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
.copy_item = enic_copy_item_vxlan_v2,
@@ -251,6 +295,7 @@ static const struct enic_items enic_items_v3[] = {
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
},
+ .inner_copy_item = NULL,
},
};
@@ -258,12 +303,15 @@ static const struct enic_items enic_items_v3[] = {
static const struct enic_filter_cap enic_filter_cap[] = {
[FILTER_IPV4_5TUPLE] = {
.item_info = enic_items_v1,
+ .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
},
[FILTER_USNIC_IP] = {
.item_info = enic_items_v2,
+ .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
[FILTER_DPDK_1] = {
.item_info = enic_items_v3,
+ .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
};
@@ -330,20 +378,11 @@ mask_exact_match(const u8 *supported, const u8 *supplied,
return 1;
}
-/**
- * Copy IPv4 item into version 1 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Should always be 0 for version 1.
- */
static int
-enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
@@ -354,9 +393,6 @@ enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
-
if (!mask)
mask = &rte_flow_item_ipv4_mask;
@@ -380,20 +416,11 @@ enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy UDP item into version 1 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Should always be 0 for version 1.
- */
static int
-enic_copy_item_udp_v1(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_udp_v1(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
@@ -404,9 +431,6 @@ enic_copy_item_udp_v1(const struct rte_flow_item *item,
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
-
if (!mask)
mask = &rte_flow_item_udp_mask;
@@ -431,20 +455,11 @@ enic_copy_item_udp_v1(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy TCP item into version 1 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Should always be 0 for version 1.
- */
static int
-enic_copy_item_tcp_v1(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
@@ -455,9 +470,6 @@ enic_copy_item_tcp_v1(const struct rte_flow_item *item,
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
-
if (!mask)
mask = &rte_flow_item_tcp_mask;
@@ -482,21 +494,150 @@ enic_copy_item_tcp_v1(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy ETH item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * If zero, this is an outer header. If non-zero, this is the offset into L5
- * where the header begins.
+/*
+ * The common 'copy' function for all inner packet patterns. Patterns are
+ * first appended to the L5 pattern buffer. Then, since the NIC filter
+ * API has no special support for inner packet matching at the moment,
+ * we set EtherType and IP proto as necessary.
*/
static int
-enic_copy_item_eth_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
+ const void *val, const void *mask, uint8_t val_size,
+ uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
+{
+ uint8_t *l5_mask, *l5_val;
+ uint8_t start_off;
+
+ /* No space left in the L5 pattern buffer. */
+ start_off = *inner_ofst;
+ if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
+ l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
+ /* Copy the pattern into the L5 buffer. */
+ if (val) {
+ memcpy(l5_mask + start_off, mask, val_size);
+ memcpy(l5_val + start_off, val, val_size);
+ }
+ /* Set the protocol field in the previous header. */
+ if (proto_off) {
+ void *m, *v;
+
+ m = l5_mask + proto_off;
+ v = l5_val + proto_off;
+ if (proto_size == 1) {
+ *(uint8_t *)m = 0xff;
+ *(uint8_t *)v = (uint8_t)proto_val;
+ } else if (proto_size == 2) {
+ *(uint16_t *)m = 0xffff;
+ *(uint16_t *)v = proto_val;
+ }
+ }
+ /* All inner headers land in L5 buffer even if their spec is null. */
+ *inner_ofst += val_size;
+ return 0;
+}
+
+static int
+enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct ether_hdr),
+ 0 /* no previous protocol */, 0, 0);
+}
+
+static int
+enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+ uint8_t eth_type_off;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ /* Append vlan header to L5 and set ether type = TPID */
+ eth_type_off = arg->l2_proto_off;
+ arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct vlan_hdr),
+ eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
+}
+
+static int
+enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ /* Append ipv4 header to L5 and set ether type = ipv4 */
+ arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct ipv4_hdr),
+ arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
+}
+
+static int
+enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ /* Append ipv6 header to L5 and set ether type = ipv6 */
+ arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct ipv6_hdr),
+ arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
+}
+
+static int
+enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ /* Append udp header to L5 and set ip proto = udp */
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct udp_hdr),
+ arg->l3_proto_off, IPPROTO_UDP, 1);
+}
+
+static int
+enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ FLOW_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ /* Append tcp header to L5 and set ip proto = tcp */
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct tcp_hdr),
+ arg->l3_proto_off, IPPROTO_TCP, 1);
+}
+
+static int
+enic_copy_item_eth_v2(struct copy_item_args *arg)
+{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
struct ether_hdr enic_spec;
struct ether_hdr enic_mask;
const struct rte_flow_item_eth *spec = item->spec;
@@ -524,45 +665,24 @@ enic_copy_item_eth_v2(const struct rte_flow_item *item,
enic_spec.ether_type = spec->type;
enic_mask.ether_type = mask->type;
- if (*inner_ofst == 0) {
- /* outer header */
- memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
- sizeof(struct ether_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
- sizeof(struct ether_hdr));
- } else {
- /* inner header */
- if ((*inner_ofst + sizeof(struct ether_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- /* Offset into L5 where inner Ethernet header goes */
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- &enic_mask, sizeof(struct ether_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- &enic_spec, sizeof(struct ether_hdr));
- *inner_ofst += sizeof(struct ether_hdr);
- }
+ /* outer header */
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
+ sizeof(struct ether_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
+ sizeof(struct ether_hdr));
return 0;
}
-/**
- * Copy VLAN item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * If zero, this is an outer header. If non-zero, this is the offset into L5
- * where the header begins.
- */
static int
-enic_copy_item_vlan_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+ struct ether_hdr *eth_mask;
+ struct ether_hdr *eth_val;
FLOW_TRACE();
@@ -573,99 +693,72 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item,
if (!mask)
mask = &rte_flow_item_vlan_mask;
- if (*inner_ofst == 0) {
- struct ether_hdr *eth_mask =
- (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
- struct ether_hdr *eth_val =
- (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+ eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
+ eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+ /* Outer TPID cannot be matched */
+ if (eth_mask->ether_type)
+ return ENOTSUP;
+ /*
+ * For recent models:
+ * When packet matching, the VIC always compares vlan-stripped
+ * L2, regardless of vlan stripping settings. So, the inner type
+ * from vlan becomes the ether type of the eth header.
+ *
+ * Older models w/o hardware vxlan parser have a different
+ * behavior when vlan stripping is disabled. In this case,
+ * vlan tag remains in the L2 buffer.
+ */
+ if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
+ struct vlan_hdr *vlan;
- /* Outer TPID cannot be matched */
- if (eth_mask->ether_type)
- return ENOTSUP;
+ vlan = (struct vlan_hdr *)(eth_mask + 1);
+ vlan->eth_proto = mask->inner_type;
+ vlan = (struct vlan_hdr *)(eth_val + 1);
+ vlan->eth_proto = spec->inner_type;
+ } else {
eth_mask->ether_type = mask->inner_type;
eth_val->ether_type = spec->inner_type;
-
- /* Outer header. Use the vlan mask/val fields */
- gp->mask_vlan = mask->tci;
- gp->val_vlan = spec->tci;
- } else {
- /* Inner header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct vlan_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct vlan_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct vlan_hdr));
- *inner_ofst += sizeof(struct vlan_hdr);
}
+ /* For TCI, use the vlan mask/val fields (little endian). */
+ gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
+ gp->val_vlan = rte_be_to_cpu_16(spec->tci);
return 0;
}
-/**
- * Copy IPv4 item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner IPv4 filtering.
- */
static int
-enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
FLOW_TRACE();
- if (*inner_ofst == 0) {
- /* Match IPv4 */
- gp->mask_flags |= FILTER_GENERIC_1_IPV4;
- gp->val_flags |= FILTER_GENERIC_1_IPV4;
+ /* Match IPv4 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV4;
+ gp->val_flags |= FILTER_GENERIC_1_IPV4;
- /* Match all if no spec */
- if (!spec)
- return 0;
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
- if (!mask)
- mask = &rte_flow_item_ipv4_mask;
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
- memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
- sizeof(struct ipv4_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
- sizeof(struct ipv4_hdr));
- } else {
- /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct ipv4_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct ipv4_hdr));
- *inner_ofst += sizeof(struct ipv4_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv4_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv4_hdr));
return 0;
}
-/**
- * Copy IPv6 item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner IPv6 filtering.
- */
static int
-enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
@@ -683,39 +776,18 @@ enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
if (!mask)
mask = &rte_flow_item_ipv6_mask;
- if (*inner_ofst == 0) {
- memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
- sizeof(struct ipv6_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
- sizeof(struct ipv6_hdr));
- } else {
- /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct ipv6_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct ipv6_hdr));
- *inner_ofst += sizeof(struct ipv6_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv6_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv6_hdr));
return 0;
}
-/**
- * Copy UDP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner UDP filtering.
- */
static int
-enic_copy_item_udp_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_udp_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
@@ -733,39 +805,18 @@ enic_copy_item_udp_v2(const struct rte_flow_item *item,
if (!mask)
mask = &rte_flow_item_udp_mask;
- if (*inner_ofst == 0) {
- memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
- sizeof(struct udp_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
- sizeof(struct udp_hdr));
- } else {
- /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct udp_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct udp_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct udp_hdr));
- *inner_ofst += sizeof(struct udp_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct udp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct udp_hdr));
return 0;
}
-/**
- * Copy TCP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner TCP filtering.
- */
static int
-enic_copy_item_tcp_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
@@ -783,47 +834,48 @@ enic_copy_item_tcp_v2(const struct rte_flow_item *item,
if (!mask)
return ENOTSUP;
- if (*inner_ofst == 0) {
- memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
- sizeof(struct tcp_hdr));
- memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
- sizeof(struct tcp_hdr));
- } else {
- /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct tcp_hdr)) >
- FILTER_GENERIC_1_KEY_LEN)
- return ENOTSUP;
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct tcp_hdr));
- memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct tcp_hdr));
- *inner_ofst += sizeof(struct tcp_hdr);
- }
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct tcp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct tcp_hdr));
return 0;
}
-/**
- * Copy SCTP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. Don't support inner SCTP filtering.
- */
static int
-enic_copy_item_sctp_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
const struct rte_flow_item_sctp *spec = item->spec;
const struct rte_flow_item_sctp *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+ uint8_t *ip_proto_mask = NULL;
+ uint8_t *ip_proto = NULL;
FLOW_TRACE();
- if (*inner_ofst)
- return ENOTSUP;
+ /*
+ * The NIC filter API has no flags for "match sctp", so explicitly set
+ * the protocol number in the IP pattern.
+ */
+ if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
+ struct ipv4_hdr *ip;
+ ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+ ip_proto_mask = &ip->next_proto_id;
+ ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+ ip_proto = &ip->next_proto_id;
+ } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
+ struct ipv6_hdr *ip;
+ ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+ ip_proto_mask = &ip->proto;
+ ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+ ip_proto = &ip->proto;
+ } else {
+ /* Need IPv4/IPv6 pattern first */
+ return EINVAL;
+ }
+ *ip_proto = IPPROTO_SCTP;
+ *ip_proto_mask = 0xff;
/* Match all if no spec */
if (!spec)
@@ -839,29 +891,29 @@ enic_copy_item_sctp_v2(const struct rte_flow_item *item,
return 0;
}
-/**
- * Copy UDP item into version 2 NIC filter.
- *
- * @param item[in]
- * Item specification.
- * @param enic_filter[out]
- * Partially filled in NIC filter structure.
- * @param inner_ofst[in]
- * Must be 0. VxLAN headers always start at the beginning of L5.
- */
static int
-enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
- struct filter_v2 *enic_filter, u8 *inner_ofst)
+enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
+ uint8_t *inner_ofst = arg->inner_ofst;
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+ struct udp_hdr *udp;
FLOW_TRACE();
- if (*inner_ofst)
- return EINVAL;
-
+ /*
+ * The NIC filter API has no flags for "match vxlan". Set UDP port to
+ * avoid false positives.
+ */
+ gp->mask_flags |= FILTER_GENERIC_1_UDP;
+ gp->val_flags |= FILTER_GENERIC_1_UDP;
+ udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
+ udp->dst_port = 0xffff;
+ udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
+ udp->dst_port = RTE_BE16(4789);
/* Match all if no spec */
if (!spec)
return 0;
@@ -909,6 +961,36 @@ item_stacking_valid(enum rte_flow_item_type prev_item,
return 0;
}
+/*
+ * Fix up the L5 layer. HW vxlan parsing removes the vxlan header from L5.
+ * Instead it is in L4 following the UDP header. Append the vxlan
+ * pattern to L4 (udp) and shift any inner packet pattern in L5.
+ */
+static void
+fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
+ uint8_t inner_ofst)
+{
+ uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
+ uint8_t inner;
+ uint8_t vxlan;
+
+ if (!(inner_ofst > 0 && enic->vxlan))
+ return;
+ FLOW_TRACE();
+ vxlan = sizeof(struct vxlan_hdr);
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+ gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+ gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
+ inner = inner_ofst - vxlan;
+ memset(layer, 0, sizeof(layer));
+ memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
+ memset(layer, 0, sizeof(layer));
+ memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
+}
+
/**
* Build the intenal enic filter structure from the provided pattern. The
* pattern is validated as the items are copied.
@@ -922,7 +1004,8 @@ item_stacking_valid(enum rte_flow_item_type prev_item,
*/
static int
enic_copy_filter(const struct rte_flow_item pattern[],
- const struct enic_items *items_info,
+ const struct enic_filter_cap *cap,
+ struct enic *enic,
struct filter_v2 *enic_filter,
struct rte_flow_error *error)
{
@@ -931,13 +1014,17 @@ enic_copy_filter(const struct rte_flow_item pattern[],
u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
enum rte_flow_item_type prev_item;
const struct enic_items *item_info;
-
+ struct copy_item_args args;
+ enic_copy_item_fn *copy_fn;
u8 is_first_item = 1;
FLOW_TRACE();
prev_item = 0;
+ args.filter = enic_filter;
+ args.inner_ofst = &inner_ofst;
+ args.enic = enic;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Get info about how to validate and copy the item. If NULL
* is returned the nic does not support the item.
@@ -945,18 +1032,31 @@ enic_copy_filter(const struct rte_flow_item pattern[],
if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
- item_info = &items_info[item->type];
+ item_info = &cap->item_info[item->type];
+ if (item->type > cap->max_item_type ||
+ item_info->copy_item == NULL ||
+ (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Unsupported item.");
+ return -rte_errno;
+ }
/* check to see if item stacking is valid */
if (!item_stacking_valid(prev_item, item_info, is_first_item))
goto stacking_error;
- ret = item_info->copy_item(item, enic_filter, &inner_ofst);
+ args.item = item;
+ copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
+ item_info->copy_item;
+ ret = copy_fn(&args);
if (ret)
goto item_not_supported;
prev_item = item->type;
is_first_item = 0;
}
+ fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);
+
return 0;
item_not_supported:
@@ -1057,12 +1157,18 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
if (overlap & MARK)
return ENOTSUP;
overlap |= MARK;
- /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
- * in the range of allows mark ids.
+ /*
+ * Map mark ID (32-bit) to filter ID (16-bit):
+ * - Reject values > 16 bits
+ * - Filter ID 0 is reserved for filters that steer
+ * but not mark. So add 1 to the mark ID to avoid
+ * using 0.
+ * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
+ * reserved for the "flag" action below.
*/
- if (mark->id >= ENIC_MAGIC_FILTER_ID)
+ if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
return EINVAL;
- enic_action->filter_id = mark->id;
+ enic_action->filter_id = mark->id + 1;
enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
break;
}
@@ -1070,6 +1176,7 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
if (overlap & MARK)
return ENOTSUP;
overlap |= MARK;
+ /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
break;
@@ -1392,7 +1499,7 @@ enic_flow_parse(struct rte_eth_dev *dev,
return -rte_errno;
}
enic_filter->type = enic->flow_filter_mode;
- ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
+ ret = enic_copy_filter(pattern, enic_filter_cap, enic,
enic_filter, error);
return ret;
}
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 406f92a8..098a18d6 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1715,8 +1715,15 @@ static int enic_dev_init(struct enic *enic)
PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_MASK;
enic->overlay_offload = true;
- enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
dev_info(enic, "Overlay offload is enabled\n");
+ }
+ /*
+ * Reset the vxlan port if HW vxlan parsing is available. It
+ * is always enabled regardless of overlay offload
+ * enable/disable.
+ */
+ if (enic->vxlan) {
+ enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
/*
* Reset the vxlan port to the default, as the NIC firmware
* does not reset it automatically and keeps the old setting.
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 24b2844f..78bb6b8f 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -61,10 +61,9 @@ int enic_get_vnic_config(struct enic *enic)
* and will be 0 for legacy firmware and VICs
*/
if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
- enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
+ enic->max_mtu = c->max_pkt_size - ETHER_HDR_LEN;
else
- enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE
- - (ETHER_HDR_LEN + 4);
+ enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE - ETHER_HDR_LEN;
if (c->mtu == 0)
c->mtu = 1500;
diff --git a/drivers/net/enic/enic_rxtx_common.h b/drivers/net/enic/enic_rxtx_common.h
index bfbb4909..66f631df 100644
--- a/drivers/net/enic/enic_rxtx_common.h
+++ b/drivers/net/enic/enic_rxtx_common.h
@@ -226,7 +226,8 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
if (filter_id) {
pkt_flags |= PKT_RX_FDIR;
if (filter_id != ENIC_MAGIC_FILTER_ID) {
- mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+ /* filter_id = mark id + 1, so subtract 1 */
+ mbuf->hash.fdir.hi = filter_id - 1;
pkt_flags |= PKT_RX_FDIR_ID;
}
}
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 1d0f09d2..fb02e115 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -134,7 +134,7 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* So, always PKT_RX_VLAN flag is set and vlan_tci
* is valid for each RX packet's mbuf.
*/
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -295,7 +295,7 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* So, always PKT_RX_VLAN flag is set and vlan_tci
* is valid for each RX packet's mbuf.
*/
- first_seg->ol_flags |= PKT_RX_VLAN;
+ first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
first_seg->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 005fda63..96b46a2b 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -52,8 +52,10 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN, PKT_RX_VLAN,
- PKT_RX_VLAN, PKT_RX_VLAN);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index dca61f03..af5e844b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -2671,11 +2671,11 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
#define I40E_PRTMAC_MACC 0x001E24E0
#define I40E_REG_MACC_25GB 0x00020000
#define I40E_REG_SPEED_MASK 0x38000000
-#define I40E_REG_SPEED_100MB 0x00000000
-#define I40E_REG_SPEED_1GB 0x08000000
-#define I40E_REG_SPEED_10GB 0x10000000
-#define I40E_REG_SPEED_20GB 0x20000000
-#define I40E_REG_SPEED_25_40GB 0x18000000
+#define I40E_REG_SPEED_0 0x00000000
+#define I40E_REG_SPEED_1 0x08000000
+#define I40E_REG_SPEED_2 0x10000000
+#define I40E_REG_SPEED_3 0x18000000
+#define I40E_REG_SPEED_4 0x20000000
uint32_t link_speed;
uint32_t reg_val;
@@ -2689,26 +2689,35 @@ update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
/* Parse the link status */
switch (link_speed) {
- case I40E_REG_SPEED_100MB:
+ case I40E_REG_SPEED_0:
link->link_speed = ETH_SPEED_NUM_100M;
break;
- case I40E_REG_SPEED_1GB:
+ case I40E_REG_SPEED_1:
link->link_speed = ETH_SPEED_NUM_1G;
break;
- case I40E_REG_SPEED_10GB:
- link->link_speed = ETH_SPEED_NUM_10G;
- break;
- case I40E_REG_SPEED_20GB:
- link->link_speed = ETH_SPEED_NUM_20G;
+ case I40E_REG_SPEED_2:
+ if (hw->mac.type == I40E_MAC_X722)
+ link->link_speed = ETH_SPEED_NUM_2_5G;
+ else
+ link->link_speed = ETH_SPEED_NUM_10G;
break;
- case I40E_REG_SPEED_25_40GB:
- reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
+ case I40E_REG_SPEED_3:
+ if (hw->mac.type == I40E_MAC_X722) {
+ link->link_speed = ETH_SPEED_NUM_5G;
+ } else {
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
- if (reg_val & I40E_REG_MACC_25GB)
- link->link_speed = ETH_SPEED_NUM_25G;
+ if (reg_val & I40E_REG_MACC_25GB)
+ link->link_speed = ETH_SPEED_NUM_25G;
+ else
+ link->link_speed = ETH_SPEED_NUM_40G;
+ }
+ break;
+ case I40E_REG_SPEED_4:
+ if (hw->mac.type == I40E_MAC_X722)
+ link->link_speed = ETH_SPEED_NUM_10G;
else
- link->link_speed = ETH_SPEED_NUM_40G;
-
+ link->link_speed = ETH_SPEED_NUM_20G;
break;
default:
PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
@@ -10830,6 +10839,7 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
switch (link.link_speed) {
case ETH_SPEED_NUM_40G:
+ case ETH_SPEED_NUM_25G:
tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
break;
@@ -11890,16 +11900,17 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
bool is_sfp = false;
i40e_status status;
- uint8_t *data = info->data;
+ uint8_t *data;
uint32_t value = 0;
uint32_t i;
- if (!info || !info->length || !data)
+ if (!info || !info->length || !info->data)
return -EINVAL;
if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
is_sfp = true;
+ data = info->data;
for (i = 0; i < info->length; i++) {
u32 offset = i + info->offset;
u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
@@ -12201,8 +12212,8 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
for (n = 0; n < proto_num; n++) {
if (proto[n].proto_id != proto_id)
continue;
- strcat(name, proto[n].name);
- strcat(name, "_");
+ strlcat(name, proto[n].name, sizeof(name));
+ strlcat(name, "_", sizeof(name));
break;
}
}
@@ -12699,9 +12710,6 @@ i40e_config_rss_filter(struct i40e_pf *pf,
return -EINVAL;
}
- if (rss_info->conf.queue_num)
- return -EINVAL;
-
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
* It's necessary to calculate the actual PF queues that are configured.
*/
@@ -12744,6 +12752,8 @@ i40e_config_rss_filter(struct i40e_pf *pf,
rss_conf.rss_key = (uint8_t *)rss_key_default;
rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
+ PMD_DRV_LOG(INFO,
+ "No valid RSS key config for i40e, using default\n");
}
i40e_hw_rss_hash_set(pf, &rss_conf);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 100e71cc..551f6fa6 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1724,9 +1724,8 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- }
return 0;
}
@@ -2262,11 +2261,11 @@ i40evf_dev_close(struct rte_eth_dev *dev)
*/
i40evf_dev_promiscuous_disable(dev);
i40evf_dev_allmulticast_disable(dev);
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
i40evf_reset_vf(dev);
i40e_shutdown_adminq(hw);
i40evf_disable_irq0(hw);
- rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
hw->adapter_closed = 1;
}
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3694df25..a614ec1d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4445,6 +4445,14 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
}
+ if (rss_info->conf.queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "rss only allow one valid rule");
+ return -rte_errno;
+ }
+
/* Parse RSS related parameters from configuration */
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
return rte_flow_error_set
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8f727fae..1489552d 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1718,7 +1718,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
int use_scattered_rx =
- ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+ (rxq->max_pkt_len > buf_size);
if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
@@ -2423,13 +2423,13 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
struct rte_eth_dev *dev;
uint16_t i;
- dev = &rte_eth_devices[txq->port_id];
-
if (!txq || !txq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
return;
}
+ dev = &rte_eth_devices[txq->port_id];
+
/**
* vPMD tx will not set sw_ring's mbuf to NULL after free,
* so need to free remains more carefully.
@@ -2708,9 +2708,8 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
RTE_PKTMBUF_HEADROOM);
/* Check if scattered RX needs to be used. */
- if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ if (rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- }
/* Init the RX tail regieter. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index be4a6024..a1313146 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -270,7 +270,7 @@ struct rte_pmd_i40e_pkt_template_action {
struct rte_pmd_i40e_pkt_template_input {
/** the pctype used for raw packet template */
uint16_t pctype;
- /** the buffer conatining raw packet template */
+ /** the buffer containing raw packet template */
void *packet;
/** the length of buffer with raw packet template */
uint32_t length;
@@ -314,7 +314,7 @@ struct rte_pmd_i40e_inset {
* @param conf
* Specifies configuration parameters of raw packet template filter.
* @param add
- * Speicifes an action to be taken - add or remove raw packet template filter.
+ * Specifies an action to be taken - add or remove raw packet template filter.
* @return
* - (0) if successful.
* - (-ENODEV) if *port* invalid.
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9a79d18e..46c93f59 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2029,7 +2029,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
struct ixgbe_rx_entry *next_rxe = NULL;
struct rte_mbuf *first_seg;
struct rte_mbuf *rxm;
- struct rte_mbuf *nmb;
+ struct rte_mbuf *nmb = NULL;
union ixgbe_adv_rx_desc rxd;
uint16_t data_len;
uint16_t next_id;
@@ -2853,14 +2853,14 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER;
if (hw->mac.type == ixgbe_mac_82598EB)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
if (ixgbe_is_vf(dev) == 0)
- offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_EXTEND);
+ offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
/*
* RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index a1e9970d..9879985e 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -454,6 +454,7 @@ eth_kni_remove(struct rte_vdev_device *vdev)
struct rte_eth_dev *eth_dev;
struct pmd_internals *internals;
const char *name;
+ int ret;
name = rte_vdev_device_name(vdev);
PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name);
@@ -472,7 +473,9 @@ eth_kni_remove(struct rte_vdev_device *vdev)
eth_kni_dev_stop(eth_dev);
internals = eth_dev->data->dev_private;
- rte_kni_release(internals->kni);
+ ret = rte_kni_release(internals->kni);
+ if (ret)
+ PMD_LOG(WARNING, "Not able to release kni for %s", name);
rte_eth_dev_release_port(eth_dev);
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 7f07b8dc..4bc966d5 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -81,7 +81,7 @@ static void mlx4_dev_stop(struct rte_eth_dev *dev);
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
@@ -117,7 +117,7 @@ exit:
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
@@ -169,7 +169,7 @@ err:
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (!priv->started)
return;
@@ -194,7 +194,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
unsigned int i;
DEBUG("%p: closing device \"%s\"",
@@ -599,7 +599,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_context *ctx = NULL;
struct ibv_port_attr port_attr;
struct ibv_pd *pd = NULL;
- struct priv *priv = NULL;
+ struct mlx4_priv *priv = NULL;
struct rte_eth_dev *eth_dev = NULL;
struct ether_addr mac;
@@ -752,11 +752,11 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
* handled by rte_intr_rx_ctl().
*/
eth_dev->intr_handle = &priv->intr_handle;
- priv->dev = eth_dev;
+ priv->dev_data = eth_dev->data;
eth_dev->dev_ops = &mlx4_dev_ops;
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
- mlx4_dev_set_link_up(priv->dev);
+ mlx4_dev_set_link_up(eth_dev);
/* Update link status once if waiting for LSC. */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
mlx4_link_update(eth_dev, 0);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index e6fb934f..fc568eb3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -72,13 +72,14 @@ struct rxq;
struct txq;
struct rte_flow;
-LIST_HEAD(mlx4_dev_list, priv);
+LIST_HEAD(mlx4_dev_list, mlx4_priv);
LIST_HEAD(mlx4_mr_list, mlx4_mr);
/** Private data structure. */
-struct priv {
- LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
- struct rte_eth_dev *dev; /**< Ethernet device. */
+struct mlx4_priv {
+ LIST_ENTRY(mlx4_priv) mem_event_cb;
+ /**< Called by memory event callback. */
+ struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /**< Verbs context. */
struct ibv_device_attr device_attr; /**< Device properties. */
struct ibv_pd *pd; /**< Protection Domain. */
@@ -112,11 +113,14 @@ struct priv {
/**< Configured MAC addresses. Unused entries are zeroed. */
};
+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+
/* mlx4_ethdev.c */
-int mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]);
-int mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
-int mlx4_mtu_get(struct priv *priv, uint16_t *mtu);
+int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);
+int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu);
int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
@@ -143,10 +147,10 @@ int mlx4_is_removed(struct rte_eth_dev *dev);
/* mlx4_intr.c */
-int mlx4_intr_uninstall(struct priv *priv);
-int mlx4_intr_install(struct priv *priv);
-int mlx4_rxq_intr_enable(struct priv *priv);
-void mlx4_rxq_intr_disable(struct priv *priv);
+int mlx4_intr_uninstall(struct mlx4_priv *priv);
+int mlx4_intr_install(struct mlx4_priv *priv);
+int mlx4_rxq_intr_enable(struct mlx4_priv *priv);
+void mlx4_rxq_intr_disable(struct mlx4_priv *priv);
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 195a1b6d..084b24e4 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -59,7 +59,7 @@
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
+mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE])
{
DIR *dir;
struct dirent *dent;
@@ -146,7 +146,7 @@ try_dev_id:
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
+mlx4_ifreq(const struct mlx4_priv *priv, int req, struct ifreq *ifr)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
int ret;
@@ -176,7 +176,7 @@ mlx4_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
{
struct ifreq request;
int ret = mlx4_ifreq(priv, SIOCGIFHWADDR, &request);
@@ -199,7 +199,7 @@ mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
+mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu)
{
struct ifreq request;
int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request);
@@ -224,7 +224,7 @@ mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
int
mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct ifreq request = { .ifr_mtu = mtu, };
int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request);
@@ -248,7 +248,7 @@ mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
+mlx4_set_flags(struct mlx4_priv *priv, unsigned int keep, unsigned int flags)
{
struct ifreq request;
int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request);
@@ -272,7 +272,7 @@ mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_dev_set_link(struct priv *priv, int up)
+mlx4_dev_set_link(struct mlx4_priv *priv, int up)
{
int err;
@@ -300,7 +300,7 @@ mlx4_dev_set_link(struct priv *priv, int up)
int
mlx4_dev_set_link_down(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
return mlx4_dev_set_link(priv, 0);
}
@@ -317,7 +317,7 @@ mlx4_dev_set_link_down(struct rte_eth_dev *dev)
int
mlx4_dev_set_link_up(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
return mlx4_dev_set_link(priv, 1);
}
@@ -345,7 +345,7 @@ enum rxmode_toggle {
static void
mlx4_rxmode_toggle(struct rte_eth_dev *dev, enum rxmode_toggle toggle)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
const char *mode;
struct rte_flow_error error;
@@ -430,7 +430,7 @@ mlx4_allmulticast_disable(struct rte_eth_dev *dev)
void
mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
if (index >= RTE_DIM(priv->mac)) {
@@ -466,7 +466,7 @@ int
mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
int ret;
@@ -503,7 +503,7 @@ mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
int
mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow_error error;
unsigned int vidx = vlan_id / 64;
unsigned int vbit = vlan_id % 64;
@@ -557,7 +557,7 @@ mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
void
mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -688,7 +688,7 @@ mlx4_stats_reset(struct rte_eth_dev *dev)
int
mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- const struct priv *priv = dev->data->dev_private;
+ const struct mlx4_priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET,
};
@@ -741,7 +741,7 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
int
mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_GPAUSEPARAM,
@@ -785,7 +785,7 @@ out:
int
mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_SPAUSEPARAM,
@@ -853,7 +853,7 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_UNKNOWN
};
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (dev->rx_pkt_burst == mlx4_rx_burst) {
if (priv->hw_csum_l2tun)
@@ -877,7 +877,7 @@ int
mlx4_is_removed(struct rte_eth_dev *dev)
{
struct ibv_device_attr device_attr;
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (mlx4_glue->query_device(priv->ctx, &device_attr) == EIO)
return 1;
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index b40e7e5c..5136d136 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -71,7 +71,7 @@ struct mlx4_flow_proc_item {
struct mlx4_drop {
struct ibv_qp *qp; /**< QP target. */
struct ibv_cq *cq; /**< CQ associated with above QP. */
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
uint32_t refcnt; /**< Reference count. */
};
@@ -95,7 +95,7 @@ struct mlx4_drop {
* rte_errno is set.
*/
uint64_t
-mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk)
+mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
{
enum {
INNER,
@@ -657,7 +657,7 @@ static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_prepare(struct priv *priv,
+mlx4_flow_prepare(struct mlx4_priv *priv,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -767,7 +767,7 @@ fill:
if (flow->rss)
break;
queue = action->conf;
- if (queue->index >= priv->dev->data->nb_rx_queues) {
+ if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) {
msg = "queue target index beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
@@ -796,7 +796,7 @@ fill:
/* Sanity checks. */
for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
- priv->dev->data->nb_rx_queues)
+ ETH_DEV(priv)->data->nb_rx_queues)
break;
if (i != rss->queue_num) {
msg = "queue index target beyond number of"
@@ -928,7 +928,7 @@ mlx4_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}
@@ -944,7 +944,7 @@ mlx4_flow_validate(struct rte_eth_dev *dev,
* is set.
*/
static struct mlx4_drop *
-mlx4_drop_get(struct priv *priv)
+mlx4_drop_get(struct mlx4_priv *priv)
{
struct mlx4_drop *drop = priv->drop;
@@ -1020,7 +1020,7 @@ mlx4_drop_put(struct mlx4_drop *drop)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_toggle(struct priv *priv,
+mlx4_flow_toggle(struct mlx4_priv *priv,
struct rte_flow *flow,
int enable,
struct rte_flow_error *error)
@@ -1066,8 +1066,8 @@ mlx4_flow_toggle(struct priv *priv,
/* Stop at the first nonexistent target queue. */
for (i = 0; i != rss->queues; ++i)
if (rss->queue_id[i] >=
- priv->dev->data->nb_rx_queues ||
- !priv->dev->data->rx_queues[rss->queue_id[i]]) {
+ ETH_DEV(priv)->data->nb_rx_queues ||
+ !ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) {
missing = 1;
break;
}
@@ -1136,7 +1136,7 @@ mlx4_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int err;
@@ -1177,7 +1177,7 @@ mlx4_flow_isolate(struct rte_eth_dev *dev,
int enable,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
if (!!enable == !!priv->isolated)
return 0;
@@ -1200,7 +1200,7 @@ mlx4_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
int err = mlx4_flow_toggle(priv, flow, 0, error);
if (err)
@@ -1224,7 +1224,7 @@ static int
mlx4_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_flow *flow = LIST_FIRST(&priv->flows);
while (flow) {
@@ -1249,10 +1249,10 @@ mlx4_flow_flush(struct rte_eth_dev *dev,
* Next configured VLAN ID or a high value (>= 4096) if there is none.
*/
static uint16_t
-mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
+mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
{
while (vlan < 4096) {
- if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
+ if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] &
(UINT64_C(1) << (vlan % 64)))
return vlan;
++vlan;
@@ -1289,7 +1289,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
+mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
{
struct rte_flow_attr attr = {
.priority = MLX4_FLOW_PRIORITY_LAST,
@@ -1329,7 +1329,7 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
* get RSS by default.
*/
uint32_t queues =
- rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+ rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1;
uint16_t queue[queues];
struct rte_flow_action_rss action_rss = {
.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
@@ -1351,9 +1351,9 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
};
struct ether_addr *rule_mac = &eth_spec.dst;
rte_be16_t *rule_vlan =
- (priv->dev->data->dev_conf.rxmode.offloads &
+ (ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER) &&
- !priv->dev->data->promiscuous ?
+ !ETH_DEV(priv)->data->promiscuous ?
&vlan_spec.tci :
NULL;
uint16_t vlan = 0;
@@ -1433,7 +1433,7 @@ next_vlan:
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
memcpy(rule_mac, mac, sizeof(*mac));
- flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
@@ -1449,15 +1449,16 @@ next_vlan:
goto next_vlan;
}
/* Take care of promiscuous and all multicast flow rules. */
- if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
+ if (ETH_DEV(priv)->data->promiscuous ||
+ ETH_DEV(priv)->data->all_multicast) {
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_NEXT(flow, next)) {
- if (priv->dev->data->promiscuous) {
+ if (ETH_DEV(priv)->data->promiscuous) {
if (flow->promisc)
break;
} else {
- assert(priv->dev->data->all_multicast);
+ assert(ETH_DEV(priv)->data->all_multicast);
if (flow->allmulti)
break;
}
@@ -1471,16 +1472,16 @@ next_vlan:
}
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
- if (priv->dev->data->promiscuous) {
+ if (ETH_DEV(priv)->data->promiscuous) {
pattern[1].spec = NULL;
pattern[1].mask = NULL;
} else {
- assert(priv->dev->data->all_multicast);
+ assert(ETH_DEV(priv)->data->all_multicast);
pattern[1].spec = &eth_allmulti;
pattern[1].mask = &eth_allmulti;
}
pattern[2] = pattern[3];
- flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
@@ -1497,7 +1498,8 @@ error:
struct rte_flow *next = LIST_NEXT(flow, next);
if (!flow->select)
- claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+ error));
else
flow->select = 0;
flow = next;
@@ -1521,7 +1523,7 @@ error:
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
+mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error)
{
struct rte_flow *flow;
int ret;
@@ -1535,7 +1537,8 @@ mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_FIRST(&priv->flows))
- claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+ error));
} else {
/* Refresh internal rules. */
ret = mlx4_flow_internal(priv, error);
@@ -1563,12 +1566,12 @@ mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
* Pointer to private structure.
*/
void
-mlx4_flow_clean(struct priv *priv)
+mlx4_flow_clean(struct mlx4_priv *priv)
{
struct rte_flow *flow;
while ((flow = LIST_FIRST(&priv->flows)))
- mlx4_flow_destroy(priv->dev, flow, NULL);
+ mlx4_flow_destroy(ETH_DEV(priv), flow, NULL);
assert(LIST_EMPTY(&priv->rss));
}
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index 2917ebe9..03a4bd05 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -48,10 +48,10 @@ struct rte_flow {
/* mlx4_flow.c */
-uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types,
+uint64_t mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types,
int verbs_to_dpdk);
-int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
-void mlx4_flow_clean(struct priv *priv);
+int mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error);
+void mlx4_flow_clean(struct mlx4_priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index eeb982a0..4f335267 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -33,7 +33,7 @@
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
-static int mlx4_link_status_check(struct priv *priv);
+static int mlx4_link_status_check(struct mlx4_priv *priv);
/**
* Clean up Rx interrupts handler.
@@ -42,7 +42,7 @@ static int mlx4_link_status_check(struct priv *priv);
* Pointer to private structure.
*/
static void
-mlx4_rx_intr_vec_disable(struct priv *priv)
+mlx4_rx_intr_vec_disable(struct mlx4_priv *priv)
{
struct rte_intr_handle *intr_handle = &priv->intr_handle;
@@ -62,10 +62,10 @@ mlx4_rx_intr_vec_disable(struct priv *priv)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_rx_intr_vec_enable(struct priv *priv)
+mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
{
unsigned int i;
- unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
+ unsigned int rxqs_n = ETH_DEV(priv)->data->nb_rx_queues;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int count = 0;
struct rte_intr_handle *intr_handle = &priv->intr_handle;
@@ -79,7 +79,7 @@ mlx4_rx_intr_vec_enable(struct priv *priv)
return -rte_errno;
}
for (i = 0; i != n; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
/* Skip queues that cannot request interrupts. */
if (!rxq || !rxq->channel) {
@@ -117,15 +117,15 @@ mlx4_rx_intr_vec_enable(struct priv *priv)
* Pointer to private structure.
*/
static void
-mlx4_link_status_alarm(struct priv *priv)
+mlx4_link_status_alarm(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
assert(priv->intr_alarm == 1);
priv->intr_alarm = 0;
if (intr_conf->lsc && !mlx4_link_status_check(priv))
- _rte_eth_dev_callback_process(priv->dev,
+ _rte_eth_dev_callback_process(ETH_DEV(priv),
RTE_ETH_EVENT_INTR_LSC,
NULL);
}
@@ -143,10 +143,10 @@ mlx4_link_status_alarm(struct priv *priv)
* otherwise and rte_errno is set.
*/
static int
-mlx4_link_status_check(struct priv *priv)
+mlx4_link_status_check(struct mlx4_priv *priv)
{
- struct rte_eth_link *link = &priv->dev->data->dev_link;
- int ret = mlx4_link_update(priv->dev, 0);
+ struct rte_eth_link *link = &ETH_DEV(priv)->data->dev_link;
+ int ret = mlx4_link_update(ETH_DEV(priv), 0);
if (ret)
return ret;
@@ -175,7 +175,7 @@ mlx4_link_status_check(struct priv *priv)
* Pointer to private structure.
*/
static void
-mlx4_interrupt_handler(struct priv *priv)
+mlx4_interrupt_handler(struct mlx4_priv *priv)
{
enum { LSC, RMV, };
static const enum rte_eth_event_type type[] = {
@@ -185,7 +185,7 @@ mlx4_interrupt_handler(struct priv *priv)
uint32_t caught[RTE_DIM(type)] = { 0 };
struct ibv_async_event event;
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
unsigned int i;
/* Read all message and acknowledge them. */
@@ -208,7 +208,7 @@ mlx4_interrupt_handler(struct priv *priv)
}
for (i = 0; i != RTE_DIM(caught); ++i)
if (caught[i])
- _rte_eth_dev_callback_process(priv->dev, type[i],
+ _rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
NULL);
}
@@ -251,7 +251,7 @@ mlx4_arm_cq(struct rxq *rxq, int solicited)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_intr_uninstall(struct priv *priv)
+mlx4_intr_uninstall(struct mlx4_priv *priv)
{
int err = rte_errno; /* Make sure rte_errno remains unchanged. */
@@ -279,10 +279,10 @@ mlx4_intr_uninstall(struct priv *priv)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_intr_install(struct priv *priv)
+mlx4_intr_install(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
int rc;
mlx4_intr_uninstall(priv);
@@ -378,10 +378,10 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_rxq_intr_enable(struct priv *priv)
+mlx4_rxq_intr_enable(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
goto error;
@@ -397,7 +397,7 @@ error:
* Pointer to private structure.
*/
void
-mlx4_rxq_intr_disable(struct priv *priv)
+mlx4_rxq_intr_disable(struct mlx4_priv *priv)
{
int err = rte_errno; /* Make sure rte_errno remains unchanged. */
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index a0094483..98c236fb 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -348,7 +348,7 @@ mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
unsigned int n;
DEBUG("port %u inserting MR(%p) to global cache",
@@ -389,7 +389,7 @@ static struct mlx4_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
/* Iterate all the existing MRs. */
@@ -430,7 +430,7 @@ static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
uint16_t idx;
uint32_t lkey = UINT32_MAX;
struct mlx4_mr *mr;
@@ -477,7 +477,7 @@ mr_free(struct mlx4_mr *mr)
}
/**
- * Releass resources of detached MR having no online entry.
+ * Release resources of detached MR having no online entry.
*
* @param dev
* Pointer to Ethernet device.
@@ -485,7 +485,7 @@ mr_free(struct mlx4_mr *mr)
static void
mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr_next;
struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
@@ -525,7 +525,7 @@ mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
}
/**
- * Create a new global Memroy Region (MR) for a missing virtual address.
+ * Create a new global Memory Region (MR) for a missing virtual address.
* Register entire virtually contiguous memory chunk around the address.
*
* @param dev
@@ -543,7 +543,7 @@ static uint32_t
mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
@@ -623,7 +623,7 @@ alloc_resources:
bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
if (mr->ms_bmp == NULL) {
- WARN("port %u unable to initialize bitamp for a new MR of"
+ WARN("port %u unable to initialize bitmap for a new MR of"
" address (%p).",
dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
@@ -769,7 +769,7 @@ err_nolock:
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
@@ -801,7 +801,7 @@ mr_rebuild_dev_cache(struct rte_eth_dev *dev)
static void
mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
const struct rte_memseg_list *msl;
struct mlx4_mr *mr;
int ms_n;
@@ -889,14 +889,14 @@ void
mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
- struct priv *priv;
+ struct mlx4_priv *priv;
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
/* Iterate all the existing mlx4 devices. */
LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
- mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
+ mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
break;
case RTE_MEM_EVENT_ALLOC:
@@ -926,7 +926,7 @@ static uint32_t
mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
struct mlx4_mr_cache *entry, uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
uint16_t idx;
uint32_t lkey;
@@ -1024,11 +1024,9 @@ uint32_t
mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
{
struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
- struct priv *priv = rxq->priv;
+ struct mlx4_priv *priv = rxq->priv;
- DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
@@ -1046,11 +1044,9 @@ static uint32_t
mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq->priv;
+ struct mlx4_priv *priv = txq->priv;
- DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
@@ -1122,7 +1118,7 @@ mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
{
struct mr_update_mp_data *data = opaque;
struct rte_eth_dev *dev = data->dev;
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
struct mlx4_mr *mr = NULL;
uintptr_t addr = (uintptr_t)memhdr->addr;
@@ -1223,9 +1219,9 @@ uint32_t
mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq->priv;
+ struct mlx4_priv *priv = txq->priv;
- mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
+ mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx4_tx_addr2mr_bh(txq, addr);
}
@@ -1289,7 +1285,7 @@ mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
void
mlx4_mr_dump_dev(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
int mr_n = 0;
int chunk_n = 0;
@@ -1332,8 +1328,8 @@ mlx4_mr_dump_dev(struct rte_eth_dev *dev)
void
mlx4_mr_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+ struct mlx4_priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next;
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
@@ -1344,6 +1340,7 @@ mlx4_mr_release(struct rte_eth_dev *dev)
#endif
rte_rwlock_write_lock(&priv->mr.rwlock);
/* Detach from MR list and move to free list. */
+ mr_next = LIST_FIRST(&priv->mr.mr_list);
while (mr_next != NULL) {
struct mlx4_mr *mr = mr_next;
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 6804c634..50f33eb0 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -87,7 +87,7 @@ mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
* Pointer to RSS context on success, NULL otherwise and rte_errno is set.
*/
struct mlx4_rss *
-mlx4_rss_get(struct priv *priv, uint64_t fields,
+mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields,
const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[])
{
@@ -175,7 +175,8 @@ mlx4_rss_attach(struct mlx4_rss *rss)
}
struct ibv_wq *ind_tbl[rss->queues];
- struct priv *priv = rss->priv;
+ struct mlx4_priv *priv = rss->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const char *msg;
unsigned int i = 0;
int ret;
@@ -189,8 +190,8 @@ mlx4_rss_attach(struct mlx4_rss *rss)
uint16_t id = rss->queue_id[i];
struct rxq *rxq = NULL;
- if (id < priv->dev->data->nb_rx_queues)
- rxq = priv->dev->data->rx_queues[id];
+ if (id < dev->data->nb_rx_queues)
+ rxq = dev->data->rx_queues[id];
if (!rxq) {
ret = EINVAL;
msg = "RSS target queue is not configured";
@@ -269,7 +270,7 @@ error:
rss->ind = NULL;
}
while (i--)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
ERROR("mlx4: %s", msg);
--rss->usecnt;
rte_errno = ret;
@@ -290,7 +291,8 @@ error:
void
mlx4_rss_detach(struct mlx4_rss *rss)
{
- struct priv *priv = rss->priv;
+ struct mlx4_priv *priv = rss->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
unsigned int i;
assert(rss->refcnt);
@@ -303,7 +305,7 @@ mlx4_rss_detach(struct mlx4_rss *rss)
claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
rss->ind = NULL;
for (i = 0; i != rss->queues; ++i)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
}
/**
@@ -327,9 +329,9 @@ mlx4_rss_detach(struct mlx4_rss *rss)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_rss_init(struct priv *priv)
+mlx4_rss_init(struct mlx4_priv *priv)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
uint32_t wq_num_prev = 0;
const char *msg;
@@ -338,7 +340,7 @@ mlx4_rss_init(struct priv *priv)
if (priv->rss_init)
return 0;
- if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) {
ERROR("RSS does not support more than %d queues",
priv->hw_rss_max_qps);
rte_errno = EINVAL;
@@ -356,8 +358,8 @@ mlx4_rss_init(struct priv *priv)
rte_errno = ret;
return -ret;
}
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
struct ibv_cq *cq;
struct ibv_wq *wq;
uint32_t wq_num;
@@ -432,7 +434,7 @@ error:
ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
i, msg, strerror(ret));
while (i--) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq)
mlx4_rxq_detach(rxq);
@@ -451,14 +453,14 @@ error:
* Pointer to private structure.
*/
void
-mlx4_rss_deinit(struct priv *priv)
+mlx4_rss_deinit(struct mlx4_priv *priv)
{
unsigned int i;
if (!priv->rss_init)
return;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq) {
assert(rxq->usecnt == 1);
@@ -493,8 +495,8 @@ mlx4_rxq_attach(struct rxq *rxq)
return 0;
}
- struct priv *priv = rxq->priv;
- struct rte_eth_dev *dev = priv->dev;
+ struct mlx4_priv *priv = rxq->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const uint32_t elts_n = 1 << rxq->elts_n;
const uint32_t sges_n = 1 << rxq->sges_n;
struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
@@ -561,7 +563,7 @@ mlx4_rxq_attach(struct rxq *rxq)
}
/* Pre-register Rx mempool. */
DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
- priv->dev->data->port_id, rxq->stats.idx,
+ ETH_DEV(priv)->data->port_id, rxq->stats.idx,
rxq->mp->name, rxq->mp->nb_mem_chunks);
mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
wqes = (volatile struct mlx4_wqe_data_seg (*)[])
@@ -675,7 +677,7 @@ mlx4_rxq_detach(struct rxq *rxq)
* Supported Tx offloads.
*/
uint64_t
-mlx4_get_rx_queue_offloads(struct priv *priv)
+mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC |
@@ -696,7 +698,7 @@ mlx4_get_rx_queue_offloads(struct priv *priv)
* Supported Rx offloads.
*/
uint64_t
-mlx4_get_rx_port_offloads(struct priv *priv)
+mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -728,7 +730,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
struct rxq *rxq;
@@ -911,17 +913,17 @@ void
mlx4_rx_queue_release(void *dpdk_rxq)
{
struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct priv *priv;
+ struct mlx4_priv *priv;
unsigned int i;
if (rxq == NULL)
return;
priv = rxq->priv;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
- if (priv->dev->data->rx_queues[i] == rxq) {
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
+ if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
DEBUG("%p: removing Rx queue %p from list",
- (void *)priv->dev, (void *)rxq);
- priv->dev->data->rx_queues[i] = NULL;
+ (void *)ETH_DEV(priv), (void *)rxq);
+ ETH_DEV(priv)->data->rx_queues[i] = NULL;
break;
}
assert(!rxq->cq);
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index d7ec4e0c..29389f1e 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -38,7 +38,7 @@ struct mlx4_rxq_stats {
/** Rx queue descriptor. */
struct rxq {
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
struct rte_mempool *mp; /**< Memory pool for allocations. */
struct ibv_cq *cq; /**< Completion queue. */
struct ibv_wq *wq; /**< Work queue. */
@@ -65,7 +65,7 @@ struct rxq {
/** Shared flow target for Rx queues. */
struct mlx4_rss {
LIST_ENTRY(mlx4_rss) next; /**< Next entry in list. */
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
uint32_t refcnt; /**< Reference count for this object. */
uint32_t usecnt; /**< Number of users relying on @p qp and @p ind. */
struct ibv_qp *qp; /**< Queue pair. */
@@ -111,7 +111,7 @@ struct txq {
uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
uint8_t *bounce_buf;
/**< Memory used for storing the first DWORD of data TXBBs. */
- struct priv *priv; /**< Back pointer to private data. */
+ struct mlx4_priv *priv; /**< Back pointer to private data. */
unsigned int socket; /**< CPU socket ID for allocations. */
struct ibv_cq *cq; /**< Completion queue. */
struct ibv_qp *qp; /**< Queue pair. */
@@ -121,9 +121,9 @@ struct txq {
/* mlx4_rxq.c */
uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
-int mlx4_rss_init(struct priv *priv);
-void mlx4_rss_deinit(struct priv *priv);
-struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
+int mlx4_rss_init(struct mlx4_priv *priv);
+void mlx4_rss_deinit(struct mlx4_priv *priv);
+struct mlx4_rss *mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields,
const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
@@ -131,8 +131,8 @@ int mlx4_rss_attach(struct mlx4_rss *rss);
void mlx4_rss_detach(struct mlx4_rss *rss);
int mlx4_rxq_attach(struct rxq *rxq);
void mlx4_rxq_detach(struct rxq *rxq);
-uint64_t mlx4_get_rx_port_offloads(struct priv *priv);
-uint64_t mlx4_get_rx_queue_offloads(struct priv *priv);
+uint64_t mlx4_get_rx_port_offloads(struct mlx4_priv *priv);
+uint64_t mlx4_get_rx_queue_offloads(struct mlx4_priv *priv);
int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
@@ -152,7 +152,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
/* mlx4_txq.c */
-uint64_t mlx4_get_tx_port_offloads(struct priv *priv);
+uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 9aa7440d..35270082 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -107,7 +107,7 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
* Supported Tx offloads.
*/
uint64_t
-mlx4_get_tx_port_offloads(struct priv *priv)
+mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
@@ -148,7 +148,7 @@ int
mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4dv_obj mlxdv;
struct mlx4dv_qp dv_qp;
struct mlx4dv_cq dv_cq;
@@ -351,17 +351,17 @@ void
mlx4_tx_queue_release(void *dpdk_txq)
{
struct txq *txq = (struct txq *)dpdk_txq;
- struct priv *priv;
+ struct mlx4_priv *priv;
unsigned int i;
if (txq == NULL)
return;
priv = txq->priv;
- for (i = 0; i != priv->dev->data->nb_tx_queues; ++i)
- if (priv->dev->data->tx_queues[i] == txq) {
+ for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
+ if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
DEBUG("%p: removing Tx queue %p from list",
- (void *)priv->dev, (void *)txq);
- priv->dev->data->tx_queues[i] = NULL;
+ (void *)ETH_DEV(priv), (void *)txq);
+ ETH_DEV(priv)->data->tx_queues[i] = NULL;
break;
}
mlx4_txq_free_elts(txq);
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index e7668bd5..d91d55b5 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -157,9 +157,10 @@ mlx5_prepare_shared_data(void)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_mr_mem_event_cb,
+ NULL);
}
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_mr_mem_event_cb, NULL);
}
rte_spinlock_unlock(&mlx5_shared_data_lock);
}
@@ -200,7 +201,7 @@ mlx5_getenv_int(const char *name)
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
- struct priv *priv = data;
+ struct mlx5_priv *priv = data;
void *ret;
size_t alignment = sysconf(_SC_PAGESIZE);
unsigned int socket = SOCKET_ID_ANY;
@@ -248,7 +249,7 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret;
@@ -335,7 +336,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
while (i--) {
- struct priv *opriv =
+ struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
@@ -630,7 +631,7 @@ find_lower_va_bound(const struct rte_memseg_list *msl,
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *addr = (void *)0;
if (uar_base) { /* UAR address space mapped. */
@@ -676,7 +677,7 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *addr;
assert(priv->uar_base);
@@ -739,7 +740,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
struct ibv_pd *pd = NULL;
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
struct rte_eth_dev *eth_dev = NULL;
- struct priv *priv = NULL;
+ struct mlx5_priv *priv = NULL;
int err = 0;
unsigned int hw_padding = 0;
unsigned int mps;
@@ -1001,7 +1002,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
while (i--) {
- const struct priv *opriv =
+ const struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
@@ -1233,8 +1234,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->config = config;
/* Supported Verbs flow priority number detection. */
err = mlx5_flow_discover_priorities(eth_dev);
- if (err < 0)
+ if (err < 0) {
+ err = -err;
goto error;
+ }
priv->config.flow_prio = err;
/*
* Once the device is added to the list of memory event
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bc500b2b..91efd21b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -62,7 +62,7 @@ struct mlx5_switch_info {
uint64_t switch_id; /**< Switch identifier. */
};
-LIST_HEAD(mlx5_dev_list, priv);
+LIST_HEAD(mlx5_dev_list, mlx5_priv);
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data {
@@ -172,8 +172,9 @@ struct mlx5_drop {
struct mlx5_flow_tcf_context;
-struct priv {
- LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
+struct mlx5_priv {
+ LIST_ENTRY(mlx5_priv) mem_event_cb;
+ /**< Called by memory event callback. */
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr_ex device_attr; /* Device properties. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index bfe66558..480b33c8 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -63,10 +63,11 @@
/* Default maximum number of Tx queues for vectorized Tx. */
#if defined(RTE_ARCH_ARM64)
#define MLX5_VPMD_MAX_TXQS 8
+#define MLX5_VPMD_MAX_TXQS_BLUEFIELD 16
#else
#define MLX5_VPMD_MAX_TXQS 4
+#define MLX5_VPMD_MAX_TXQS_BLUEFIELD MLX5_VPMD_MAX_TXQS
#endif
-#define MLX5_VPMD_MAX_TXQS_BLUEFIELD 16
/* Threshold of buffer replenishment for vectorized Rx. */
#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index d178ed6a..fb8e313a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -131,7 +131,7 @@ static int
mlx5_get_master_ifname(const struct rte_eth_dev *dev,
char (*ifname)[IF_NAMESIZE])
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
DIR *dir;
struct dirent *dent;
unsigned int dev_type = 0;
@@ -219,7 +219,7 @@ try_dev_id:
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int ifindex =
priv->nl_socket_rdma >= 0 ?
mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0;
@@ -377,7 +377,7 @@ mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int rxqs_n = dev->data->nb_rx_queues;
unsigned int txqs_n = dev->data->nb_tx_queues;
unsigned int i;
@@ -460,7 +460,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
static void
mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
/* Minimum CPU utilization. */
info->default_rxportconf.ring_size = 256;
@@ -499,7 +499,7 @@ mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -540,7 +540,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
while (i--) {
- struct priv *opriv =
+ struct mlx5_priv *opriv =
rte_eth_devices[port_id[i]].data->dev_private;
if (!opriv ||
@@ -609,7 +609,7 @@ static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
};
@@ -685,7 +685,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
@@ -840,7 +840,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t kern_mtu = 0;
int ret;
@@ -1015,7 +1015,7 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
static uint32_t
mlx5_dev_status_handler(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_async_event event;
uint32_t ret = 0;
@@ -1087,7 +1087,7 @@ mlx5_dev_handler_socket(void *cb_arg)
void
mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rmv)
@@ -1111,7 +1111,7 @@ mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
void
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
int flags;
@@ -1187,7 +1187,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
struct mlx5_dev_config *config = &priv->config;
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
@@ -1271,7 +1271,7 @@ int
mlx5_is_removed(struct rte_eth_dev *dev)
{
struct ibv_device_attr device_attr;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
return 1;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ee129b97..222cd81d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -350,6 +350,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
claim_zero(mlx5_glue->destroy_flow(flow));
priority = vprio[i];
}
+ mlx5_hrxq_drop_release(dev);
switch (priority) {
case 8:
priority = RTE_DIM(priority_map_3);
@@ -361,10 +362,9 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
rte_errno = ENOTSUP;
DRV_LOG(ERR,
"port %u verbs maximum priority: %d expected 8/16",
- dev->data->port_id, vprio[i]);
+ dev->data->port_id, priority);
return -rte_errno;
}
- mlx5_hrxq_drop_release(dev);
DRV_LOG(INFO, "port %u flow maximum priority: %d",
dev->data->port_id, priority);
return priority;
@@ -387,7 +387,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
uint32_t subpriority)
{
uint32_t res = 0;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
switch (priv->config.flow_prio) {
case RTE_DIM(priority_map_3):
@@ -536,7 +536,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
const int mark = !!(flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -599,7 +599,7 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
const int mark = !!(flow->actions &
(MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
@@ -661,7 +661,7 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
@@ -786,7 +786,7 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
@@ -829,7 +829,7 @@ mlx5_flow_validate_action_drop(uint64_t action_flags,
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
@@ -838,7 +838,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_queue *queue = action->conf;
if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -875,21 +875,25 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
* Pointer to the Ethernet device structure.
* @param[in] attr
* Attributes of flow that includes this action.
+ * @param[in] item_flags
+ * Items that were detected.
* @param[out] error
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
uint64_t action_flags,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
+ uint64_t item_flags,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *rss = action->conf;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
if (action_flags & MLX5_FLOW_FATE_ACTIONS)
@@ -950,6 +954,11 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
"rss action not supported for "
"egress");
+ if (rss->level > 1 && !tunnel)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+ "inner RSS is not supported for "
+ "non-tunnel flows");
return 0;
}
@@ -964,7 +973,7 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
@@ -998,7 +1007,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint32_t priority_max = priv->config.flow_prio - 1;
if (attributes->group)
@@ -1462,7 +1471,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_vxlan_gpe *spec = item->spec;
const struct rte_flow_item_vxlan_gpe *mask = item->mask;
int ret;
@@ -1616,7 +1625,7 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
const struct rte_flow_item_mpls *mask = item->mask;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
if (!priv->config.mpls_en)
@@ -1747,7 +1756,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
static enum mlx5_flow_drv_type
flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
if (attr->transfer)
@@ -1776,7 +1785,7 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static inline int
flow_drv_validate(struct rte_eth_dev *dev,
@@ -1815,7 +1824,7 @@ flow_drv_validate(struct rte_eth_dev *dev,
* Pointer to the error structure.
*
* @return
- * Pointer to device flow on success, otherwise NULL and rte_ernno is set.
+ * Pointer to device flow on success, otherwise NULL and rte_errno is set.
*/
static inline struct mlx5_flow *
flow_drv_prepare(const struct rte_flow *flow,
@@ -1859,7 +1868,7 @@ flow_drv_prepare(const struct rte_flow *flow,
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static inline int
flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
@@ -2121,8 +2130,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- return flow_list_create(dev,
- &((struct priv *)dev->data->dev_private)->flows,
+ struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private;
+
+ return flow_list_create(dev, &priv->flows,
attr, items, actions, error);
}
@@ -2232,7 +2242,7 @@ error:
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int ret = 0;
@@ -2268,7 +2278,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
struct rte_flow_item_vlan *vlan_spec,
struct rte_flow_item_vlan *vlan_mask)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
.priority = MLX5_FLOW_PRIO_RSVD,
@@ -2359,7 +2369,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
flow_list_destroy(dev, &priv->flows, flow);
return 0;
@@ -2375,7 +2385,7 @@ int
mlx5_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
return 0;
@@ -2392,7 +2402,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
int enable,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
@@ -2470,7 +2480,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct mlx5_fdir *attributes)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_eth_fdir_input *input = &fdir_filter->input;
const struct rte_eth_fdir_masks *mask =
&dev->data->dev_conf.fdir_conf.mask;
@@ -2687,7 +2697,7 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
static struct rte_flow *
flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
assert(fdir_flow);
@@ -2716,7 +2726,7 @@ static int
flow_fdir_filter_add(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_fdir *fdir_flow;
struct rte_flow *flow;
int ret;
@@ -2763,7 +2773,7 @@ static int
flow_fdir_filter_delete(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow;
struct mlx5_fdir fdir_flow = {
.attr.group = 0,
@@ -2816,7 +2826,7 @@ flow_fdir_filter_update(struct rte_eth_dev *dev,
static void
flow_fdir_filter_flush(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->flows);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4a7c0529..e1424c78 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -363,6 +363,7 @@ int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
uint64_t action_flags,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
+ uint64_t item_flags,
struct rte_flow_error *error);
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 1f318748..207edcbc 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -293,7 +293,7 @@ flow_dv_encap_decap_resource_register
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
/* Lookup a matching resource from cache. */
@@ -722,7 +722,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint32_t priority_max = priv->config.flow_prio - 1;
if (attributes->group)
@@ -764,7 +764,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
@@ -776,7 +776,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
uint64_t action_flags = 0;
uint64_t item_flags = 0;
uint64_t last_item = 0;
- int tunnel = 0;
uint8_t next_protocol = 0xff;
int actions_n = 0;
@@ -786,7 +785,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
if (ret < 0)
return ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -958,7 +957,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
case RTE_FLOW_ACTION_TYPE_RSS:
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
- attr, error);
+ attr, item_flags,
+ error);
if (ret < 0)
return ret;
action_flags |= MLX5_FLOW_ACTION_RSS;
@@ -1043,7 +1043,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
*
* @return
* Pointer to mlx5_flow object on success,
- * otherwise NULL and rte_ernno is set.
+ * otherwise NULL and rte_errno is set.
*/
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
@@ -1800,7 +1800,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_matcher *cache_matcher;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
@@ -1873,7 +1873,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
* Pointer to the error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_dv_translate(struct rte_eth_dev *dev,
@@ -1883,7 +1883,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
uint64_t item_flags = 0;
uint64_t last_item = 0;
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
index 96b9dd72..92f984f9 100644
--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -2344,7 +2344,7 @@ flow_tcf_validate(struct rte_eth_dev *dev,
*/
if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
(action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
- ((struct priv *)port_id_dev->data->dev_private)->representor)
+ ((struct mlx5_priv *)port_id_dev->data->dev_private)->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan push can only be applied"
@@ -5321,7 +5321,7 @@ flow_tcf_check_inhw(struct mlx5_flow_tcf_context *tcf,
static void
flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh;
@@ -5410,7 +5410,7 @@ static int
flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh;
@@ -5894,7 +5894,7 @@ flow_tcf_query_count(struct rte_eth_dev *dev,
{
struct flow_tcf_stats_basic sb_data;
struct rte_flow_query_count *qc = data;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mnl_socket *nl = ctx->nl;
struct mlx5_flow *dev_flow;
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 409e1cd0..1fdbca3d 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -55,7 +55,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_counter_set_init_attr init = {
.counter_set_id = counter->id};
@@ -66,7 +66,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
}
return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_counters_init_attr init = {0};
struct ibv_counter_attach_attr attach;
int ret;
@@ -117,7 +117,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
static struct mlx5_flow_counter *
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter *cnt;
int ret;
@@ -1191,7 +1191,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RSS:
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
- attr,
+ attr, item_flags,
error);
if (ret < 0)
return ret;
@@ -1383,7 +1383,7 @@ flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
* Pointer to the error structure.
*
* @return
- * 0 on success, else a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, else a negative errno value otherwise and rte_errno is set.
*/
static int
flow_verbs_translate(struct rte_eth_dev *dev,
@@ -1398,7 +1398,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
uint64_t action_flags = 0;
uint64_t priority = attr->priority;
uint32_t subpriority = 0;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 672a4761..bce026f9 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -67,7 +67,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
static void
mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const int vf = priv->config.vf;
assert(index < MLX5_MAX_MAC_ADDRESSES);
@@ -96,7 +96,7 @@ static int
mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const int vf = priv->config.vf;
unsigned int i;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 442b2d23..01bf5910 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -336,7 +336,7 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int n;
DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
@@ -377,7 +377,7 @@ static struct mlx5_mr *
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
/* Iterate all the existing MRs. */
@@ -418,7 +418,7 @@ static uint32_t
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t idx;
uint32_t lkey = UINT32_MAX;
struct mlx5_mr *mr;
@@ -465,7 +465,7 @@ mr_free(struct mlx5_mr *mr)
}
/**
- * Releass resources of detached MR having no online entry.
+ * Release resources of detached MR having no online entry.
*
* @param dev
* Pointer to Ethernet device.
@@ -473,7 +473,7 @@ mr_free(struct mlx5_mr *mr)
static void
mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr_next;
struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
@@ -515,7 +515,7 @@ mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
}
/**
- * Create a new global Memroy Region (MR) for a missing virtual address.
+ * Create a new global Memory Region (MR) for a missing virtual address.
* Register entire virtually contiguous memory chunk around the address.
*
* @param dev
@@ -533,7 +533,7 @@ static uint32_t
mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
@@ -623,7 +623,7 @@ alloc_resources:
bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
if (mr->ms_bmp == NULL) {
- DEBUG("port %u unable to initialize bitamp for a new MR of"
+ DEBUG("port %u unable to initialize bitmap for a new MR of"
" address (%p).",
dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
@@ -769,7 +769,7 @@ err_nolock:
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
@@ -803,7 +803,7 @@ mr_rebuild_dev_cache(struct rte_eth_dev *dev)
static void
mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_memseg_list *msl;
struct mlx5_mr *mr;
int ms_n;
@@ -888,9 +888,11 @@ void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
- struct priv *priv;
+ struct mlx5_priv *priv;
struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+ /* Must be called from the primary process. */
+ assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
@@ -926,7 +928,7 @@ static uint32_t
mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
struct mlx5_mr_cache *entry, uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
uint16_t idx;
uint32_t lkey;
@@ -1026,11 +1028,8 @@ mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
- struct priv *priv = rxq_ctrl->priv;
+ struct mlx5_priv *priv = rxq_ctrl->priv;
- DRV_LOG(DEBUG,
- "Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
@@ -1051,11 +1050,8 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq_ctrl->priv;
+ struct mlx5_priv *priv = txq_ctrl->priv;
- DRV_LOG(DEBUG,
- "Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
@@ -1128,7 +1124,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
{
struct mr_update_mp_data *data = opaque;
struct rte_eth_dev *dev = data->dev;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
struct mlx5_mr *mr = NULL;
uintptr_t addr = (uintptr_t)memhdr->addr;
@@ -1136,6 +1132,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
struct mlx5_mr_cache entry;
uint32_t lkey;
+ assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* If already registered, it should return. */
rte_rwlock_read_lock(&priv->mr.rwlock);
lkey = mr_lookup_dev(dev, &entry, addr);
@@ -1235,8 +1232,17 @@ mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq_ctrl->priv;
+ struct mlx5_priv *priv = txq_ctrl->priv;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DRV_LOG(WARNING,
+ "port %u using address (%p) from unregistered mempool"
+ " having externally allocated memory"
+ " in secondary process, please create mempool"
+ " prior to rte_eth_dev_start()",
+ PORT_ID(priv), (void *)addr);
+ return UINT32_MAX;
+ }
mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx5_tx_addr2mr_bh(txq, addr);
}
@@ -1301,7 +1307,7 @@ void
mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
{
#ifndef NDEBUG
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
int mr_n = 0;
int chunk_n = 0;
@@ -1343,8 +1349,8 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
void
mlx5_mr_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next;
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
@@ -1354,6 +1360,7 @@ mlx5_mr_release(struct rte_eth_dev *dev)
mlx5_mr_dump_dev(dev);
rte_rwlock_write_lock(&priv->mr.rwlock);
/* Detach from MR list and move to free list. */
+ mr_next = LIST_FIRST(&priv->mr.mr_list);
while (mr_next != NULL) {
struct mlx5_mr *mr = mr_next;
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
index d61826ae..fe5a2746 100644
--- a/drivers/net/mlx5/mlx5_nl.c
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -361,7 +361,7 @@ static int
mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[],
int *mac_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
@@ -420,7 +420,7 @@ static int
mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac,
int add)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
@@ -492,7 +492,7 @@ int
mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
ret = mlx5_nl_mac_addr_modify(dev, mac, 1);
@@ -520,7 +520,7 @@ int
mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
BITFIELD_RESET(priv->mac_own, index);
return mlx5_nl_mac_addr_modify(dev, mac, 0);
@@ -572,7 +572,7 @@ mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev)
void
mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int i;
for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) {
@@ -599,7 +599,7 @@ mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev)
static int
mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index b95778a8..891d764b 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -41,7 +41,7 @@ int
mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int idx;
@@ -95,7 +95,7 @@ int
mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (!rss_conf) {
rte_errno = EINVAL;
@@ -125,7 +125,7 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
int
mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
void *mem;
unsigned int old_size = priv->reta_idx_n;
@@ -165,7 +165,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
@@ -201,7 +201,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t reta_size)
{
int ret;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
unsigned int pos;
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index e74fdef8..d5077db0 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -32,7 +32,7 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 1;
@@ -60,7 +60,7 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 0;
@@ -81,7 +81,7 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 1;
@@ -109,7 +109,7 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 10b6ce0c..f1ce3170 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -68,7 +68,7 @@ static_assert(MLX5_RSS_HASH_KEY_LEN ==
inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (priv->config.mprq.enabled &&
priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
@@ -103,7 +103,7 @@ mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t i;
uint16_t n = 0;
@@ -382,7 +382,7 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
uint64_t
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
@@ -438,7 +438,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -489,7 +489,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
{
struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
- struct priv *priv;
+ struct mlx5_priv *priv;
if (rxq == NULL)
return;
@@ -514,7 +514,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
@@ -592,7 +592,7 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
void
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_intr_handle *intr_handle = dev->intr_handle;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
@@ -664,7 +664,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -702,7 +702,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_ibv *rxq_ibv = NULL;
@@ -730,6 +730,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq_data->cq_arm_sn++;
mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+ mlx5_rxq_ibv_release(rxq_ibv);
return 0;
exit:
ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -755,7 +756,7 @@ exit:
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -1042,7 +1043,7 @@ error:
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -1098,7 +1099,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
int
mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_rxq_ibv *rxq_ibv;
@@ -1149,7 +1150,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
int
mlx5_mprq_free_mp(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_mempool *mp = priv->mprq_mp;
unsigned int i;
@@ -1200,7 +1201,7 @@ mlx5_mprq_free_mp(struct rte_eth_dev *dev)
int
mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_mempool *mp = priv->mprq_mp;
char name[RTE_MEMPOOL_NAMESIZE];
unsigned int desc = 0;
@@ -1272,7 +1273,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
return -rte_errno;
}
}
- snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
+ snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init, NULL,
dev->device->numa_node, 0);
@@ -1319,7 +1320,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
unsigned int mprq_stride_size;
@@ -1494,7 +1495,7 @@ error:
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* A pointer to the queue if it exists, NULL otherwise.
@@ -1502,7 +1503,7 @@ error:
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
if ((*priv->rxqs)[idx]) {
@@ -1521,7 +1522,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* 1 while a reference on it exists, 0 when freed.
@@ -1529,7 +1530,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx])
@@ -1554,7 +1555,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* 1 if the queue can be released, negative errno otherwise and rte_errno is
@@ -1563,7 +1564,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx]) {
@@ -1586,7 +1587,7 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_rxq_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
@@ -1615,7 +1616,7 @@ struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
@@ -1679,7 +1680,7 @@ struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
@@ -1741,7 +1742,7 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
int
mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
int ret = 0;
@@ -1783,7 +1784,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint16_t *queues, uint32_t queues_n,
int tunnel __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
@@ -1899,7 +1900,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
queues_n = hash_fields ? queues_n : 1;
@@ -1962,7 +1963,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
int
mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
int ret = 0;
@@ -1987,7 +1988,7 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_cq *cq;
struct ibv_wq *wq = NULL;
struct mlx5_rxq_ibv *rxq;
@@ -2046,7 +2047,7 @@ error:
void
mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
if (rxq->wq)
@@ -2069,7 +2070,7 @@ mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
struct mlx5_rxq_ibv *rxq;
struct mlx5_ind_table_ibv tmpl;
@@ -2112,7 +2113,7 @@ error:
void
mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
@@ -2133,7 +2134,7 @@ mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
struct mlx5_hrxq *
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
struct mlx5_hrxq *hrxq;
@@ -2196,7 +2197,7 @@ error:
void
mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 6eceea5f..38ce0e29 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -509,7 +509,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
uint32_t
mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq;
if (dev->rx_pkt_burst != mlx5_rx_burst) {
@@ -693,7 +693,8 @@ pkt_inline:
RTE_CACHE_LINE_SIZE);
copy_b = (addr_end > addr) ?
RTE_MIN((addr_end - addr), length) : 0;
- if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
+ if (copy_b && ((end - (uintptr_t)raw) >
+ (copy_b + sizeof(inl)))) {
/*
* One Dseg remains in the current WQE. To
* keep the computation positive, it is
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 75194a3f..820675b8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -58,7 +58,7 @@ struct mlx5_txq_stats {
uint64_t oerrors; /**< Total number of failed transmitted packets. */
};
-struct priv;
+struct mlx5_priv;
/* Compressed CQE context. */
struct rxq_zip {
@@ -143,7 +143,7 @@ struct mlx5_rxq_ctrl {
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
- struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int irq:1; /* Whether IRQ is enabled. */
@@ -228,7 +228,7 @@ struct mlx5_txq_ctrl {
unsigned int max_inline_data; /* Max inline data. */
unsigned int max_tso_header; /* Max TSO header size. */
struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
- struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
struct mlx5_txq_data txq; /* Data path structure. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
volatile void *bf_reg_orig; /* Blueflame register from verbs. */
@@ -491,7 +491,7 @@ check_cqe(volatile struct mlx5_cqe *cqe,
op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
- sizeof(*err_cqe));
+ sizeof(*cqe));
}
return 1;
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
@@ -568,6 +568,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
#endif /* NDEBUG */
++cq_ci;
+ rte_cio_rmb();
txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
ctrl = (volatile struct mlx5_wqe_ctrl *)
tx_mlx5_wqe(txq, txq->wqe_pi);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 340292ad..9a3a5ae4 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -273,7 +273,7 @@ mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
int __attribute__((cold))
mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
if (!priv->config.tx_vec_en ||
@@ -318,7 +318,7 @@ mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
int __attribute__((cold))
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t i;
if (!priv->config.rx_vec_en)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index fda7004e..86735044 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -102,7 +102,22 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
return;
}
for (i = 0; i < n; ++i) {
- wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+ void *buf_addr;
+
+ /*
+ * Load the virtual address for Rx WQE. non-x86 processors
+ * (mostly RISC such as ARM and Power) are more vulnerable to
+ * load stall. For x86, reducing the number of instructions
+ * seems to matter most.
+ */
+#ifdef RTE_ARCH_X86_64
+ buf_addr = elts[i]->buf_addr;
+#else
+ buf_addr = (char *)elts[i] + sizeof(struct rte_mbuf) +
+ rte_pktmbuf_priv_size(rxq->mp);
+ assert(buf_addr == elts[i]->buf_addr);
+#endif
+ wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
RTE_PKTMBUF_HEADROOM);
/* If there's only one MR, no need to replace LKey in WQE. */
if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 883fe1bf..38e915c5 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -104,6 +104,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
unsigned int n;
volatile struct mlx5_wqe *wqe = NULL;
+ bool metadata_ol =
+ txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
@@ -127,6 +129,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
uint8x16_t *t_wqe;
uint8_t *dseg;
uint8x16_t ctrl;
+ rte_be32_t metadata =
+ metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
+ buf->tx_metadata : 0;
assert(segs_n);
max_elts = elts_n - (elts_head - txq->elts_tail);
@@ -164,9 +169,10 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
- vst1q_u16((void *)(t_wqe + 1),
- ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
- 0, 0, 0, 0 }));
+ vst1q_u32((void *)(t_wqe + 1),
+ ((uint32x4_t){ 0,
+ cs_flags << 16 | rte_cpu_to_be_16(len),
+ metadata, 0 }));
txq->wqe_ci = wqe_ci;
}
if (!n)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 14117c4b..fb384efd 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -104,6 +104,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
unsigned int n;
volatile struct mlx5_wqe *wqe = NULL;
+ bool metadata_ol =
+ txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
@@ -125,6 +127,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
uint16_t max_wqe;
__m128i *t_wqe, *dseg;
__m128i ctrl;
+ rte_be32_t metadata =
+ metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
+ buf->tx_metadata : 0;
assert(segs_n);
max_elts = elts_n - (elts_head - txq->elts_tail);
@@ -165,9 +170,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
_mm_store_si128(t_wqe, ctrl);
/* Fill ESEG in the header. */
_mm_store_si128(t_wqe + 1,
- _mm_set_epi16(0, 0, 0, 0,
- rte_cpu_to_be_16(len), cs_flags,
- 0, 0));
+ _mm_set_epi32(0, metadata,
+ (rte_cpu_to_be_16(len) << 16) |
+ cs_flags, 0));
txq->wqe_ci = wqe_ci;
}
if (!n)
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 00106171..41cac3c6 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -26,7 +26,7 @@
int
mlx5_socket_init(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
@@ -98,7 +98,7 @@ error:
void
mlx5_socket_uninit(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket);
claim_zero(close(priv->primary_socket));
@@ -115,7 +115,7 @@ mlx5_socket_uninit(struct rte_eth_dev *dev)
void
mlx5_socket_handle(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int conn_sock;
int ret = 0;
struct cmsghdr *cmsg = NULL;
@@ -208,7 +208,7 @@ error:
int
mlx5_socket_connect(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index fccb9af0..132bf5b4 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -143,7 +143,7 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
static int
mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
struct ifreq ifr;
@@ -221,7 +221,7 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
void
mlx5_xstats_init(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
unsigned int j;
@@ -312,7 +312,7 @@ int
mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
unsigned int n)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
uint64_t counters[n];
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
@@ -353,7 +353,7 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_eth_stats tmp;
unsigned int i;
unsigned int idx;
@@ -416,7 +416,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
void
mlx5_stats_reset(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int idx;
@@ -448,7 +448,7 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
void
mlx5_xstats_reset(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
unsigned int i;
@@ -492,7 +492,7 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
unsigned int i;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int mlx5_xstats_n = xstats_ctrl->mlx5_stats_n;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index e2a9bb70..f874657c 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -23,7 +23,7 @@
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->txqs_n; ++i)
@@ -42,7 +42,7 @@ mlx5_txq_stop(struct rte_eth_dev *dev)
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret;
@@ -83,7 +83,7 @@ error:
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i)
@@ -102,7 +102,7 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
@@ -157,7 +157,7 @@ error:
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
@@ -221,7 +221,7 @@ error:
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
@@ -252,7 +252,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_item_eth bcast = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
@@ -379,7 +379,7 @@ error:
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b01bd675..c5a3d1b4 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -102,7 +102,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
uint64_t
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT);
struct mlx5_dev_config *config = &priv->config;
@@ -155,7 +155,7 @@ int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
@@ -213,7 +213,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
{
struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
struct mlx5_txq_ctrl *txq_ctrl;
- struct priv *priv;
+ struct mlx5_priv *priv;
unsigned int i;
if (txq == NULL)
@@ -246,7 +246,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
int
mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i, j;
uintptr_t pages[priv->txqs_n];
unsigned int pages_n = 0;
@@ -346,7 +346,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
* @param dev
* Pointer to Ethernet device.
* @param idx
- * Queue index in DPDK Rx queue array
+ * Queue index in DPDK Tx queue array.
*
* @return
* The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -354,7 +354,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
struct mlx5_txq_ibv *
mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -554,7 +554,7 @@ error:
* @param dev
* Pointer to Ethernet device.
* @param idx
- * Queue index in DPDK Rx queue array
+ * Queue index in DPDK Tx queue array.
*
* @return
* The Verbs object if it exists.
@@ -562,7 +562,7 @@ error:
struct mlx5_txq_ibv *
mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
if (idx >= priv->txqs_n)
@@ -623,7 +623,7 @@ mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
int
mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_txq_ibv *txq_ibv;
@@ -636,6 +636,27 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
}
/**
+ * Calculate the total number of WQEBB for Tx queue.
+ *
+ * Simplified version of calc_sq_size() in rdma-core.
+ *
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
+ *
+ * @return
+ * The number of WQEBB.
+ */
+static int
+txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
+{
+ unsigned int wqe_size;
+ const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
+
+ wqe_size = MLX5_WQE_SIZE + txq_ctrl->max_inline_data;
+ return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
+}
+
+/**
* Set Tx queue parameters from device configuration.
*
* @param txq_ctrl
@@ -644,7 +665,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
- struct priv *priv = txq_ctrl->priv;
+ struct mlx5_priv *priv = txq_ctrl->priv;
struct mlx5_dev_config *config = &priv->config;
const unsigned int max_tso_inline =
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
@@ -754,7 +775,7 @@ struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
tmpl = rte_calloc_socket("TXQ", 1,
@@ -780,10 +801,16 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->txq.elts_n = log2above(desc);
tmpl->idx = idx;
txq_set_params(tmpl);
- DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
- DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ if (txq_calc_wqebb_cnt(tmpl) >
+ priv->device_attr.orig_attr.max_qp_wr) {
+ DRV_LOG(ERR,
+ "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
+ " try smaller queue size",
+ dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
+ priv->device_attr.orig_attr.max_qp_wr);
+ rte_errno = ENOMEM;
+ goto error;
+ }
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
@@ -809,7 +836,7 @@ error:
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *ctrl = NULL;
if ((*priv->txqs)[idx]) {
@@ -835,7 +862,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
size_t page_size = sysconf(_SC_PAGESIZE);
@@ -872,7 +899,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
if (!(*priv->txqs)[idx])
@@ -893,7 +920,7 @@ mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_txq_verify(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
int ret = 0;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index c91d08be..6568a3a4 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -48,7 +48,7 @@
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
@@ -102,7 +102,7 @@ out:
void
mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -160,7 +160,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
if (mask & ETH_VLAN_STRIP_MASK) {
diff --git a/drivers/net/mvpp2/mrvl_mtr.c b/drivers/net/mvpp2/mrvl_mtr.c
index 9adcd975..39272ace 100644
--- a/drivers/net/mvpp2/mrvl_mtr.c
+++ b/drivers/net/mvpp2/mrvl_mtr.c
@@ -1,5 +1,4 @@
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Marvell International Ltd.
* Copyright(c) 2018 Semihalf.
* All rights reserved.
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index da76b0db..fad209f2 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -733,6 +733,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
hv->port_id = eth_dev->data->port_id;
hv->latency = HN_CHAN_LATENCY_NS;
hv->max_queues = 1;
+ hv->vf_port = HN_INVALID_PORT;
err = hn_parse_args(eth_dev);
if (err)
@@ -786,7 +787,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
/* If VF was reported but not added, do it now */
- if (hv->vf_present && !hv->vf_dev) {
+ if (hv->vf_present && !hn_vf_attached(hv)) {
PMD_INIT_LOG(DEBUG, "Adding VF device");
err = hn_vf_add(eth_dev, hv);
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 487f7646..5ffc0ee1 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -123,7 +123,7 @@ hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
stats->size_bins[0]++;
else if (s < 1519)
stats->size_bins[6]++;
- else if (s >= 1519)
+ else
stats->size_bins[7]++;
}
@@ -1305,8 +1305,8 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return 0;
/* Transmit over VF if present and up */
- vf_dev = hv->vf_dev;
- rte_compiler_barrier();
+ vf_dev = hn_get_vf_dev(hv);
+
if (vf_dev && vf_dev->data->dev_started) {
void *sub_q = vf_dev->data->tx_queues[queue_id];
@@ -1385,6 +1385,24 @@ fail:
return nb_tx;
}
+static uint16_t
+hn_recv_vf(uint16_t vf_port, const struct hn_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint16_t i, n;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ n = rte_eth_rx_burst(vf_port, rxq->queue_id, rx_pkts, nb_pkts);
+
+ /* relabel the received mbufs */
+ for (i = 0; i < n; i++)
+ rx_pkts[i]->port = rxq->port_id;
+
+ return n;
+}
+
uint16_t
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
@@ -1396,30 +1414,21 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (unlikely(hv->closed))
return 0;
- vf_dev = hv->vf_dev;
- rte_compiler_barrier();
+ /* Receive from VF if present and up */
+ vf_dev = hn_get_vf_dev(hv);
- if (vf_dev && vf_dev->data->dev_started) {
- /* Normally, with SR-IOV the ring buffer will be empty */
+ /* Check for new completions */
+ if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts))
hn_process_events(hv, rxq->queue_id, 0);
- /* Get mbufs some bufs off of staging ring */
- nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
- (void **)rx_pkts,
- nb_pkts / 2, NULL);
- /* And rest off of VF */
- nb_rcv += rte_eth_rx_burst(vf_dev->data->port_id,
- rxq->queue_id,
- rx_pkts + nb_rcv, nb_pkts - nb_rcv);
- } else {
- /* If receive ring is not full then get more */
- if (rte_ring_count(rxq->rx_ring) < nb_pkts)
- hn_process_events(hv, rxq->queue_id, 0);
+ /* Always check the vmbus path for multicast and new flows */
+ nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
+ (void **)rx_pkts, nb_pkts, NULL);
- nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
- (void **)rx_pkts,
- nb_pkts, NULL);
- }
+ /* If VF is available, check that as well */
+ if (vf_dev && vf_dev->data->dev_started)
+ nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
+ rx_pkts + nb_rcv, nb_pkts - nb_rcv);
return nb_rcv;
}
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index a6516c1e..b3156343 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -91,14 +91,18 @@ struct hn_rx_bufinfo {
struct rte_mbuf_ext_shared_info shinfo;
} __rte_cache_aligned;
+#define HN_INVALID_PORT UINT16_MAX
+
struct hn_data {
struct rte_vmbus_device *vmbus;
struct hn_rx_queue *primary;
- struct rte_eth_dev *vf_dev; /* Subordinate device */
rte_spinlock_t vf_lock;
uint16_t port_id;
- bool closed;
- bool vf_present;
+ uint16_t vf_port;
+
+ uint8_t vf_present;
+ uint8_t closed;
+
uint32_t link_status;
uint32_t link_speed;
@@ -169,6 +173,28 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mp);
void hn_dev_rx_queue_release(void *arg);
+/* Check if VF is attached */
+static inline bool
+hn_vf_attached(const struct hn_data *hv)
+{
+ return hv->vf_port != HN_INVALID_PORT;
+}
+
+/* Get VF device for existing netvsc device */
+static inline struct rte_eth_dev *
+hn_get_vf_dev(const struct hn_data *hv)
+{
+ uint16_t vf_port = hv->vf_port;
+
+ /* make sure vf_port is loaded */
+ rte_smp_rmb();
+
+ if (vf_port == HN_INVALID_PORT)
+ return NULL;
+ else
+ return &rte_eth_devices[vf_port];
+}
+
void hn_vf_info_get(struct hn_data *hv,
struct rte_eth_dev_info *info);
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index 3f714ec9..4127e411 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -10,8 +10,8 @@
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
+#include <fcntl.h>
#include <sys/types.h>
-#include <sys/fcntl.h>
#include <sys/uio.h>
#include <rte_ether.h>
@@ -51,15 +51,20 @@ static int hn_vf_match(const struct rte_eth_dev *dev)
return -ENOENT;
}
+
/*
* Attach new PCI VF device and return the port_id
*/
-static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
- struct rte_eth_dev **vf_dev)
+static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
int ret;
+ if (hn_vf_attached(hv)) {
+ PMD_DRV_LOG(ERR, "VF already attached");
+ return -EEXIST;
+ }
+
ret = rte_eth_dev_owner_get(port_id, &owner);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
@@ -79,8 +84,9 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
}
PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
+ hv->vf_port = port_id;
rte_smp_wmb();
- *vf_dev = &rte_eth_devices[port_id];
+
return 0;
}
@@ -96,12 +102,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
}
rte_spinlock_lock(&hv->vf_lock);
- if (hv->vf_dev) {
- PMD_DRV_LOG(ERR, "VF already attached");
- err = -EBUSY;
- } else {
- err = hn_vf_attach(hv, port, &hv->vf_dev);
- }
+ err = hn_vf_attach(hv, port);
if (err == 0) {
dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
@@ -120,22 +121,22 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
/* Remove new VF device */
static void hn_vf_remove(struct hn_data *hv)
{
- struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
- if (!vf_dev) {
+
+ if (!hn_vf_attached(hv)) {
PMD_DRV_LOG(ERR, "VF path not active");
- rte_spinlock_unlock(&hv->vf_lock);
- return;
- }
+ } else {
+ /* Stop incoming packets from arriving on VF */
+ hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
- /* Stop incoming packets from arriving on VF */
- hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
- hv->vf_dev = NULL;
+ /* Stop transmission over VF */
+ hv->vf_port = HN_INVALID_PORT;
+ rte_smp_wmb();
- /* Give back ownership */
- rte_eth_dev_owner_unset(vf_dev->data->port_id, hv->owner.id);
+ /* Give back ownership */
+ rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);
+ }
rte_spinlock_unlock(&hv->vf_lock);
}
@@ -207,7 +208,7 @@ void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
hn_vf_info_merge(vf_dev, info);
rte_spinlock_unlock(&hv->vf_lock);
@@ -221,7 +222,7 @@ int hn_vf_link_update(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->link_update)
ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
rte_spinlock_unlock(&hv->vf_lock);
@@ -249,13 +250,14 @@ static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
}
static int _hn_vf_configure(struct rte_eth_dev *dev,
- struct rte_eth_dev *vf_dev,
+ uint16_t vf_port,
const struct rte_eth_conf *dev_conf)
{
struct rte_eth_conf vf_conf = *dev_conf;
- uint16_t vf_port = vf_dev->data->port_id;
+ struct rte_eth_dev *vf_dev;
int ret;
+ vf_dev = &rte_eth_devices[vf_port];
if (dev_conf->intr_conf.lsc &&
(vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
@@ -294,13 +296,11 @@ int hn_vf_configure(struct rte_eth_dev *dev,
const struct rte_eth_conf *dev_conf)
{
struct hn_data *hv = dev->data->dev_private;
- struct rte_eth_dev *vf_dev;
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
- if (vf_dev)
- ret = _hn_vf_configure(dev, vf_dev, dev_conf);
+ if (hv->vf_port != HN_INVALID_PORT)
+ ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
rte_spinlock_unlock(&hv->vf_lock);
return ret;
}
@@ -312,7 +312,7 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
const uint32_t *ptypes = NULL;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
rte_spinlock_unlock(&hv->vf_lock);
@@ -327,7 +327,7 @@ int hn_vf_start(struct rte_eth_dev *dev)
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_start(vf_dev->data->port_id);
rte_spinlock_unlock(&hv->vf_lock);
@@ -340,7 +340,7 @@ void hn_vf_stop(struct rte_eth_dev *dev)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
rte_eth_dev_stop(vf_dev->data->port_id);
rte_spinlock_unlock(&hv->vf_lock);
@@ -352,7 +352,7 @@ void hn_vf_stop(struct rte_eth_dev *dev)
struct hn_data *hv = (dev)->data->dev_private; \
struct rte_eth_dev *vf_dev; \
rte_spinlock_lock(&hv->vf_lock); \
- vf_dev = hv->vf_dev; \
+ vf_dev = hn_get_vf_dev(hv); \
if (vf_dev) \
func(vf_dev->data->port_id); \
rte_spinlock_unlock(&hv->vf_lock); \
@@ -402,7 +402,7 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
mc_addr_set, nb_mc_addr);
@@ -420,7 +420,7 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
@@ -434,7 +434,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
void *subq = vf_dev->data->tx_queues[queue_id];
@@ -455,7 +455,7 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
queue_idx, nb_desc,
@@ -469,7 +469,7 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
void *subq = vf_dev->data->rx_queues[queue_id];
@@ -486,7 +486,7 @@ int hn_vf_stats_get(struct rte_eth_dev *dev,
int ret = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev)
ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
rte_spinlock_unlock(&hv->vf_lock);
@@ -503,7 +503,7 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
char tmp[RTE_ETH_XSTATS_NAME_SIZE];
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->xstats_get_names)
count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
rte_spinlock_unlock(&hv->vf_lock);
@@ -528,7 +528,7 @@ int hn_vf_xstats_get(struct rte_eth_dev *dev,
int count = 0;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->xstats_get)
count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
rte_spinlock_unlock(&hv->vf_lock);
@@ -542,7 +542,7 @@ void hn_vf_xstats_reset(struct rte_eth_dev *dev)
struct rte_eth_dev *vf_dev;
rte_spinlock_lock(&hv->vf_lock);
- vf_dev = hv->vf_dev;
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->dev_ops->xstats_reset)
vf_dev->dev_ops->xstats_reset(vf_dev);
rte_spinlock_unlock(&hv->vf_lock);
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 54c6da92..68c853c9 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -566,7 +566,10 @@ nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
/* Signal the NIC about the change */
update = NFP_NET_CFG_UPDATE_MACADDR;
- ctrl = hw->ctrl | NFP_NET_CFG_CTRL_LIVE_ADDR;
+ ctrl = hw->ctrl;
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+ (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+ ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
if (nfp_net_reconfig(hw, ctrl, update) < 0) {
PMD_INIT_LOG(INFO, "MAC address update failed");
return -EIO;
@@ -758,7 +761,7 @@ nfp_net_start(struct rte_eth_dev *dev)
return -EIO;
/*
- * Allocating rte mbuffs for configured rx queues.
+ * Allocating rte mbufs for configured rx queues.
* This requires queues being enabled before
*/
if (nfp_net_rx_freelist_setup(dev) < 0) {
@@ -1487,7 +1490,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
if (rxq == NULL)
return -ENOMEM;
- /* Hw queues mapping based on firmware confifguration */
+ /* Hw queues mapping based on firmware configuration */
rxq->qidx = queue_idx;
rxq->fl_qcidx = queue_idx * hw->stride_rx;
rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
@@ -1519,7 +1522,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
socket_id);
if (tz == NULL) {
- PMD_DRV_LOG(ERR, "Error allocatig rx dma");
+ PMD_DRV_LOG(ERR, "Error allocating rx dma");
nfp_net_rx_queue_release(rxq);
return -ENOMEM;
}
@@ -1906,7 +1909,7 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
/*
* RX path design:
*
- * There are some decissions to take:
+ * There are some decisions to take:
* 1) How to check DD RX descriptors bit
* 2) How and when to allocate new mbufs
*
@@ -1976,7 +1979,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_rmb();
/*
- * We got a packet. Let's alloc a new mbuff for refilling the
+ * We got a packet. Let's alloc a new mbuf for refilling the
* free descriptor ring as soon as possible
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
@@ -1991,8 +1994,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_hold++;
/*
- * Grab the mbuff and refill the descriptor with the
- * previously allocated mbuff
+ * Grab the mbuf and refill the descriptor with the
+ * previously allocated mbuf
*/
mb = rxb->mbuf;
rxb->mbuf = new_mb;
@@ -2024,7 +2027,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return -EINVAL;
}
- /* Filling the received mbuff with packet info */
+ /* Filling the received mbuf with packet info */
if (hw->rx_offset)
mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
else
@@ -2049,7 +2052,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
- /* Adding the mbuff to the mbuff array passed by the app */
+ /* Adding the mbuf to the mbuf array passed by the app */
rx_pkts[avail++] = mb;
/* Now resetting and updating the descriptor */
@@ -2443,7 +2446,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
for (j = 0; j < 4; j++) {
if (!(mask & (0x1 << j)))
continue;
- reta_conf->reta[shift + j] =
+ reta_conf[idx].reta[shift + j] =
(uint8_t)((reta >> (8 * j)) & 0xF);
}
}
@@ -2789,9 +2792,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
case PCI_DEVICE_ID_NFP6000_PF_NIC:
case PCI_DEVICE_ID_NFP6000_VF_NIC:
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
- tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+ tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
- rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
+ rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
break;
default:
PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
@@ -2954,9 +2957,9 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
return -ENOMEM;
if (ports > 1)
- sprintf(port_name, "%s_port%d", dev->device.name, port);
+ snprintf(port_name, 100, "%s_port%d", dev->device.name, port);
else
- sprintf(port_name, "%s", dev->device.name);
+ strlcat(port_name, dev->device.name, 100);
eth_dev = rte_eth_dev_allocate(port_name);
if (!eth_dev)
@@ -3021,28 +3024,31 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
/* Looking for firmware file in order of priority */
/* First try to find a firmware image specific for this device */
- sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+ snprintf(serial, sizeof(serial),
+ "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
cpp->interface & 0xff);
- sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
+ snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
+ serial);
PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
fw_f = open(fw_name, O_RDONLY);
- if (fw_f > 0)
+ if (fw_f >= 0)
goto read_fw;
/* Then try the PCI name */
- sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
+ snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
+ dev->device.name);
PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
fw_f = open(fw_name, O_RDONLY);
- if (fw_f > 0)
+ if (fw_f >= 0)
goto read_fw;
/* Finally try the card type and media */
- sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
+ snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
fw_f = open(fw_name, O_RDONLY);
if (fw_f < 0) {
@@ -3118,8 +3124,9 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
- sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
- nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
+ snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
+ nfp_fw_model, nfp_eth_table->count,
+ nfp_eth_table->ports[0].speed / 1000);
nsp = nfp_nsp_open(cpp);
if (!nsp) {
diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h
index 21e17da1..fc3540a2 100644
--- a/drivers/net/nfp/nfp_net_ctrl.h
+++ b/drivers/net/nfp/nfp_net_ctrl.h
@@ -122,7 +122,7 @@
#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */
#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
-#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1U << 31)/* live MAC addr change */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -135,7 +135,7 @@
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
-#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */
+#define NFP_NET_CFG_UPDATE_ERR (0x1U << 31) /* A error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
#define NFP_NET_CFG_MTU 0x0018
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index b01036df..d70cbc63 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -34,7 +34,7 @@
*
* @file dpdk/pmd/nfp_net_pmd.h
*
- * Netronome NFP_NET PDM driver
+ * Netronome NFP_NET PMD driver
*/
#ifndef _NFP_NET_PMD_H_
@@ -240,7 +240,7 @@ struct nfp_net_txq {
uint32_t tx_free_thresh;
/*
- * For each descriptor keep a reference to the mbuff and
+ * For each descriptor keep a reference to the mbuf and
* DMA address used until completion is signalled.
*/
struct {
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
index 6e380cca..538f882b 100644
--- a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
@@ -368,6 +368,9 @@ _nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40,
isld[1] = isld1;
locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+ if (locality_lsb < 0)
+ return NFP_ERRNO(EINVAL);
+
if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
da = 1;
else
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
index c68d9400..37b7991f 100644
--- a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
+++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -16,7 +16,9 @@
#include <assert.h>
#include <stdio.h>
+#if defined(RTE_BACKTRACE)
#include <execinfo.h>
+#endif
#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
@@ -788,17 +790,17 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
if (cpp->driver_lock_needed) {
ret = nfp_acquire_process_lock(desc);
if (ret)
- return -1;
+ goto error;
}
if (nfp6000_set_model(dev, cpp) < 0)
- return -1;
+ goto error;
if (nfp6000_set_interface(dev, cpp) < 0)
- return -1;
+ goto error;
if (nfp6000_set_serial(dev, cpp) < 0)
- return -1;
+ goto error;
if (nfp6000_set_barsz(dev, desc) < 0)
- return -1;
+ goto error;
desc->cfg = (char *)dev->mem_resource[0].addr;
@@ -809,7 +811,11 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
model = __nfp_cpp_model_autodetect(cpp);
nfp_cpp_model_set(cpp, model);
- return ret;
+ return 0;
+
+error:
+ free(desc);
+ return -1;
}
static void
diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c
index 75d3c974..dec4a8b6 100644
--- a/drivers/net/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c
@@ -801,7 +801,8 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp)
uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
uint32_t model = 0;
- nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model);
+ if (nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model))
+ return 0;
if (NFP_CPP_MODEL_IS_6000(model)) {
uint32_t tmp;
@@ -810,8 +811,10 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp)
/* The PL's PluDeviceID revision code is authoratative */
model &= ~0xff;
- nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
- NFP_PL_DEVICE_ID, &tmp);
+ if (nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
+ NFP_PL_DEVICE_ID, &tmp))
+ return 0;
+
model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10;
}
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 920f6f89..2a4a08af 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -21,6 +21,7 @@
#include "base/octeontx_pkovf.h"
#include "base/octeontx_io.h"
+#define OCTEONTX_PMD net_octeontx
#define OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT 12
#define OCTEONTX_VDEV_NR_PORT_ARG ("nr_port")
#define OCTEONTX_MAX_NAME_LEN 32
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 9fd93277..65bbd7e2 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -1258,7 +1258,8 @@ create_eth:
if (pp == NULL) {
PMD_LOG(ERR,
"Failed to allocate memory for process private");
- return -1;
+ ret = -1;
+ goto free_kvlist;
}
eth_dev->dev_ops = &ops;
@@ -1281,7 +1282,7 @@ create_eth:
eth_dev->tx_pkt_burst = eth_pcap_tx;
rte_eth_dev_probing_finish(eth_dev);
- return 0;
+ goto free_kvlist;
}
ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index 2aaf298f..7047eb9f 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -400,9 +400,9 @@
#define QM_BYTE_CRD_REG_WIDTH 24
#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
#define QM_WFQ_CRD_REG_WIDTH 32
-#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_SIGN_BIT (1U << (QM_WFQ_CRD_REG_WIDTH - 1))
#define QM_RL_CRD_REG_WIDTH 32
-#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_SIGN_BIT (1U << (QM_RL_CRD_REG_WIDTH - 1))
/*****************/
/* CAU CONSTANTS */
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 6d4a4dd7..2ce0ea9e 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -2250,7 +2250,7 @@ struct igu_cleanup {
#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
/* must always be set (use enum command_type_bit) */
-#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1U
#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
__le32 reserved1;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 158ca673..7bc09479 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -2420,7 +2420,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
#define GFT_RAM_LINE_DST_PORT_MASK 0x1
#define GFT_RAM_LINE_DST_PORT_SHIFT 30
-#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1U
#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
__le32 hi;
#define GFT_RAM_LINE_DSCP_MASK 0x1
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
index b8c2686f..92361e79 100644
--- a/drivers/net/qede/base/ecore_hw_defs.h
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -51,7 +51,7 @@ struct igu_ctrl_reg {
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
#define IGU_CTRL_REG_RESERVED_MASK 0x1
#define IGU_CTRL_REG_RESERVED_SHIFT 28
-#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_MASK 0x1U /* use enum igu_ctrl_cmd */
#define IGU_CTRL_REG_TYPE_SHIFT 31
};
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 518673dc..0b2f305e 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -2735,7 +2735,8 @@ static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_qedevf_pmd = {
.id_table = pci_id_qedevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = qedevf_eth_dev_pci_probe,
.remove = qedevf_eth_dev_pci_remove,
};
@@ -2754,7 +2755,8 @@ static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_qede_pmd = {
.id_table = pci_id_qede_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = qede_eth_dev_pci_probe,
.remove = qede_eth_dev_pci_remove,
};
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index eda19b2b..27bac099 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1420,13 +1420,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint32_t rss_hash;
int rx_alloc_count = 0;
- hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
- sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- rte_rmb();
-
- if (hw_comp_cons == sw_comp_cons)
- return 0;
/* Allocate buffers that we used in previous loop */
if (rxq->rx_alloc_count) {
@@ -1447,6 +1440,14 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->rx_alloc_count = 0;
}
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ rte_rmb();
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
while (sw_comp_cons != hw_comp_cons) {
ol_flags = 0;
packet_type = RTE_PTYPE_UNKNOWN;
@@ -1819,7 +1820,7 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
temp == PKT_TX_TUNNEL_GENEVE ||
temp == PKT_TX_TUNNEL_MPLSINUDP ||
temp == PKT_TX_TUNNEL_GRE)
- break;
+ continue;
}
rte_errno = -ENOTSUP;
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index aeb48f5e..c438da51 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -45,8 +45,8 @@ struct ring_queue {
};
struct pmd_internals {
- unsigned max_rx_queues;
- unsigned max_tx_queues;
+ unsigned int max_rx_queues;
+ unsigned int max_tx_queues;
struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
@@ -55,12 +55,11 @@ struct pmd_internals {
enum dev_action action;
};
-
static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_FIXED,
};
static int eth_ring_logtype;
@@ -138,6 +137,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_mempool *mb_pool __rte_unused)
{
struct pmd_internals *internals = dev->data->dev_private;
+
dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
return 0;
}
@@ -149,6 +149,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct pmd_internals *internals = dev->data->dev_private;
+
dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
return 0;
}
@@ -156,9 +157,10 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
static void
eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+ struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
+
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
@@ -169,7 +171,7 @@ eth_dev_info(struct rte_eth_dev *dev,
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- unsigned i;
+ unsigned int i;
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
@@ -197,8 +199,9 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
- unsigned i;
+ unsigned int i;
struct pmd_internals *internal = dev->data->dev_private;
+
for (i = 0; i < dev->data->nb_rx_queues; i++)
internal->rx_ring_queues[i].rx_pkts.cnt = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -250,8 +253,10 @@ static struct rte_vdev_driver pmd_ring_drv;
static int
do_eth_dev_ring_create(const char *name,
- struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
- struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
+ struct rte_ring * const rx_queues[],
+ const unsigned int nb_rx_queues,
+ struct rte_ring *const tx_queues[],
+ const unsigned int nb_tx_queues,
const unsigned int numa_node, enum dev_action action,
struct rte_eth_dev **eth_dev_p)
{
@@ -260,20 +265,20 @@ do_eth_dev_ring_create(const char *name,
struct rte_eth_dev *eth_dev = NULL;
void **rx_queues_local = NULL;
void **tx_queues_local = NULL;
- unsigned i;
+ unsigned int i;
PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
numa_node);
- rx_queues_local = rte_zmalloc_socket(name,
- sizeof(void *) * nb_rx_queues, 0, numa_node);
+ rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
+ sizeof(void *), 0, numa_node);
if (rx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
}
- tx_queues_local = rte_zmalloc_socket(name,
- sizeof(void *) * nb_tx_queues, 0, numa_node);
+ tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
+ sizeof(void *), 0, numa_node);
if (tx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
@@ -344,10 +349,10 @@ error:
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
- const unsigned nb_rx_queues,
+ const unsigned int nb_rx_queues,
struct rte_ring *const tx_queues[],
- const unsigned nb_tx_queues,
- const unsigned numa_node)
+ const unsigned int nb_tx_queues,
+ const unsigned int numa_node)
{
struct ring_internal_args args = {
.rx_queues = rx_queues,
@@ -357,8 +362,8 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
.numa_node = numa_node,
.addr = &args,
};
- char args_str[32] = { 0 };
- char ring_name[32] = { 0 };
+ char args_str[32];
+ char ring_name[RTE_RING_NAMESIZE];
uint16_t port_id = RTE_MAX_ETHPORTS;
int ret;
@@ -376,8 +381,14 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
return -1;
}
- snprintf(args_str, 32, "%s=%p", ETH_RING_INTERNAL_ARG, &args);
- snprintf(ring_name, 32, "net_ring_%s", name);
+ snprintf(args_str, sizeof(args_str), "%s=%p",
+ ETH_RING_INTERNAL_ARG, &args);
+
+ ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
+ if (ret >= (int)sizeof(ring_name)) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
ret = rte_vdev_init(ring_name, args_str);
if (ret) {
@@ -385,7 +396,11 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
return -1;
}
- rte_eth_dev_get_port_by_name(ring_name, &port_id);
+ ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
+ if (ret) {
+ rte_errno = ENODEV;
+ return -1;
+ }
return port_id;
}
@@ -398,20 +413,28 @@ rte_eth_from_ring(struct rte_ring *r)
}
static int
-eth_dev_ring_create(const char *name, const unsigned numa_node,
+eth_dev_ring_create(const char *name, const unsigned int numa_node,
enum dev_action action, struct rte_eth_dev **eth_dev)
{
/* rx and tx are so-called from point of view of first port.
* They are inverted from the point of view of second port
*/
struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
- unsigned i;
+ unsigned int i;
char rng_name[RTE_RING_NAMESIZE];
- unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
+ unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
RTE_PMD_RING_MAX_TX_RINGS);
for (i = 0; i < num_rings; i++) {
- snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
+ int cc;
+
+ cc = snprintf(rng_name, sizeof(rng_name),
+ "ETH_RXTX%u_%s", i, name);
+ if (cc >= (int)sizeof(rng_name)) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
+
rxtx[i] = (action == DEV_CREATE) ?
rte_ring_create(rng_name, 1024, numa_node,
RING_F_SP_ENQ|RING_F_SC_DEQ) :
@@ -429,17 +452,18 @@ eth_dev_ring_create(const char *name, const unsigned numa_node,
struct node_action_pair {
char name[PATH_MAX];
- unsigned node;
+ unsigned int node;
enum dev_action action;
};
struct node_action_list {
- unsigned total;
- unsigned count;
+ unsigned int total;
+ unsigned int count;
struct node_action_pair *list;
};
-static int parse_kvlist (const char *key __rte_unused, const char *value, void *data)
+static int parse_kvlist(const char *key __rte_unused,
+ const char *value, void *data)
{
struct node_action_list *info = data;
int ret;
@@ -552,8 +576,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
kvlist = rte_kvargs_parse(params, valid_arguments);
if (!kvlist) {
- PMD_LOG(INFO, "Ignoring unsupported parameters when creating"
- " rings-backed ethernet device");
+ PMD_LOG(INFO,
+ "Ignoring unsupported parameters when creatingrings-backed ethernet device");
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_CREATE, &eth_dev);
if (ret == -1) {
@@ -597,7 +621,7 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
goto out_free;
info->total = ret;
- info->list = (struct node_action_pair*)(info + 1);
+ info->list = (struct node_action_pair *)(info + 1);
ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
parse_kvlist, info);
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 6690053f..0d7311d6 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -1071,8 +1071,8 @@ sfc_unprobe(struct sfc_adapter *sa)
}
uint32_t
-sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
- uint32_t ll_default)
+sfc_register_logtype(const struct rte_pci_addr *pci_addr,
+ const char *lt_prefix_str, uint32_t ll_default)
{
size_t lt_prefix_str_size = strlen(lt_prefix_str);
size_t lt_str_size_max;
@@ -1092,7 +1092,7 @@ sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
lt_str[lt_prefix_str_size - 1] = '.';
- rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
+ rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
lt_str_size_max - lt_prefix_str_size);
lt_str[lt_str_size_max - 1] = '\0';
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index a94ca8e7..f1cb8300 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -311,7 +311,7 @@ int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp);
void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
-uint32_t sfc_register_logtype(struct sfc_adapter *sa,
+uint32_t sfc_register_logtype(const struct rte_pci_addr *pci_addr,
const char *lt_prefix_str,
uint32_t ll_default);
diff --git a/drivers/net/sfc/sfc_debug.h b/drivers/net/sfc/sfc_debug.h
index 6b600ff4..62f3937e 100644
--- a/drivers/net/sfc/sfc_debug.h
+++ b/drivers/net/sfc/sfc_debug.h
@@ -27,7 +27,8 @@
do { \
const struct sfc_adapter *_sa = (sa); \
\
- rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
+ rte_panic("sfc " PCI_PRI_FMT \
+ " #%" PRIu16 ": " fmt "\n", \
_sa->pci_addr.domain, _sa->pci_addr.bus, \
_sa->pci_addr.devid, _sa->pci_addr.function, \
_sa->port_id, ##args); \
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index ff6d5b48..cf229f8b 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -340,9 +340,7 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
bool eop;
- /* Both checks may be done, so use bit OR to have only one branching */
- if (unlikely((header_len > SFC_TSOH_STD_LEN) |
- (tcph_off > txq->tso_tcp_header_offset_limit)))
+ if (unlikely(tcph_off > txq->tso_tcp_header_offset_limit))
return EMSGSIZE;
/*
@@ -407,6 +405,13 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
SFC_TSOH_STD_LEN;
+ /*
+ * Discard a packet if header linearization is needed but
+ * the header is too big.
+ */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
hdr_addr = txq->tsoh + hdr_addr_off;
hdr_iova = txq->tsoh_iova + hdr_addr_off;
copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index a7322a1e..052d38cd 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -94,17 +94,17 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
/* Autonegotiation may be disabled */
dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_1G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_10G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_25G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_50G;
- if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
+ if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
dev_info->speed_capa |= ETH_LINK_SPEED_100G;
dev_info->max_rx_queues = sa->rxq_max;
@@ -860,6 +860,33 @@ fail_inval:
}
static int
+sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ boolean_t scatter_enabled;
+ const char *error;
+ unsigned int i;
+
+ for (i = 0; i < sa->rxq_count; i++) {
+ if ((sa->rxq_info[i].rxq->state & SFC_RXQ_INITIALIZED) == 0)
+ continue;
+
+ scatter_enabled = (sa->rxq_info[i].type_flags &
+ EFX_RXQ_FLAG_SCATTER);
+
+ if (!sfc_rx_check_scatter(pdu, sa->rxq_info[i].rxq->buf_size,
+ encp->enc_rx_prefix_size,
+ scatter_enabled, &error)) {
+ sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
+ error);
+ return EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct sfc_adapter *sa = dev->data->dev_private;
@@ -885,6 +912,10 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
sfc_adapter_lock(sa);
+ rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
+ if (rc != 0)
+ goto fail_check_scatter;
+
if (pdu != sa->port.pdu) {
if (sa->state == SFC_ADAPTER_STARTED) {
sfc_stop(sa);
@@ -921,6 +952,8 @@ fail_start:
sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
"PDU max size - port is stopped",
(unsigned int)pdu, (unsigned int)old_pdu);
+
+fail_check_scatter:
sfc_adapter_unlock(sa);
fail_inval:
@@ -1124,8 +1157,6 @@ sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct sfc_adapter *sa = dev->data->dev_private;
- sfc_log_init(sa, "RxQ=%u", rx_queue_id);
-
return sfc_rx_qdesc_npending(sa, rx_queue_id);
}
@@ -1877,7 +1908,7 @@ static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
};
static int
-sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
+sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev, uint32_t logtype_main)
{
/*
* Device private data has really many process-local pointers.
@@ -1891,12 +1922,14 @@ sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
if (dp_rx == NULL) {
- sfc_err(sa, "cannot find %s Rx datapath", sa->dp_rx_name);
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "cannot find %s Rx datapath", sa->dp_rx_name);
rc = ENOENT;
goto fail_dp_rx;
}
if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
- sfc_err(sa, "%s Rx datapath does not support multi-process",
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "%s Rx datapath does not support multi-process",
sa->dp_rx_name);
rc = EINVAL;
goto fail_dp_rx_multi_process;
@@ -1904,12 +1937,14 @@ sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
if (dp_tx == NULL) {
- sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "cannot find %s Tx datapath", sa->dp_tx_name);
rc = ENOENT;
goto fail_dp_tx;
}
if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
- sfc_err(sa, "%s Tx datapath does not support multi-process",
+ SFC_LOG(sa, RTE_LOG_ERR, logtype_main,
+ "%s Tx datapath does not support multi-process",
sa->dp_tx_name);
rc = EINVAL;
goto fail_dp_tx_multi_process;
@@ -1957,27 +1992,30 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
{
struct sfc_adapter *sa = dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint32_t logtype_main;
int rc;
const efx_nic_cfg_t *encp;
const struct ether_addr *from;
sfc_register_dp();
+ logtype_main = sfc_register_logtype(&pci_dev->addr,
+ SFC_LOGTYPE_MAIN_STR,
+ RTE_LOG_NOTICE);
+
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -sfc_eth_dev_secondary_set_ops(dev);
+ return -sfc_eth_dev_secondary_set_ops(dev, logtype_main);
/* Required for logging */
sa->pci_addr = pci_dev->addr;
sa->port_id = dev->data->port_id;
+ sa->logtype_main = logtype_main;
sa->eth_dev = dev;
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
- sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR,
- RTE_LOG_NOTICE);
-
rc = sfc_kvargs_parse(sa);
if (rc != 0)
goto fail_kvargs_parse;
diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c
index 007506b4..e485e07d 100644
--- a/drivers/net/sfc/sfc_mcdi.c
+++ b/drivers/net/sfc/sfc_mcdi.c
@@ -256,7 +256,8 @@ sfc_mcdi_init(struct sfc_adapter *sa)
if (rc != 0)
goto fail_dma_alloc;
- mcdi->logtype = sfc_register_logtype(sa, SFC_LOGTYPE_MCDI_STR,
+ mcdi->logtype = sfc_register_logtype(&sa->pci_addr,
+ SFC_LOGTYPE_MCDI_STR,
RTE_LOG_NOTICE);
emtp = &mcdi->transport;
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c792e0b2..a78d35a2 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -360,6 +360,18 @@ sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
return RTE_ETH_RX_DESC_UNAVAIL;
}
+boolean_t
+sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
+ boolean_t rx_scatter_enabled, const char **error)
+{
+ if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) {
+ *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
+ return B_FALSE;
+ }
+
+ return B_TRUE;
+}
+
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
@@ -964,6 +976,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
+ const char *error;
rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
&evq_entries, &rxq_max_fill_level);
@@ -987,10 +1000,11 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
goto fail_bad_conf;
}
- if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
- sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
- "object size is too small", sw_index);
+ if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
+ encp->enc_rx_prefix_size,
+ (offloads & DEV_RX_OFFLOAD_SCATTER),
+ &error)) {
+ sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
"PDU size %u plus Rx prefix %u bytes",
sw_index, buf_size, (unsigned int)sa->port.pdu,
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 3fba7d8a..65724b03 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -159,6 +159,10 @@ int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
efx_rx_hash_type_t *efx);
uint64_t sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa,
efx_rx_hash_type_t efx);
+boolean_t sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size,
+ uint32_t rx_prefix_size,
+ boolean_t rx_scatter_enabled,
+ const char **error);
#ifdef __cplusplus
}
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index 076a25d4..a28af0e7 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -107,10 +107,6 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
idx += SFC_TSO_OPT_DESCS_NUM;
- /* Packets which have too big headers should be discarded */
- if (unlikely(header_len > SFC_TSOH_STD_LEN))
- return EMSGSIZE;
-
/*
* The TCP header must start at most 208 bytes into the frame.
* If it starts later than this then the NIC won't realise
@@ -129,6 +125,13 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
* limitations on address boundaries crossing by DMA descriptor data.
*/
if (m->data_len < header_len) {
+ /*
+ * Discard a packet if header linearization is needed but
+ * the header is too big.
+ */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index aa73d264..242137e6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -737,7 +737,8 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* We may have reached this place for
* one of the following reasons:
*
- * 1) Packet header length is greater
+ * 1) Packet header linearization is needed
+ * and the header length is greater
* than SFC_TSOH_STD_LEN
* 2) TCP header starts at more then
* 208 bytes into the frame
diff --git a/drivers/net/softnic/rte_eth_softnic_flow.c b/drivers/net/softnic/rte_eth_softnic_flow.c
index 21e75300..aefc384d 100644
--- a/drivers/net/softnic/rte_eth_softnic_flow.c
+++ b/drivers/net/softnic/rte_eth_softnic_flow.c
@@ -1283,7 +1283,8 @@ flow_rule_action_get(struct pmd_internals *softnic,
action,
"QUEUE: Invalid RX queue ID");
- sprintf(name, "RXQ%u", (uint32_t)conf->index);
+ snprintf(name, sizeof(name), "RXQ%u",
+ (uint32_t)conf->index);
status = softnic_pipeline_port_out_find(softnic,
pipeline->name,
@@ -1373,7 +1374,7 @@ flow_rule_action_get(struct pmd_internals *softnic,
action,
"RSS: Invalid RX queue ID");
- sprintf(name, "RXQ%u",
+ snprintf(name, sizeof(name), "RXQ%u",
(uint32_t)conf->queue[i]);
status = softnic_pipeline_port_out_find(softnic,
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index a9342997..86787368 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -67,6 +67,8 @@
/* IPC key for queue fds sync */
#define TAP_MP_KEY "tap_mp_sync_queues"
+#define TAP_IOV_DEFAULT_MAX 1024
+
static int tap_devices_count;
static struct rte_vdev_driver pmd_tap_drv;
static struct rte_vdev_driver pmd_tun_drv;
@@ -1326,6 +1328,13 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
struct rx_queue *rxq = &internals->rxq[rx_queue_id];
struct rte_mbuf **tmp = &rxq->pool;
long iov_max = sysconf(_SC_IOV_MAX);
+
+ if (iov_max <= 0) {
+ TAP_LOG(WARNING,
+ "_SC_IOV_MAX is not defined. Using %d as default",
+ TAP_IOV_DEFAULT_MAX);
+ iov_max = TAP_IOV_DEFAULT_MAX;
+ }
uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
struct iovec (*iovecs)[nb_desc + 1];
int data_off = RTE_PKTMBUF_HEADROOM;
@@ -2055,13 +2064,14 @@ tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
int queue, fd_iterator;
/* Prepare the request */
+ memset(&request, 0, sizeof(request));
strlcpy(request.name, TAP_MP_KEY, sizeof(request.name));
strlcpy(request_param->port_name, port_name,
sizeof(request_param->port_name));
request.len_param = sizeof(*request_param);
/* Send request and receive reply */
ret = rte_mp_request_sync(&request, &replies, &timeout);
- if (ret < 0) {
+ if (ret < 0 || replies.nb_received != 1) {
TAP_LOG(ERR, "Failed to request queues from primary: %d",
rte_errno);
return -1;
@@ -2071,6 +2081,11 @@ tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
TAP_LOG(DEBUG, "Received IPC reply for %s", reply_param->port_name);
/* Attach the queues from received file descriptors */
+ if (reply_param->rxq_count + reply_param->txq_count != reply->num_fds) {
+ TAP_LOG(ERR, "Unexpected number of fds received");
+ return -1;
+ }
+
dev->data->nb_rx_queues = reply_param->rxq_count;
dev->data->nb_tx_queues = reply_param->txq_count;
fd_iterator = 0;
@@ -2078,7 +2093,7 @@ tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
process_private->rxq_fds[queue] = reply->fds[fd_iterator++];
for (queue = 0; queue < reply_param->txq_count; queue++)
process_private->txq_fds[queue] = reply->fds[fd_iterator++];
-
+ free(reply);
return 0;
}
@@ -2111,19 +2126,24 @@ tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer)
/* Fill file descriptors for all queues */
reply.num_fds = 0;
reply_param->rxq_count = 0;
+ if (dev->data->nb_rx_queues + dev->data->nb_tx_queues >
+ RTE_MP_MAX_FD_NUM){
+ TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds");
+ return -1;
+ }
+
for (queue = 0; queue < dev->data->nb_rx_queues; queue++) {
reply.fds[reply.num_fds++] = process_private->rxq_fds[queue];
reply_param->rxq_count++;
}
RTE_ASSERT(reply_param->rxq_count == dev->data->nb_rx_queues);
- RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
- RTE_ASSERT(reply.num_fds <= RTE_MP_MAX_FD_NUM);
reply_param->txq_count = 0;
for (queue = 0; queue < dev->data->nb_tx_queues; queue++) {
reply.fds[reply.num_fds++] = process_private->txq_fds[queue];
reply_param->txq_count++;
}
+ RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
/* Send reply */
strlcpy(reply.name, request->name, sizeof(reply.name));
diff --git a/drivers/net/tap/tap_bpf_program.c b/drivers/net/tap/tap_bpf_program.c
index 1cb73822..532e8838 100644
--- a/drivers/net/tap/tap_bpf_program.c
+++ b/drivers/net/tap/tap_bpf_program.c
@@ -106,7 +106,7 @@ rte_softrss_be(const __u32 *input_tuple, const uint8_t *rss_key,
for (j = 0; j < input_len; j++) {
#pragma unroll
for (i = 0; i < 32; i++) {
- if (input_tuple[j] & (1 << (31 - i))) {
+ if (input_tuple[j] & (1U << (31 - i))) {
hash ^= ((const __u32 *)def_rss_key)[j] << i |
(__u32)((uint64_t)
(((const __u32 *)def_rss_key)[j + 1])
diff --git a/drivers/net/vdev_netvsc/vdev_netvsc.c b/drivers/net/vdev_netvsc/vdev_netvsc.c
index 16303ef5..a5fd64e0 100644
--- a/drivers/net/vdev_netvsc/vdev_netvsc.c
+++ b/drivers/net/vdev_netvsc/vdev_netvsc.c
@@ -811,7 +811,7 @@ vdev_netvsc_cmp_rte_device(const struct rte_device *dev1,
static void
vdev_netvsc_scan_callback(__rte_unused void *arg)
{
- struct rte_vdev_device *dev;
+ struct rte_device *dev;
struct rte_devargs *devargs;
struct rte_bus *vbus = rte_bus_find_by_name("vdev");
@@ -819,8 +819,9 @@ vdev_netvsc_scan_callback(__rte_unused void *arg)
if (!strncmp(devargs->name, VDEV_NETVSC_DRIVER_NAME,
VDEV_NETVSC_DRIVER_NAME_LEN))
return;
- dev = (struct rte_vdev_device *)vbus->find_device(NULL,
- vdev_netvsc_cmp_rte_device, VDEV_NETVSC_DRIVER_NAME);
+
+ dev = vbus->find_device(NULL, vdev_netvsc_cmp_rte_device,
+ VDEV_NETVSC_DRIVER_NAME);
if (dev)
return;
if (rte_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d29..f938b7ce 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -71,7 +71,6 @@ static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int virtio_intr_enable(struct rte_eth_dev *dev);
static int virtio_intr_disable(struct rte_eth_dev *dev);
static int virtio_dev_queue_stats_mapping_set(
@@ -729,6 +728,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct virtqueue *vq = rxvq->vq;
virtqueue_enable_intr(vq);
+ virtio_mb();
return 0;
}
@@ -778,6 +778,21 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.mac_addr_set = virtio_mac_addr_set,
};
+/*
+ * dev_ops for virtio-user in secondary processes, as we just have
+ * some limited support currently.
+ */
+const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
+ .dev_infos_get = virtio_dev_info_get,
+ .stats_get = virtio_dev_stats_get,
+ .xstats_get = virtio_dev_xstats_get,
+ .xstats_get_names = virtio_dev_xstats_get_names,
+ .stats_reset = virtio_dev_stats_reset,
+ .xstats_reset = virtio_dev_stats_reset,
+ /* collect stats per queue */
+ .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+};
+
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
@@ -1693,6 +1708,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
out:
rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
return ret;
}
@@ -1820,6 +1836,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ hw->vtnet_hdr_size;
uint64_t rx_offloads = rxmode->offloads;
uint64_t tx_offloads = txmode->offloads;
uint64_t req_features;
@@ -1834,6 +1852,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
@@ -2185,6 +2206,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
host_features = VTPCI_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index e0f80e5a..39a9f7b7 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -45,6 +45,8 @@
1u << VIRTIO_NET_F_HOST_TSO4 | \
1u << VIRTIO_NET_F_HOST_TSO6)
+extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
+
/*
* CQ function prototype
*/
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433..7f7562dd 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -192,7 +192,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
static void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
- uint16_t i, used_idx, desc_idx = 0, last_idx;
+ uint16_t i, idx = vq->vq_used_cons_idx;
int16_t free_cnt = 0;
struct vq_desc_extra *dxp = NULL;
@@ -200,27 +200,16 @@ virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
return;
for (i = 0; i < num; i++) {
- struct vring_used_elem *uep;
-
- used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
- uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t)uep->id;
-
- dxp = &vq->vq_descx[desc_idx];
- vq->vq_used_cons_idx++;
-
+ dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
+ free_cnt += dxp->ndescs;
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
}
}
- last_idx = desc_idx + dxp->ndescs - 1;
- free_cnt = last_idx - vq->vq_desc_tail_idx;
- if (free_cnt <= 0)
- free_cnt += vq->vq_nentries;
-
- vq_ring_free_inorder(vq, last_idx, free_cnt);
+ vq->vq_free_cnt += free_cnt;
+ vq->vq_used_cons_idx = idx;
}
static inline int
@@ -421,7 +410,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
while (i < num) {
idx = idx & (vq->vq_nentries - 1);
- dxp = &vq->vq_descx[idx];
+ dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
@@ -472,7 +461,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
- dxp = &vq->vq_descx[idx];
+ if (in_order)
+ dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
+ else
+ dxp = &vq->vq_descx[idx];
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
@@ -788,7 +780,7 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
stats->size_bins[0]++;
else if (s < 1519)
stats->size_bins[6]++;
- else if (s >= 1519)
+ else
stats->size_bins[7]++;
}
@@ -1107,6 +1099,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue,
prev = rcv_pkts[nb_rx];
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ virtio_rmb();
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
rcv_cnt);
uint16_t extra_idx = 0;
@@ -1271,6 +1264,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ virtio_rmb();
uint32_t rx_num =
virtqueue_dequeue_burst_rx(vq,
rcv_pkts, len, rcv_cnt);
@@ -1380,6 +1374,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_pktmbuf_free(txm);
continue;
}
+ /* vlan_insert may add a header mbuf */
+ tx_pkts[nb_tx] = txm;
}
/* optimize ring usage */
@@ -1484,6 +1480,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
rte_pktmbuf_free(txm);
continue;
}
+ /* vlan_insert may add a header mbuf */
+ tx_pkts[nb_tx] = txm;
}
/* optimize ring usage */
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index 83a85cc6..55f47036 100644
--- a/drivers/net/virtio/virtio_user/vhost.h
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -2,8 +2,8 @@
* Copyright(c) 2010-2016 Intel Corporation
*/
-#ifndef _VHOST_NET_USER_H
-#define _VHOST_NET_USER_H
+#ifndef _VIRTIO_USER_VHOST_H
+#define _VIRTIO_USER_VHOST_H
#include <stdint.h>
#include <linux/types.h>
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index a3faf1d0..fbd9e979 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -62,6 +62,7 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
const char *mac, uint64_t features)
{
unsigned int tap_features;
+ char *tap_name = NULL;
int sndbuf = INT_MAX;
struct ifreq ifr;
int tapfd;
@@ -112,6 +113,12 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
goto error;
}
+ tap_name = strdup(ifr.ifr_name);
+ if (!tap_name) {
+ PMD_DRV_LOG(ERR, "strdup ifname failed: %s", strerror(errno));
+ goto error;
+ }
+
fcntl(tapfd, F_SETFL, O_NONBLOCK);
if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdr_size) < 0) {
@@ -134,11 +141,12 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
goto error;
}
- if (!(*p_ifname))
- *p_ifname = strdup(ifr.ifr_name);
+ free(*p_ifname);
+ *p_ifname = tap_name;
return tapfd;
error:
+ free(tap_name);
close(tapfd);
return -1;
}
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 2c6eba0a..0a88d595 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -393,7 +393,10 @@ virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
return -1;
flag = fcntl(fd, F_GETFL);
- fcntl(fd, F_SETFL, flag | O_NONBLOCK);
+ if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
+ PMD_DRV_LOG(ERR, "fcntl failed, %s", strerror(errno));
+ return -1;
+ }
return 0;
}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 9c8bcd2c..f0051f88 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -412,7 +412,7 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int mrg_rxbuf, int in_order)
+ int server, int mrg_rxbuf, int in_order)
{
pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
@@ -420,6 +420,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
+ dev->is_server = server;
dev->mac_specified = 0;
dev->frontend_features = 0;
dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index c42ce5d4..3e3a7b78 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -50,7 +50,7 @@ int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int mrg_rxbuf, int in_order);
+ int server, int mrg_rxbuf, int in_order);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index f8791391..5781c094 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -469,6 +469,26 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
char *mac_addr = NULL;
int ret = -1;
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ const char *name = rte_vdev_device_name(dev);
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ RTE_LOG(ERR, PMD, "Failed to probe %s\n", name);
+ return -1;
+ }
+
+ if (eth_virtio_dev_init(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
+ rte_eth_dev_release_port(eth_dev);
+ return -1;
+ }
+
+ eth_dev->dev_ops = &virtio_user_secondary_eth_dev_ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
if (!kvlist) {
PMD_INIT_LOG(ERR, "error when parsing param");
@@ -581,33 +601,19 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
}
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- struct virtio_user_dev *vu_dev;
-
- eth_dev = virtio_user_eth_dev_alloc(dev);
- if (!eth_dev) {
- PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
- goto end;
- }
-
- hw = eth_dev->data->dev_private;
- vu_dev = virtio_user_get_dev(hw);
- if (server_mode == 1)
- vu_dev->is_server = true;
- else
- vu_dev->is_server = false;
- if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
- queue_size, mac_addr, &ifname, mrg_rxbuf,
- in_order) < 0) {
- PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
- virtio_user_eth_dev_free(eth_dev);
- goto end;
- }
+ eth_dev = virtio_user_eth_dev_alloc(dev);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+ goto end;
+ }
- } else {
- eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
- if (!eth_dev)
- goto end;
+ hw = eth_dev->data->dev_private;
+ if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
+ queue_size, mac_addr, &ifname, server_mode,
+ mrg_rxbuf, in_order) < 0) {
+ PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
+ virtio_user_eth_dev_free(eth_dev);
+ goto end;
}
/* previously called by rte_pci_probe() for physical dev */
@@ -649,6 +655,9 @@ virtio_user_pmd_remove(struct rte_vdev_device *vdev)
if (!eth_dev)
return -ENODEV;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
+
/* make sure the device is stopped, queues freed */
rte_eth_dev_close(eth_dev->data->port_id);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 93e5de9a..812e1857 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -52,6 +52,7 @@
#define VMXNET3_RX_OFFLOAD_CAP \
(DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index 469960a3..11951980 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -274,6 +274,8 @@ dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
int ret;
name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -1;
DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index 60621eb8..55f32a59 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*/
#include <string.h>
@@ -219,6 +219,7 @@ int __rte_experimental
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
int ret;
+ char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
DPAA2_QDMA_FUNC_TRACE();
@@ -258,8 +259,12 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)
}
qdma_dev.max_vqs = qdma_config->max_vqs;
- /* Allocate FLE pool */
- qdma_dev.fle_pool = rte_mempool_create("qdma_fle_pool",
+ /* Allocate FLE pool; just append PID so that in case of
+ * multiprocess, the pools don't collide.
+ */
+ snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
+ getpid());
+ qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
@@ -303,6 +308,7 @@ rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
/* Return in case no VQ is free */
if (i == qdma_dev.max_vqs) {
rte_spinlock_unlock(&qdma_dev.lock);
+ DPAA2_QDMA_ERR("Unable to get lock on QDMA device");
return -ENODEV;
}
@@ -719,7 +725,7 @@ rte_qdma_vq_destroy(uint16_t vq_id)
memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
- rte_spinlock_lock(&qdma_dev.lock);
+ rte_spinlock_unlock(&qdma_dev.lock);
return 0;
}
@@ -793,9 +799,6 @@ dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
DPAA2_QDMA_FUNC_TRACE();
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
/* Remove HW queues from global list */
remove_hw_queues_from_list(dpdmai_dev);
@@ -834,10 +837,6 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
DPAA2_QDMA_FUNC_TRACE();
- /* For secondary processes, the primary has done all the work */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
/* Open DPDMAI device */
dpdmai_dev->dpdmai_id = dpdmai_id;
dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
index 17fffcb7..29b4fe91 100644
--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -31,7 +31,7 @@ enum {
};
/**
- * If user has configued a Virtual Queue mode, but for some particular VQ
+ * If user has configured a Virtual Queue mode, but for some particular VQ
* user needs an exclusive H/W queue associated (for better performance
* on that particular VQ), then user can pass this flag while creating the
* Virtual Queue. A H/W queue will be allocated corresponding to
@@ -262,7 +262,7 @@ rte_qdma_vq_stats(uint16_t vq_id,
* VQ's at runtime.
*
* @param vq_id
- * Virtual Queue ID which needs to be deinialized.
+ * Virtual Queue ID which needs to be uninitialized.
*
* @returns
* - 0: Success.
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
index da772d02..eff001b5 100644
--- a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
@@ -244,7 +244,8 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id,
if (ret) {
IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s\n",
file_name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto close_fd;
}
buffer_size = file_stat.st_size;
IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zu\n", buffer_size);
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.h b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
index c7759b8b..234ce364 100644
--- a/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
@@ -8,8 +8,8 @@
extern int ifpga_rawdev_logtype;
#define IFPGA_RAWDEV_PMD_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, ifpga_rawdev_logtype, "ifgpa: " fmt, \
- ##args)
+ rte_log(RTE_LOG_ ## level, ifpga_rawdev_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
#define IFPGA_RAWDEV_PMD_FUNC_TRACE() IFPGA_RAWDEV_PMD_LOG(DEBUG, ">>")
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
index d7630fc6..63f2b9a0 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
@@ -705,6 +705,9 @@ skeleton_rawdev_probe(struct rte_vdev_device *vdev)
name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
/* More than one instance is not supported */
if (skeldev_init_once) {
SKELETON_PMD_ERR("Multiple instance not supported for %s",
@@ -740,6 +743,8 @@ skeleton_rawdev_remove(struct rte_vdev_device *vdev)
int ret;
name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -1;
SKELETON_PMD_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
diff --git a/examples/ethtool/lib/rte_ethtool.h b/examples/ethtool/lib/rte_ethtool.h
index 43adc97a..31cd5ae4 100644
--- a/examples/ethtool/lib/rte_ethtool.h
+++ b/examples/ethtool/lib/rte_ethtool.h
@@ -9,7 +9,7 @@
* This new interface is designed to provide a user-space shim layer for
* Ethtool and Netdevice op API.
*
- * rte_ethtool_get_driver: ethtool_ops::get_driverinfo
+ * rte_ethtool_get_driver: ethtool_ops::get_drvinfo
* rte_ethtool_get_link: ethtool_ops::get_link
* rte_ethtool_get_regs_len: ethtool_ops::get_regs_len
* rte_ethtool_get_regs: ethtool_ops::get_regs
@@ -23,7 +23,7 @@
* rte_ethtool_net_stop: net_device_ops::ndo_stop
* rte_ethtool_net_set_mac_addr: net_device_ops::ndo_set_mac_address
* rte_ethtool_net_validate_addr: net_device_ops::ndo_validate_addr
- * rte_ethtool_net_change_mtu: net_device_ops::rte_net_change_mtu
+ * rte_ethtool_net_change_mtu: net_device_ops::ndo_change_mtu
* rte_ethtool_net_get_stats64: net_device_ops::ndo_get_stats64
* rte_ethtool_net_vlan_rx_add_vid net_device_ops::ndo_vlan_rx_add_vid
* rte_ethtool_net_vlan_rx_kill_vid net_device_ops::ndo_vlan_rx_kill_vid
diff --git a/examples/fips_validation/main.c b/examples/fips_validation/main.c
index e7559c63..40785dec 100644
--- a/examples/fips_validation/main.c
+++ b/examples/fips_validation/main.c
@@ -806,19 +806,20 @@ fips_run_test(void)
if (ret < 0) {
RTE_LOG(ERR, USER1, "Error %i: Init session\n",
ret);
- return ret;
+ goto exit;
}
ret = test_ops.prepare_op();
if (ret < 0) {
RTE_LOG(ERR, USER1, "Error %i: Prepare op\n",
ret);
- return ret;
+ goto exit;
}
if (rte_cryptodev_enqueue_burst(env.dev_id, 0, &env.op, 1) < 1) {
RTE_LOG(ERR, USER1, "Error: Failed enqueue\n");
- return ret;
+ ret = -1;
+ goto exit;
}
do {
@@ -830,6 +831,7 @@ fips_run_test(void)
vec.status = env.op->status;
+exit:
rte_cryptodev_sym_session_clear(env.dev_id, env.sess);
rte_cryptodev_sym_session_free(env.sess);
env.sess = NULL;
diff --git a/examples/ip_pipeline/meson.build b/examples/ip_pipeline/meson.build
index 5e5fe647..664223c9 100644
--- a/examples/ip_pipeline/meson.build
+++ b/examples/ip_pipeline/meson.build
@@ -6,6 +6,7 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
+build = cc.has_header('sys/epoll.h')
deps += ['pipeline', 'bus_pci']
allow_experimental_apis = true
sources = files(
diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
index 02d41e39..a6933801 100644
--- a/examples/ipsec-secgw/Makefile
+++ b/examples/ipsec-secgw/Makefile
@@ -52,7 +52,7 @@ clean:
else
ifeq ($(RTE_SDK),)
- $(error "Please define RTE_SDK environment variable")
+$(error "Please define RTE_SDK environment variable")
endif
# Default target, can be overridden by command line or environment
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index e33232c9..faa84ddd 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -162,7 +162,7 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
}
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
return -1;
}
@@ -455,7 +455,8 @@ esp_outbound_post(struct rte_mbuf *m,
} else {
RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
+ __func__);
return -1;
}
}
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index f88fdb4c..dfb93375 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -255,7 +255,8 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
}
} else {
/* Unknown/Unsupported type, drop the packet */
- RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
+ rte_be_to_cpu_16(eth->ether_type));
rte_pktmbuf_free(pkt);
}
@@ -425,11 +426,11 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
for (i = 0; i < ip->num; i++) {
m = ip->pkts[i];
res = ip->res[i];
- if (res & BYPASS) {
+ if (res == BYPASS) {
ip->pkts[j++] = m;
continue;
}
- if (res & DISCARD) {
+ if (res == DISCARD) {
rte_pktmbuf_free(m);
continue;
}
@@ -440,9 +441,8 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
continue;
}
- sa_idx = ip->res[i] & PROTECT_MASK;
- if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
- !inbound_sa_check(sa, m, sa_idx)) {
+ sa_idx = SPI2IDX(res);
+ if (!inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
continue;
}
@@ -523,16 +523,15 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
j = 0;
for (i = 0; i < ip->num; i++) {
m = ip->pkts[i];
- sa_idx = ip->res[i] & PROTECT_MASK;
- if (ip->res[i] & DISCARD)
+ sa_idx = SPI2IDX(ip->res[i]);
+ if (ip->res[i] == DISCARD)
rte_pktmbuf_free(m);
- else if (ip->res[i] & BYPASS)
+ else if (ip->res[i] == BYPASS)
ip->pkts[j++] = m;
- else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
+ else {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
- } else /* invalid SA idx */
- rte_pktmbuf_free(m);
+ }
}
ip->num = j;
}
@@ -932,7 +931,8 @@ main_loop(__attribute__((unused)) void *dummy)
qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
if (qconf->nb_rx_queue == 0) {
- RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
return 0;
}
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 508d87af..86d8f7df 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -40,10 +40,8 @@
#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1))
#define INVALID_SPI (0)
-#define DISCARD (0x80000000)
-#define BYPASS (0x40000000)
-#define PROTECT_MASK (0x3fffffff)
-#define PROTECT(sa_idx) (SPI2IDX(sa_idx) & PROTECT_MASK) /* SA idx 30 bits */
+#define DISCARD INVALID_SPI
+#define BYPASS UINT32_MAX
#define IPSEC_XFORM_MAX 2
@@ -241,6 +239,14 @@ sp4_init(struct socket_ctx *ctx, int32_t socket_id);
void
sp6_init(struct socket_ctx *ctx, int32_t socket_id);
+/*
+ * Search through SA entries for given SPI.
+ * Returns first entry index if found (greater than or equal to zero),
+ * or -ENOENT otherwise.
+ */
+int
+sa_spi_present(uint32_t spi, int inbound);
+
void
sa_init(struct socket_ctx *ctx, int32_t socket_id);
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 640f1d79..f7b6eb0b 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -79,7 +79,7 @@ const struct supported_cipher_algo cipher_algos[] = {
.keyword = "aes-128-ctr",
.algo = RTE_CRYPTO_CIPHER_AES_CTR,
.iv_len = 8,
- .block_size = 16, /* XXX AESNI MB limition, should be 4 */
+ .block_size = 4,
.key_len = 20
},
{
@@ -125,11 +125,11 @@ const struct supported_aead_algo aead_algos[] = {
}
};
-struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
-uint32_t nb_sa_out;
+static struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
+static uint32_t nb_sa_out;
-struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
-uint32_t nb_sa_in;
+static struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
+static uint32_t nb_sa_in;
static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
@@ -630,7 +630,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
*ri = *ri + 1;
}
-static inline void
+static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
uint32_t i;
@@ -687,7 +687,22 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
}
break;
case TRANSPORT:
- printf("Transport");
+ printf("Transport ");
+ break;
+ }
+ printf(" type:");
+ switch (sa->type) {
+ case RTE_SECURITY_ACTION_TYPE_NONE:
+ printf("no-offload ");
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ printf("inline-crypto-offload ");
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ printf("inline-protocol-offload ");
+ break;
+ case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+ printf("lookaside-protocol-offload ");
break;
}
printf("\n");
@@ -714,8 +729,8 @@ sa_create(const char *name, int32_t socket_id)
snprintf(s, sizeof(s), "%s_%u", name, socket_id);
/* Create SA array table */
- printf("Creating SA context with %u maximum entries\n",
- IPSEC_SA_MAX_ENTRIES);
+ printf("Creating SA context with %u maximum entries on socket %d\n",
+ IPSEC_SA_MAX_ENTRIES, socket_id);
mz_size = sizeof(struct sa_ctx);
mz = rte_memzone_reserve(s, mz_size, socket_id,
@@ -901,6 +916,31 @@ sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}
+/*
+ * Walk through all SA rules to find an SA with given SPI
+ */
+int
+sa_spi_present(uint32_t spi, int inbound)
+{
+ uint32_t i, num;
+ const struct ipsec_sa *sar;
+
+ if (inbound != 0) {
+ sar = sa_in;
+ num = nb_sa_in;
+ } else {
+ sar = sa_out;
+ num = nb_sa_out;
+ }
+
+ for (i = 0; i != num; i++) {
+ if (sar[i].spi == spi)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
diff --git a/examples/ipsec-secgw/sp4.c b/examples/ipsec-secgw/sp4.c
index 6b05daaa..99362a68 100644
--- a/examples/ipsec-secgw/sp4.c
+++ b/examples/ipsec-secgw/sp4.c
@@ -99,6 +99,7 @@ parse_sp4_tokens(char **tokens, uint32_t n_tokens,
uint32_t *ri = NULL; /* rule index */
uint32_t ti = 0; /* token index */
+ uint32_t tv;
uint32_t esp_p = 0;
uint32_t protect_p = 0;
@@ -169,8 +170,12 @@ parse_sp4_tokens(char **tokens, uint32_t n_tokens,
if (status->status < 0)
return;
- rule_ipv4->data.userdata =
- PROTECT(atoi(tokens[ti]));
+ tv = atoi(tokens[ti]);
+ APP_CHECK(tv != DISCARD && tv != BYPASS, status,
+ "invalid SPI: %s", tokens[ti]);
+ if (status->status < 0)
+ return;
+ rule_ipv4->data.userdata = tv;
protect_p = 1;
continue;
@@ -472,6 +477,36 @@ acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules,
return ctx;
}
+/*
+ * check that for each rule its SPI has a corresponding entry in the SAD
+ */
+static int
+check_spi_value(int inbound)
+{
+ uint32_t i, num, spi;
+ const struct acl4_rules *acr;
+
+ if (inbound != 0) {
+ acr = acl4_rules_in;
+ num = nb_acl4_rules_in;
+ } else {
+ acr = acl4_rules_out;
+ num = nb_acl4_rules_out;
+ }
+
+ for (i = 0; i != num; i++) {
+ spi = acr[i].data.userdata;
+ if (spi != DISCARD && spi != BYPASS &&
+ sa_spi_present(spi, inbound) < 0) {
+ RTE_LOG(ERR, IPSEC, "SPI %u is not present in SAD\n",
+ spi);
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
void
sp4_init(struct socket_ctx *ctx, int32_t socket_id)
{
@@ -488,6 +523,14 @@ sp4_init(struct socket_ctx *ctx, int32_t socket_id)
rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already "
"initialized\n", socket_id);
+ if (check_spi_value(1) < 0)
+ rte_exit(EXIT_FAILURE,
+ "Inbound IPv4 SP DB has unmatched in SAD SPIs\n");
+
+ if (check_spi_value(0) < 0)
+ rte_exit(EXIT_FAILURE,
+ "Outbound IPv4 SP DB has unmatched in SAD SPIs\n");
+
if (nb_acl4_rules_in > 0) {
name = "sp_ip4_in";
ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name,
diff --git a/examples/ipsec-secgw/sp6.c b/examples/ipsec-secgw/sp6.c
index dc5b94c6..bfcabf39 100644
--- a/examples/ipsec-secgw/sp6.c
+++ b/examples/ipsec-secgw/sp6.c
@@ -130,6 +130,7 @@ parse_sp6_tokens(char **tokens, uint32_t n_tokens,
uint32_t *ri = NULL; /* rule index */
uint32_t ti = 0; /* token index */
+ uint32_t tv;
uint32_t esp_p = 0;
uint32_t protect_p = 0;
@@ -202,8 +203,12 @@ parse_sp6_tokens(char **tokens, uint32_t n_tokens,
if (status->status < 0)
return;
- rule_ipv6->data.userdata =
- PROTECT(atoi(tokens[ti]));
+ tv = atoi(tokens[ti]);
+ APP_CHECK(tv != DISCARD && tv != BYPASS, status,
+ "invalid SPI: %s", tokens[ti]);
+ if (status->status < 0)
+ return;
+ rule_ipv6->data.userdata = tv;
protect_p = 1;
continue;
@@ -586,6 +591,36 @@ acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules,
return ctx;
}
+/*
+ * check that for each rule its SPI has a corresponding entry in the SAD
+ */
+static int
+check_spi_value(int inbound)
+{
+ uint32_t i, num, spi;
+ const struct acl6_rules *acr;
+
+ if (inbound != 0) {
+ acr = acl6_rules_in;
+ num = nb_acl6_rules_in;
+ } else {
+ acr = acl6_rules_out;
+ num = nb_acl6_rules_out;
+ }
+
+ for (i = 0; i != num; i++) {
+ spi = acr[i].data.userdata;
+ if (spi != DISCARD && spi != BYPASS &&
+ sa_spi_present(spi, inbound) < 0) {
+ RTE_LOG(ERR, IPSEC, "SPI %u is not present in SAD\n",
+ spi);
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
void
sp6_init(struct socket_ctx *ctx, int32_t socket_id)
{
@@ -602,6 +637,14 @@ sp6_init(struct socket_ctx *ctx, int32_t socket_id)
rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u "
"already initialized\n", socket_id);
+ if (check_spi_value(1) < 0)
+ rte_exit(EXIT_FAILURE,
+ "Inbound IPv6 SP DB has unmatched in SAD SPIs\n");
+
+ if (check_spi_value(0) < 0)
+ rte_exit(EXIT_FAILURE,
+ "Outbound IPv6 SP DB has unmatched in SAD SPIs\n");
+
if (nb_acl6_rules_in > 0) {
name = "sp_ip6_in";
ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name,
diff --git a/examples/l2fwd-cat/cat.c b/examples/l2fwd-cat/cat.c
index a6081e67..502c6b32 100644
--- a/examples/l2fwd-cat/cat.c
+++ b/examples/l2fwd-cat/cat.c
@@ -345,7 +345,7 @@ check_cpus_overlapping(void)
for (i = 0; i < m_config_count; i++) {
for (j = i + 1; j < m_config_count; j++) {
- CPU_AND(&mask,
+ RTE_CPU_AND(&mask,
&m_config[i].cpumask,
&m_config[j].cpumask);
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 30c8e44b..3af5dc69 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -37,8 +37,6 @@
#include "args.h"
#include "init.h"
-#define MBUFS_PER_CLIENT 1536
-#define MBUFS_PER_PORT 1536
#define MBUF_CACHE_SIZE 512
#define RTE_MP_RX_DESC_DEFAULT 1024
@@ -63,8 +61,15 @@ struct port_info *ports;
static int
init_mbuf_pools(void)
{
- const unsigned num_mbufs = (num_clients * MBUFS_PER_CLIENT) \
- + (ports->num_ports * MBUFS_PER_PORT);
+ const unsigned int num_mbufs_server =
+ RTE_MP_RX_DESC_DEFAULT * ports->num_ports;
+ const unsigned int num_mbufs_client =
+ num_clients * (CLIENT_QUEUE_RINGSIZE +
+ RTE_MP_TX_DESC_DEFAULT * ports->num_ports);
+ const unsigned int num_mbufs_mp_cache =
+ (num_clients + 1) * MBUF_CACHE_SIZE;
+ const unsigned int num_mbufs =
+ num_mbufs_server + num_mbufs_client + num_mbufs_mp_cache;
/* don't pass single-producer/single-consumer flags to mbuf create as it
* seems faster to use a cache instead */
diff --git a/examples/performance-thread/common/lthread_api.h b/examples/performance-thread/common/lthread_api.h
index 995deb4d..e6879ea5 100644
--- a/examples/performance-thread/common/lthread_api.h
+++ b/examples/performance-thread/common/lthread_api.h
@@ -206,12 +206,12 @@ void lthread_run(void);
* Creates an lthread and places it in the ready queue on a particular
* lcore.
*
- * If no scheduler exists yet on the curret lcore then one is created.
+ * If no scheduler exists yet on the current lcore then one is created.
*
* @param new_lt
* Pointer to an lthread pointer that will be initialized
* @param lcore
- * the lcore the thread should be started on or the current clore
+ * the lcore the thread should be started on or the current lcore
* -1 the current lcore
* 0 - LTHREAD_MAX_LCORES any other lcore
* @param lthread_func
@@ -469,7 +469,7 @@ void
/**
* Set lthread TLS
*
- * This function is modelled on pthread_set_sepcific()
+ * This function is modelled on pthread_set_specific()
* It associates a thread-specific value with a key obtained via a previous
* call to lthread_key_create().
* Different threads may bind different values to the same key. These values
@@ -749,7 +749,7 @@ int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);
* Signal a condition variable
*
* The function unblocks one thread waiting for the condition variable cond.
- * If no threads are waiting on cond, the rte_lthead_cond_signal() function
+ * If no threads are waiting on cond, the rte_lthread_cond_signal() function
* has no effect.
*
* @param cond
@@ -765,7 +765,7 @@ int lthread_cond_signal(struct lthread_cond *c);
* Broadcast a condition variable
*
* The function unblocks all threads waiting for the condition variable cond.
- * If no threads are waiting on cond, the rte_lthead_cond_broadcast()
+ * If no threads are waiting on cond, the rte_lthread_cond_broadcast()
* function has no effect.
*
* @param cond
diff --git a/examples/vhost_crypto/meson.build b/examples/vhost_crypto/meson.build
index daf19fb8..8e9860f0 100644
--- a/examples/vhost_crypto/meson.build
+++ b/examples/vhost_crypto/meson.build
@@ -6,6 +6,7 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
+build = dpdk_conf.has('RTE_LIBRTE_VHOST')
allow_experimental_apis = true
deps += ['vhost', 'cryptodev']
cflags += ['-D_FILE_OFFSET_BITS=64']
diff --git a/examples/vhost_scsi/vhost_scsi.c b/examples/vhost_scsi/vhost_scsi.c
index 2908ff68..513af0cc 100644
--- a/examples/vhost_scsi/vhost_scsi.c
+++ b/examples/vhost_scsi/vhost_scsi.c
@@ -285,6 +285,12 @@ ctrlr_worker(void *arg)
cpu_set_t cpuset;
pthread_t thread;
+ if (ctrlr == NULL || ctrlr->bdev == NULL) {
+ fprintf(stderr, "%s: Error, invalid argument passed to worker thread\n",
+ __func__);
+ exit(0);
+ }
+
thread = pthread_self();
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5da53154..b5b7c678 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -158,7 +158,8 @@ parse_json_to_pkt(json_t *element, struct channel_packet *pkt)
if (ret)
return ret;
} else if (!strcmp(key, "name")) {
- strcpy(pkt->vm_name, json_string_value(value));
+ strlcpy(pkt->vm_name, json_string_value(value),
+ sizeof(pkt->vm_name));
} else if (!strcmp(key, "command")) {
char command[32];
snprintf(command, 32, "%s", json_string_value(value));
@@ -835,18 +836,13 @@ read_json_packet(struct channel_info *chan_info)
indent--;
if ((indent > 0) || (idx > 0))
idx++;
- if (indent == 0)
+ if (indent <= 0)
json_data[idx] = 0;
if (idx >= MAX_JSON_STRING_LEN-1)
break;
} while (indent > 0);
- if (indent > 0)
- /*
- * We've broken out of the read loop without getting
- * a closing brace, so throw away the data
- */
- json_data[idx] = 0;
+ json_data[idx] = '\0';
if (strlen(json_data) == 0)
continue;
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index 893bf4cd..5fa13fe6 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -31,9 +31,15 @@
#include "vm_power_cli.h"
#include "oob_monitor.h"
#include "parse.h"
+#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
+#endif
#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024
@@ -175,6 +181,7 @@ parse_args(int argc, char **argv)
if (cnt < 0) {
printf("Invalid core-list - [%s]\n",
optarg);
+ free(oob_enable);
break;
}
for (i = 0; i < ci->core_count; i++) {
@@ -369,14 +376,21 @@ main(int argc, char **argv)
for (w = 0; w < MAX_VFS; w++) {
eth.addr_bytes[5] = w + 0xf0;
+ ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_set_vf_mac_addr(portid,
w, &eth);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
if (ret == -ENOTSUP)
ret = rte_pmd_i40e_set_vf_mac_addr(
portid, w, &eth);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
if (ret == -ENOTSUP)
ret = rte_pmd_bnxt_set_vf_mac_addr(
portid, w, &eth);
+#endif
switch (ret) {
case 0:
@@ -390,7 +404,6 @@ main(int argc, char **argv)
break;
}
printf("\n");
- break;
}
}
}
diff --git a/examples/vm_power_manager/oob_monitor_x86.c b/examples/vm_power_manager/oob_monitor_x86.c
index 589c604e..ebd96b20 100644
--- a/examples/vm_power_manager/oob_monitor_x86.c
+++ b/examples/vm_power_manager/oob_monitor_x86.c
@@ -33,10 +33,10 @@ static float
apply_policy(int core)
{
struct core_info *ci;
- uint64_t counter;
+ uint64_t counter = 0;
uint64_t branches, branch_misses;
- uint32_t last_branches, last_branch_misses;
- int hits_diff, miss_diff;
+ uint64_t last_branches, last_branch_misses;
+ int64_t hits_diff, miss_diff;
float ratio;
int ret;
@@ -54,6 +54,7 @@ apply_policy(int core)
core);
branches = counter;
+ counter = 0;
ret = pread(ci->cd[core].msr_fd, &counter,
sizeof(counter), IA32_PERFCTR1);
if (ret < 0)
@@ -66,13 +67,25 @@ apply_policy(int core)
ci->cd[core].last_branches = branches;
ci->cd[core].last_branch_misses = branch_misses;
- hits_diff = (int)branches - (int)last_branches;
+ /*
+ * Intentional right shift to make MSB 0 to avoid
+ * possible signed overflow or truncation.
+ */
+ branches >>= 1;
+ last_branches >>= 1;
+ hits_diff = (int64_t)branches - (int64_t)last_branches;
if (hits_diff <= 0) {
/* Likely a counter overflow condition, skip this round */
return -1.0;
}
- miss_diff = (int)branch_misses - (int)last_branch_misses;
+ /*
+ * Intentional right shift to make MSB 0 to avoid
+ * possible signed overflow or truncation.
+ */
+ branch_misses >>= 1;
+ last_branch_misses >>= 1;
+ miss_diff = (int64_t)branch_misses - (int64_t)last_branch_misses;
if (miss_diff <= 0) {
/* Likely a counter overflow condition, skip this round */
return -1.0;
diff --git a/examples/vm_power_manager/power_manager.c b/examples/vm_power_manager/power_manager.c
index f9e8c0ab..318fb025 100644
--- a/examples/vm_power_manager/power_manager.c
+++ b/examples/vm_power_manager/power_manager.c
@@ -157,7 +157,7 @@ power_manager_get_current_frequency(unsigned core_num)
rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
index = rte_power_get_freq(core_num);
rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
- if (index >= POWER_MGR_MAX_CPUS)
+ if (index >= RTE_MAX_LCORE_FREQS)
freq = 0;
else
freq = global_core_freq_info[core_num].freqs[index];
diff --git a/kernel/linux/kni/ethtool/igb/igb_main.c b/kernel/linux/kni/ethtool/igb/igb_main.c
index 0b4faeae..cda2b063 100644
--- a/kernel/linux/kni/ethtool/igb/igb_main.c
+++ b/kernel/linux/kni/ethtool/igb/igb_main.c
@@ -2112,7 +2112,13 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
#ifdef HAVE_NDO_FDB_ADD_VID
u16 vid,
#endif
+#ifdef HAVE_NDO_FDB_ADD_EXTACK
+ u16 flags,
+ struct netlink_ext_ack *extack)
+#else
u16 flags)
+#endif
+
#else
static int igb_ndo_fdb_add(struct ndmsg *ndm,
struct net_device *dev,
diff --git a/kernel/linux/kni/ethtool/igb/kcompat.h b/kernel/linux/kni/ethtool/igb/kcompat.h
index 11b15f3a..649a69c8 100644
--- a/kernel/linux/kni/ethtool/igb/kcompat.h
+++ b/kernel/linux/kni/ethtool/igb/kcompat.h
@@ -3945,6 +3945,10 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
#define HAVE_NDO_BRIDGE_SETLINK_EXTACK
#endif /* >= 5.0.0 */
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0) )
+#define HAVE_NDO_FDB_ADD_EXTACK
+#endif /* >= 5.1.0 */
+
#if defined(timer_setup) && defined(from_timer)
#define HAVE_TIMER_SETUP
#endif
diff --git a/kernel/linux/kni/kni_misc.c b/kernel/linux/kni/kni_misc.c
index 522ae23b..b74e8a3a 100644
--- a/kernel/linux/kni/kni_misc.c
+++ b/kernel/linux/kni/kni_misc.c
@@ -447,7 +447,7 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
ether_addr_copy(net_dev->dev_addr, kni->lad_dev->dev_addr);
else {
/* if user has provided a valid mac address */
- if (is_valid_ether_addr((unsigned char *)(dev_info.mac_addr)))
+ if (is_valid_ether_addr(dev_info.mac_addr))
memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
else
/*
diff --git a/lib/librte_acl/acl_vect.h b/lib/librte_acl/acl_vect.h
index 194fca90..951e5828 100644
--- a/lib/librte_acl/acl_vect.h
+++ b/lib/librte_acl/acl_vect.h
@@ -17,7 +17,7 @@ extern "C" {
/*
- * Takes 2 SIMD registers containing N transitions eachi (tr0, tr1).
+ * Takes 2 SIMD registers containing N transitions each (tr0, tr1).
* Shuffles it into different representation:
* lo - contains low 32 bits of given N transitions.
* hi - contains high 32 bits of given N transitions.
@@ -66,7 +66,7 @@ extern "C" {
\
dfa_ofs = _##P##_sub_epi32(t, r); \
\
- /* QUAD/SINGLE caluclations. */ \
+ /* QUAD/SINGLE calculations. */ \
t = _##P##_cmpgt_epi8(in, tr_hi); \
t = _##P##_sign_epi8(t, t); \
t = _##P##_maddubs_epi16(t, t); \
diff --git a/lib/librte_acl/meson.build b/lib/librte_acl/meson.build
index aec792f5..2207dbaf 100644
--- a/lib/librte_acl/meson.build
+++ b/lib/librte_acl/meson.build
@@ -23,7 +23,7 @@ if arch_subdir == 'x86'
avx2_tmplib = static_library('avx2_tmp',
'acl_run_avx2.c',
dependencies: static_rte_eal,
- c_args: '-mavx2')
+ c_args: cflags + ['-mavx2'])
objs += avx2_tmplib.extract_objects('acl_run_avx2.c')
cflags += '-DCC_AVX2_SUPPORT'
endif
diff --git a/lib/librte_bbdev/rte_bbdev.h b/lib/librte_bbdev/rte_bbdev.h
index 25ef409f..4a2873b2 100644
--- a/lib/librte_bbdev/rte_bbdev.h
+++ b/lib/librte_bbdev/rte_bbdev.h
@@ -43,7 +43,7 @@ extern "C" {
#define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
#endif
-/** Flags indiciate current state of BBDEV device */
+/** Flags indicate current state of BBDEV device */
enum rte_bbdev_state {
RTE_BBDEV_UNUSED,
RTE_BBDEV_INITIALIZED
@@ -161,7 +161,7 @@ rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
/**
* Start a device.
- * This is the last step needed before enqueueing operations is possible.
+ * This is the last step needed before enqueuing operations is possible.
*
* @param dev_id
* The identifier of the device.
diff --git a/lib/librte_bitratestats/rte_bitrate.c b/lib/librte_bitratestats/rte_bitrate.c
index c4b28f62..639e4754 100644
--- a/lib/librte_bitratestats/rte_bitrate.c
+++ b/lib/librte_bitratestats/rte_bitrate.c
@@ -67,6 +67,7 @@ rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
int64_t delta;
const int64_t alpha_percent = 20;
uint64_t values[6];
+ int ret;
if (bitrate_data == NULL)
return -EINVAL;
@@ -124,7 +125,10 @@ rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
values[3] = port_data->mean_obits;
values[4] = port_data->peak_ibits;
values[5] = port_data->peak_obits;
- rte_metrics_update_values(port_id, bitrate_data->id_stats_set,
+ ret = rte_metrics_update_values(port_id, bitrate_data->id_stats_set,
values, ARRAY_SIZE(values));
+ if (ret < 0)
+ return ret;
+
return 0;
}
diff --git a/lib/librte_bpf/rte_bpf.h b/lib/librte_bpf/rte_bpf.h
index ad62ef2c..ab92af8f 100644
--- a/lib/librte_bpf/rte_bpf.h
+++ b/lib/librte_bpf/rte_bpf.h
@@ -120,7 +120,7 @@ rte_bpf_destroy(struct rte_bpf *bpf);
* Create a new eBPF execution context and load given BPF code into it.
*
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @return
* BPF handle that is used in future BPF operations,
* or NULL on error, with error code set in rte_errno.
@@ -136,7 +136,7 @@ rte_bpf_load(const struct rte_bpf_prm *prm);
* file into it.
*
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @param fname
* Pathname for a ELF file.
* @param sname
@@ -183,7 +183,7 @@ rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
uint32_t num);
/**
- * Provide information about natively compield code for given BPF handle.
+ * Provide information about natively compiled code for given BPF handle.
*
* @param bpf
* handle for the BPF code.
diff --git a/lib/librte_bpf/rte_bpf_ethdev.h b/lib/librte_bpf/rte_bpf_ethdev.h
index 11d09cdc..1943372f 100644
--- a/lib/librte_bpf/rte_bpf_ethdev.h
+++ b/lib/librte_bpf/rte_bpf_ethdev.h
@@ -73,7 +73,7 @@ rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue);
* @param sname
* Name of the executable section within the file to load.
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @param flags
* Flags that define expected behavior of the loaded filter
* (i.e. jited/non-jited version to use).
@@ -98,7 +98,7 @@ rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
* @param sname
* Name of the executable section within the file to load.
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @param flags
* Flags that define expected expected behavior of the loaded filter
* (i.e. jited/non-jited version to use).
diff --git a/lib/librte_cfgfile/rte_cfgfile.c b/lib/librte_cfgfile/rte_cfgfile.c
index 7d8c941e..61426963 100644
--- a/lib/librte_cfgfile/rte_cfgfile.c
+++ b/lib/librte_cfgfile/rte_cfgfile.c
@@ -7,6 +7,7 @@
#include <string.h>
#include <ctype.h>
#include <errno.h>
+#include <rte_string_fns.h>
#include <rte_common.h>
#include "rte_cfgfile.h"
@@ -224,10 +225,11 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
_strip(split[1], strlen(split[1]));
char *end = memchr(split[1], '\\', strlen(split[1]));
+ size_t split_len = strlen(split[1]) + 1;
while (end != NULL) {
if (*(end+1) == params->comment_character) {
*end = '\0';
- strcat(split[1], end+1);
+ strlcat(split[1], end+1, split_len);
} else
end++;
end = memchr(end, '\\', strlen(end));
diff --git a/lib/librte_cryptodev/rte_crypto_asym.h b/lib/librte_cryptodev/rte_crypto_asym.h
index 5e185b2d..b1c1a6c1 100644
--- a/lib/librte_cryptodev/rte_crypto_asym.h
+++ b/lib/librte_cryptodev/rte_crypto_asym.h
@@ -112,15 +112,15 @@ enum rte_crypto_rsa_padding_type {
/**< RSA no padding scheme */
RTE_CRYPTO_RSA_PKCS1_V1_5_BT0,
/**< RSA PKCS#1 V1.5 Block Type 0 padding scheme
- * as descibed in rfc2313
+ * as described in rfc2313
*/
RTE_CRYPTO_RSA_PKCS1_V1_5_BT1,
/**< RSA PKCS#1 V1.5 Block Type 01 padding scheme
- * as descibed in rfc2313
+ * as described in rfc2313
*/
RTE_CRYPTO_RSA_PKCS1_V1_5_BT2,
/**< RSA PKCS#1 V1.5 Block Type 02 padding scheme
- * as descibed in rfc2313
+ * as described in rfc2313
*/
RTE_CRYPTO_RSA_PADDING_OAEP,
/**< RSA PKCS#1 OAEP padding scheme */
@@ -227,7 +227,7 @@ struct rte_crypto_rsa_xform {
/**
* Asymmetric Modular exponentiation transform data
*
- * Structure describing modular exponentation xform param
+ * Structure describing modular exponentiation xform param
*
*/
struct rte_crypto_modex_xform {
@@ -271,7 +271,7 @@ struct rte_crypto_dh_xform {
rte_crypto_param p;
/**< p : Prime modulus data
- * DH prime modulous data in octet-string network byte order format.
+ * DH prime modulus data in octet-string network byte order format.
*
*/
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index a52eaaa4..ff8520cf 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -576,7 +576,7 @@ rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
cmp = strncmp(devs[i].device->driver->name,
driver_name,
- strlen(driver_name));
+ strlen(driver_name) + 1);
if (cmp == 0)
devices[count++] = devs[i].data->dev_id;
@@ -1571,7 +1571,7 @@ rte_cryptodev_driver_id_get(const char *name)
TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
driver_name = driver->driver->name;
- if (strncmp(driver_name, name, strlen(driver_name)) == 0)
+ if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
return driver->id;
}
return -1;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 4099823f..d9c3a064 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1097,7 +1097,7 @@ rte_cryptodev_asym_session_clear(uint8_t dev_id,
* Get the size of the header session, for all registered drivers.
*
* @return
- * Size of the symmetric eader session.
+ * Size of the symmetric header session.
*/
unsigned int
rte_cryptodev_sym_get_header_session_size(void);
diff --git a/lib/librte_distributor/rte_distributor_private.h b/lib/librte_distributor/rte_distributor_private.h
index fce68c95..33cd8941 100644
--- a/lib/librte_distributor/rte_distributor_private.h
+++ b/lib/librte_distributor/rte_distributor_private.h
@@ -41,7 +41,7 @@ extern "C" {
/**
* Maximum number of workers allowed.
- * Be aware of increasing the limit, becaus it is limited by how we track
+ * Be aware of increasing the limit, because it is limited by how we track
* in-flight tags. See in_flight_bitmask and rte_distributor_process
*/
#define RTE_DISTRIB_MAX_WORKERS 64
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index f01495e3..bfac7fdc 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -227,7 +227,7 @@ rte_eal_config_create(void)
return;
if (mem_cfg_fd < 0){
- mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
+ mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0600);
if (mem_cfg_fd < 0)
rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
}
@@ -662,6 +662,12 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ if (rte_eal_alarm_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread");
+ /* rte_eal_alarm_init sets rte_errno on failure. */
+ return -1;
+ }
+
/* Put mp channel init before bus scan so that we can init the vdev
* bus through mp channel in the secondary process before the bus scan.
*/
@@ -751,12 +757,6 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- if (rte_eal_alarm_init() < 0) {
- rte_eal_init_alert("Cannot init interrupt-handling thread");
- /* rte_eal_alarm_init sets rte_errno on failure. */
- return -1;
- }
-
if (rte_eal_timer_init() < 0) {
rte_eal_init_alert("Cannot init HPET or TSC timers");
rte_errno = ENOTSUP;
diff --git a/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c b/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c
index 1e8f5df2..32012e14 100644
--- a/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c
@@ -22,7 +22,7 @@ static void *
map_shared_memory(const char *filename, const size_t mem_size, int flags)
{
void *retval;
- int fd = open(filename, flags, 0666);
+ int fd = open(filename, flags, 0600);
if (fd < 0)
return NULL;
if (ftruncate(fd, mem_size) < 0) {
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 999ba24b..e3ef3714 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -55,6 +55,7 @@ static uint64_t system_page_sz;
static uint64_t baseaddr = 0x100000000;
#endif
+#define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
size_t page_sz, int flags, int mmap_flags)
@@ -62,6 +63,7 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
bool addr_is_hint, allow_shrink, unmap, no_align;
uint64_t map_sz;
void *mapped_addr, *aligned_addr;
+ uint8_t try = 0;
if (system_page_sz == 0)
system_page_sz = sysconf(_SC_PAGESIZE);
@@ -117,11 +119,14 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
if (mapped_addr != MAP_FAILED && addr_is_hint &&
mapped_addr != requested_addr) {
- /* hint was not used. Try with another offset */
- munmap(mapped_addr, map_sz);
- mapped_addr = MAP_FAILED;
+ try++;
next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
- requested_addr = next_baseaddr;
+ if (try <= MAX_MMAP_WITH_DEFINED_ADDR_TRIES) {
+ /* hint was not used. Try with another offset */
+ munmap(mapped_addr, map_sz);
+ mapped_addr = MAP_FAILED;
+ requested_addr = next_baseaddr;
+ }
}
} while ((allow_shrink || addr_is_hint) &&
mapped_addr == MAP_FAILED && *size > 0);
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index f6dfbc73..d4ab5e23 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -216,6 +216,7 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->create_uio_dev = 0;
internal_cfg->iova_mode = RTE_IOVA_DC;
internal_cfg->user_mbuf_pool_ops_name = NULL;
+ CPU_ZERO(&internal_cfg->ctrl_cpuset);
internal_cfg->init_complete = 0;
}
@@ -417,21 +418,44 @@ eal_service_cores_parsed(void)
}
static int
-eal_parse_coremask(const char *coremask)
+update_lcore_config(int *cores)
{
struct rte_config *cfg = rte_eal_get_configuration();
- int i, j, idx = 0;
+ unsigned int count = 0;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (cores[i] != -1) {
+ if (!lcore_config[i].detected) {
+ RTE_LOG(ERR, EAL, "lcore %u unavailable\n", i);
+ ret = -1;
+ continue;
+ }
+ cfg->lcore_role[i] = ROLE_RTE;
+ count++;
+ } else {
+ cfg->lcore_role[i] = ROLE_OFF;
+ }
+ lcore_config[i].core_index = cores[i];
+ }
+ if (!ret)
+ cfg->lcore_count = count;
+ return ret;
+}
+
+static int
+eal_parse_coremask(const char *coremask, int *cores)
+{
unsigned count = 0;
- char c;
+ int i, j, idx;
int val;
+ char c;
- if (eal_service_cores_parsed())
- RTE_LOG(WARNING, EAL,
- "Service cores parsed before dataplane cores. "
- "Please ensure -c is before -s or -S\n");
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++)
+ cores[idx] = -1;
+ idx = 0;
- if (coremask == NULL)
- return -1;
/* Remove all blank characters ahead and after .
* Remove 0x/0X if exists.
*/
@@ -456,32 +480,16 @@ eal_parse_coremask(const char *coremask)
for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++)
{
if ((1 << j) & val) {
- if (!lcore_config[idx].detected) {
- RTE_LOG(ERR, EAL, "lcore %u "
- "unavailable\n", idx);
- return -1;
- }
-
- cfg->lcore_role[idx] = ROLE_RTE;
- lcore_config[idx].core_index = count;
+ cores[idx] = count;
count++;
- } else {
- cfg->lcore_role[idx] = ROLE_OFF;
- lcore_config[idx].core_index = -1;
}
}
}
for (; i >= 0; i--)
if (coremask[i] != '0')
return -1;
- for (; idx < RTE_MAX_LCORE; idx++) {
- cfg->lcore_role[idx] = ROLE_OFF;
- lcore_config[idx].core_index = -1;
- }
if (count == 0)
return -1;
- /* Update the count of enabled logical cores of the EAL configuration */
- cfg->lcore_count = count;
return 0;
}
@@ -562,34 +570,19 @@ eal_parse_service_corelist(const char *corelist)
}
static int
-eal_parse_corelist(const char *corelist)
+eal_parse_corelist(const char *corelist, int *cores)
{
- struct rte_config *cfg = rte_eal_get_configuration();
- int i, idx = 0;
unsigned count = 0;
char *end = NULL;
int min, max;
+ int idx;
- if (eal_service_cores_parsed())
- RTE_LOG(WARNING, EAL,
- "Service cores parsed before dataplane cores. "
- "Please ensure -l is before -s or -S\n");
-
- if (corelist == NULL)
- return -1;
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++)
+ cores[idx] = -1;
- /* Remove all blank characters ahead and after */
+ /* Remove all blank characters ahead */
while (isblank(*corelist))
corelist++;
- i = strlen(corelist);
- while ((i > 0) && isblank(corelist[i - 1]))
- i--;
-
- /* Reset config */
- for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
- cfg->lcore_role[idx] = ROLE_OFF;
- lcore_config[idx].core_index = -1;
- }
/* Get list of cores */
min = RTE_MAX_LCORE;
@@ -600,10 +593,10 @@ eal_parse_corelist(const char *corelist)
return -1;
errno = 0;
idx = strtol(corelist, &end, 10);
- if (idx < 0 || idx >= (int)cfg->lcore_count)
- return -1;
if (errno || end == NULL)
return -1;
+ if (idx < 0 || idx >= RTE_MAX_LCORE)
+ return -1;
while (isblank(*end))
end++;
if (*end == '-') {
@@ -613,9 +606,8 @@ eal_parse_corelist(const char *corelist)
if (min == RTE_MAX_LCORE)
min = idx;
for (idx = min; idx <= max; idx++) {
- if (cfg->lcore_role[idx] != ROLE_RTE) {
- cfg->lcore_role[idx] = ROLE_RTE;
- lcore_config[idx].core_index = count;
+ if (cores[idx] == -1) {
+ cores[idx] = count;
count++;
}
}
@@ -627,10 +619,6 @@ eal_parse_corelist(const char *corelist)
if (count == 0)
return -1;
-
- /* Update the count of enabled logical cores of the EAL configuration */
- cfg->lcore_count = count;
-
return 0;
}
@@ -1106,13 +1094,81 @@ eal_parse_iova_mode(const char *name)
return 0;
}
+/* caller is responsible for freeing the returned string */
+static char *
+available_cores(void)
+{
+ char *str = NULL;
+ int previous;
+ int sequence;
+ char *tmp;
+ int idx;
+
+ /* find the first available cpu */
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
+ if (!lcore_config[idx].detected)
+ continue;
+ break;
+ }
+ if (idx >= RTE_MAX_LCORE)
+ return NULL;
+
+ /* first sequence */
+ if (asprintf(&str, "%d", idx) < 0)
+ return NULL;
+ previous = idx;
+ sequence = 0;
+
+ for (idx++ ; idx < RTE_MAX_LCORE; idx++) {
+ if (!lcore_config[idx].detected)
+ continue;
+
+ if (idx == previous + 1) {
+ previous = idx;
+ sequence = 1;
+ continue;
+ }
+
+ /* finish current sequence */
+ if (sequence) {
+ if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
+ free(str);
+ return NULL;
+ }
+ free(str);
+ str = tmp;
+ }
+
+ /* new sequence */
+ if (asprintf(&tmp, "%s,%d", str, idx) < 0) {
+ free(str);
+ return NULL;
+ }
+ free(str);
+ str = tmp;
+ previous = idx;
+ sequence = 0;
+ }
+
+ /* finish last sequence */
+ if (sequence) {
+ if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
+ free(str);
+ return NULL;
+ }
+ free(str);
+ str = tmp;
+ }
+
+ return str;
+}
+
int
eal_parse_common_option(int opt, const char *optarg,
struct internal_config *conf)
{
static int b_used;
static int w_used;
- struct rte_config *cfg = rte_eal_get_configuration();
switch (opt) {
/* blacklist */
@@ -1136,9 +1192,23 @@ eal_parse_common_option(int opt, const char *optarg,
w_used = 1;
break;
/* coremask */
- case 'c':
- if (eal_parse_coremask(optarg) < 0) {
- RTE_LOG(ERR, EAL, "invalid coremask\n");
+ case 'c': {
+ int lcore_indexes[RTE_MAX_LCORE];
+
+ if (eal_service_cores_parsed())
+ RTE_LOG(WARNING, EAL,
+ "Service cores parsed before dataplane cores. Please ensure -c is before -s or -S\n");
+ if (eal_parse_coremask(optarg, lcore_indexes) < 0) {
+ RTE_LOG(ERR, EAL, "invalid coremask syntax\n");
+ return -1;
+ }
+ if (update_lcore_config(lcore_indexes) < 0) {
+ char *available = available_cores();
+
+ RTE_LOG(ERR, EAL,
+ "invalid coremask, please check specified cores are part of %s\n",
+ available);
+ free(available);
return -1;
}
@@ -1152,12 +1222,26 @@ eal_parse_common_option(int opt, const char *optarg,
core_parsed = LCORE_OPT_MSK;
break;
+ }
/* corelist */
- case 'l':
- if (eal_parse_corelist(optarg) < 0) {
+ case 'l': {
+ int lcore_indexes[RTE_MAX_LCORE];
+
+ if (eal_service_cores_parsed())
+ RTE_LOG(WARNING, EAL,
+ "Service cores parsed before dataplane cores. Please ensure -l is before -s or -S\n");
+
+ if (eal_parse_corelist(optarg, lcore_indexes) < 0) {
+ RTE_LOG(ERR, EAL, "invalid core list syntax\n");
+ return -1;
+ }
+ if (update_lcore_config(lcore_indexes) < 0) {
+ char *available = available_cores();
+
RTE_LOG(ERR, EAL,
- "invalid core list, please check core numbers are in [0, %u] range\n",
- cfg->lcore_count-1);
+ "invalid core list, please check specified cores are part of %s\n",
+ available);
+ free(available);
return -1;
}
@@ -1171,6 +1255,7 @@ eal_parse_common_option(int opt, const char *optarg,
core_parsed = LCORE_OPT_LST;
break;
+ }
/* service coremask */
case 's':
if (eal_parse_service_coremask(optarg) < 0) {
@@ -1342,10 +1427,9 @@ eal_auto_detect_cores(struct rte_config *cfg)
unsigned int lcore_id;
unsigned int removed = 0;
rte_cpuset_t affinity_set;
- pthread_t tid = pthread_self();
- if (pthread_getaffinity_np(tid, sizeof(rte_cpuset_t),
- &affinity_set) < 0)
+ if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
+ &affinity_set))
CPU_ZERO(&affinity_set);
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1359,6 +1443,31 @@ eal_auto_detect_cores(struct rte_config *cfg)
cfg->lcore_count -= removed;
}
+static void
+compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
+{
+ rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset;
+ rte_cpuset_t default_set;
+ unsigned int lcore_id;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (eal_cpu_detected(lcore_id) &&
+ rte_lcore_has_role(lcore_id, ROLE_OFF)) {
+ CPU_SET(lcore_id, cpuset);
+ }
+ }
+
+ if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
+ &default_set))
+ CPU_ZERO(&default_set);
+
+ RTE_CPU_AND(cpuset, cpuset, &default_set);
+
+ /* if no detected CPU is off, use master core */
+ if (!CPU_COUNT(cpuset))
+ CPU_SET(rte_get_master_lcore(), cpuset);
+}
+
int
eal_cleanup_config(struct internal_config *internal_cfg)
{
@@ -1392,6 +1501,8 @@ eal_adjust_config(struct internal_config *internal_cfg)
lcore_config[cfg->master_lcore].core_role = ROLE_RTE;
}
+ compute_ctrl_threads_cpuset(internal_cfg);
+
/* if no memory amounts were requested, this will result in 0 and
* will be overridden later, right after eal_hugepage_info_init() */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index b46d644b..852e52e0 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -285,7 +285,15 @@ read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
break;
}
}
-
+ /* sanity-check the response */
+ if (m->msg.num_fds < 0 || m->msg.num_fds > RTE_MP_MAX_FD_NUM) {
+ RTE_LOG(ERR, EAL, "invalid number of fd's received\n");
+ return -1;
+ }
+ if (m->msg.len_param < 0 || m->msg.len_param > RTE_MP_MAX_PARAM_LEN) {
+ RTE_LOG(ERR, EAL, "invalid received data length\n");
+ return -1;
+ }
return 0;
}
@@ -678,11 +686,6 @@ send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
unlink(dst_path);
return 0;
}
- if (errno == ENOBUFS) {
- RTE_LOG(ERR, EAL, "Peer cannot receive message %s\n",
- dst_path);
- return 0;
- }
RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
dst_path, strerror(errno));
return -1;
@@ -758,6 +761,18 @@ check_input(const struct rte_mp_msg *msg)
if (validate_action_name(msg->name))
return false;
+ if (msg->len_param < 0) {
+ RTE_LOG(ERR, EAL, "Message data length is negative\n");
+ rte_errno = EINVAL;
+ return false;
+ }
+
+ if (msg->num_fds < 0) {
+ RTE_LOG(ERR, EAL, "Number of fd's is negative\n");
+ rte_errno = EINVAL;
+ return false;
+ }
+
if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
RTE_LOG(ERR, EAL, "Message data is too long\n");
rte_errno = E2BIG;
@@ -919,7 +934,7 @@ int __rte_experimental
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
const struct timespec *ts)
{
- int dir_fd, ret = 0;
+ int dir_fd, ret = -1;
DIR *mp_dir;
struct dirent *ent;
struct timeval now;
@@ -927,13 +942,13 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
- if (check_input(req) == false)
- return -1;
-
reply->nb_sent = 0;
reply->nb_received = 0;
reply->msgs = NULL;
+ if (check_input(req) == false)
+ goto end;
+
if (internal_config.no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
return 0;
@@ -942,7 +957,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
if (gettimeofday(&now, NULL) < 0) {
RTE_LOG(ERR, EAL, "Failed to get current time\n");
rte_errno = errno;
- return -1;
+ goto end;
}
end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
@@ -954,7 +969,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
pthread_mutex_lock(&pending_requests.lock);
ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
pthread_mutex_unlock(&pending_requests.lock);
- return ret;
+ goto end;
}
/* for primary process, broadcast request, and collect reply 1 by 1 */
@@ -962,7 +977,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
if (!mp_dir) {
RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
rte_errno = errno;
- return -1;
+ goto end;
}
dir_fd = dirfd(mp_dir);
@@ -970,9 +985,8 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
if (flock(dir_fd, LOCK_SH)) {
RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
mp_dir_path);
- closedir(mp_dir);
rte_errno = errno;
- return -1;
+ goto close_end;
}
pthread_mutex_lock(&pending_requests.lock);
@@ -989,14 +1003,25 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
* locks on receive
*/
if (mp_request_sync(path, req, reply, &end))
- ret = -1;
+ goto unlock_end;
}
+ ret = 0;
+
+unlock_end:
pthread_mutex_unlock(&pending_requests.lock);
/* unlock the directory */
flock(dir_fd, LOCK_UN);
+close_end:
/* dir_fd automatically closed on closedir */
closedir(mp_dir);
+
+end:
+ if (ret) {
+ free(reply->msgs);
+ reply->nb_received = 0;
+ reply->msgs = NULL;
+ }
return ret;
}
diff --git a/lib/librte_eal/common/eal_common_thread.c b/lib/librte_eal/common/eal_common_thread.c
index 48ef4d6d..14f206c0 100644
--- a/lib/librte_eal/common/eal_common_thread.c
+++ b/lib/librte_eal/common/eal_common_thread.c
@@ -16,6 +16,7 @@
#include <rte_memory.h>
#include <rte_log.h>
+#include "eal_internal_cfg.h"
#include "eal_private.h"
#include "eal_thread.h"
@@ -168,10 +169,9 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
const pthread_attr_t *attr,
void *(*start_routine)(void *), void *arg)
{
+ rte_cpuset_t *cpuset = &internal_config.ctrl_cpuset;
struct rte_thread_ctrl_params *params;
- unsigned int lcore_id;
- rte_cpuset_t cpuset;
- int cpu_found, ret;
+ int ret;
params = malloc(sizeof(*params));
if (!params)
@@ -195,21 +195,8 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
"Cannot set name for ctrl thread\n");
}
- cpu_found = 0;
- CPU_ZERO(&cpuset);
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- if (eal_cpu_detected(lcore_id) &&
- rte_lcore_has_role(lcore_id, ROLE_OFF)) {
- CPU_SET(lcore_id, &cpuset);
- cpu_found = 1;
- }
- }
- /* if no detected cpu is off, use master core */
- if (!cpu_found)
- CPU_SET(rte_get_master_lcore(), &cpuset);
-
- ret = pthread_setaffinity_np(*thread, sizeof(cpuset), &cpuset);
- if (ret < 0)
+ ret = pthread_setaffinity_np(*thread, sizeof(*cpuset), cpuset);
+ if (ret)
goto fail;
ret = pthread_barrier_wait(&params->configured);
diff --git a/lib/librte_eal/common/eal_internal_cfg.h b/lib/librte_eal/common/eal_internal_cfg.h
index 783ce7de..189d4f5b 100644
--- a/lib/librte_eal/common/eal_internal_cfg.h
+++ b/lib/librte_eal/common/eal_internal_cfg.h
@@ -13,6 +13,8 @@
#include <rte_eal.h>
#include <rte_pci_dev_feature_defs.h>
+#include "eal_thread.h"
+
#define MAX_HUGEPAGE_SIZES 3 /**< support up to 3 page sizes */
/*
@@ -71,6 +73,7 @@ struct internal_config {
unsigned num_hugepage_sizes; /**< how many sizes on this system */
struct hugepage_info hugepage_info[MAX_HUGEPAGE_SIZES];
enum rte_iova_mode iova_mode ; /**< Set IOVA mode on this system */
+ rte_cpuset_t ctrl_cpuset; /**< cpuset for ctrl threads */
volatile unsigned int init_complete;
/**< indicates whether EAL has completed initialization */
};
diff --git a/lib/librte_eal/common/eal_options.h b/lib/librte_eal/common/eal_options.h
index 327c95e9..1623ae8c 100644
--- a/lib/librte_eal/common/eal_options.h
+++ b/lib/librte_eal/common/eal_options.h
@@ -5,6 +5,8 @@
#ifndef EAL_OPTIONS_H
#define EAL_OPTIONS_H
+#include "getopt.h"
+
enum {
/* long options mapped to a short option */
#define OPT_HELP "help"
diff --git a/lib/librte_eal/common/hotplug_mp.c b/lib/librte_eal/common/hotplug_mp.c
index 9d610a8a..7c3f38db 100644
--- a/lib/librte_eal/common/hotplug_mp.c
+++ b/lib/librte_eal/common/hotplug_mp.c
@@ -361,7 +361,7 @@ int eal_dev_hotplug_request_to_primary(struct eal_dev_mp_req *req)
ret = rte_mp_request_sync(&mp_req, &mp_reply, &ts);
if (ret || mp_reply.nb_received != 1) {
- RTE_LOG(ERR, EAL, "cannot send request to primary");
+ RTE_LOG(ERR, EAL, "Cannot send request to primary\n");
if (!ret)
return -1;
return ret;
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
index ce38350b..797381c0 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -63,11 +63,7 @@ extern "C" {
* Guarantees that the STORE operations generated before the barrier
* occur before the STORE operations generated after.
*/
-#ifdef RTE_ARCH_64
-#define rte_wmb() asm volatile("lwsync" : : : "memory")
-#else
#define rte_wmb() asm volatile("sync" : : : "memory")
-#endif
/**
* Read memory barrier.
@@ -75,11 +71,7 @@ extern "C" {
* Guarantees that the LOAD operations generated before the barrier
* occur before the LOAD operations generated after.
*/
-#ifdef RTE_ARCH_64
-#define rte_rmb() asm volatile("lwsync" : : : "memory")
-#else
#define rte_rmb() asm volatile("sync" : : : "memory")
-#endif
#define rte_smp_mb() rte_mb()
diff --git a/lib/librte_eal/common/include/generic/rte_cycles.h b/lib/librte_eal/common/include/generic/rte_cycles.h
index ac379e87..d318b91a 100644
--- a/lib/librte_eal/common/include/generic/rte_cycles.h
+++ b/lib/librte_eal/common/include/generic/rte_cycles.h
@@ -173,7 +173,7 @@ rte_delay_us_sleep(unsigned int us);
*
* @param userfunc
* User function which replaces rte_delay_us. rte_delay_us_block restores
- * buildin block delay function.
+ * builtin block delay function.
*/
void rte_delay_us_callback_register(void(*userfunc)(unsigned int));
diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
index 5751a0e6..2c284f0b 100644
--- a/lib/librte_eal/common/include/generic/rte_rwlock.h
+++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -64,14 +64,14 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
int success = 0;
while (success == 0) {
- x = rwl->cnt;
+ x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
/* write lock is held */
if (x < 0) {
rte_pause();
continue;
}
- success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
- (uint32_t)x, (uint32_t)(x + 1));
+ success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
}
@@ -84,7 +84,7 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
static inline void
rte_rwlock_read_unlock(rte_rwlock_t *rwl)
{
- rte_atomic32_dec((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+ __atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
}
/**
@@ -100,14 +100,14 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
int success = 0;
while (success == 0) {
- x = rwl->cnt;
+ x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
/* a lock is held */
if (x != 0) {
rte_pause();
continue;
}
- success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
- 0, (uint32_t)-1);
+ success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
}
@@ -120,7 +120,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
static inline void
rte_rwlock_write_unlock(rte_rwlock_t *rwl)
{
- rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+ __atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
}
/**
diff --git a/lib/librte_eal/common/include/generic/rte_spinlock.h b/lib/librte_eal/common/include/generic/rte_spinlock.h
index c4c3fc31..87ae7a4f 100644
--- a/lib/librte_eal/common/include/generic/rte_spinlock.h
+++ b/lib/librte_eal/common/include/generic/rte_spinlock.h
@@ -61,9 +61,14 @@ rte_spinlock_lock(rte_spinlock_t *sl);
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
- while (__sync_lock_test_and_set(&sl->locked, 1))
- while(sl->locked)
+ int exp = 0;
+
+ while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
rte_pause();
+ exp = 0;
+ }
}
#endif
@@ -80,7 +85,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl);
static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
{
- __sync_lock_release(&sl->locked);
+ __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
}
#endif
@@ -99,7 +104,10 @@ rte_spinlock_trylock (rte_spinlock_t *sl);
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
{
- return __sync_lock_test_and_set(&sl->locked,1) == 0;
+ int exp = 0;
+ return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
+ 0, /* disallow spurious failure */
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
#endif
@@ -113,7 +121,7 @@ rte_spinlock_trylock (rte_spinlock_t *sl)
*/
static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
{
- return sl->locked;
+ return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
}
/**
diff --git a/lib/librte_eal/common/include/generic/rte_vect.h b/lib/librte_eal/common/include/generic/rte_vect.h
index 11c6475b..3fc47979 100644
--- a/lib/librte_eal/common/include/generic/rte_vect.h
+++ b/lib/librte_eal/common/include/generic/rte_vect.h
@@ -55,7 +55,7 @@ typedef uint16_t rte_v128u16_t __attribute__((vector_size(16), aligned(16)));
/**
* 128 bits vector size to use with unsigned 32 bits elements.
*
- * a = (rte_v128u32_t){ a0, a1, a2, a3, a4 }
+ * a = (rte_v128u32_t){ a0, a1, a2, a3 }
*/
typedef uint32_t rte_v128u32_t __attribute__((vector_size(16), aligned(16)));
diff --git a/lib/librte_eal/common/include/rte_class.h b/lib/librte_eal/common/include/rte_class.h
index 276c91e9..856d09b2 100644
--- a/lib/librte_eal/common/include/rte_class.h
+++ b/lib/librte_eal/common/include/rte_class.h
@@ -15,7 +15,7 @@
*
* A device class defines the type of function a device
* will be used for e.g.: Ethernet adapter (eth),
- * cryptographic coprocessor (crypto), etc.
+ * cryptographic co-processor (crypto), etc.
*/
#ifdef __cplusplus
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index 66cdf60b..48bf28ca 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -340,7 +340,7 @@ rte_is_power_of_2(uint32_t n)
* Aligns input parameter to the next power of 2
*
* @param x
- * The integer value to algin
+ * The integer value to align
*
* @return
* Input parameter aligned to the next power of 2
@@ -358,7 +358,7 @@ rte_align32pow2(uint32_t x)
* Aligns input parameter to the previous power of 2
*
* @param x
- * The integer value to algin
+ * The integer value to align
*
* @return
* Input parameter aligned to the previous power of 2
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index a0cedd57..9951228e 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -228,6 +228,13 @@ struct rte_mp_reply {
*
* As we create socket channel for primary/secondary communication, use
* this function typedef to register action for coming messages.
+ *
+ * @note When handling IPC request callbacks, the reply must be sent even in
+ * cases of error handling. Simply returning success or failure will *not*
+ * send a response to the requestor.
+ * Implementation of error signalling mechanism is up to the application.
+ *
+ * @note No memory allocations should take place inside the callback.
*/
typedef int (*rte_mp_t)(const struct rte_mp_msg *msg, const void *peer);
@@ -237,6 +244,13 @@ typedef int (*rte_mp_t)(const struct rte_mp_msg *msg, const void *peer);
* As we create socket channel for primary/secondary communication, use
* this function typedef to register action for coming responses to asynchronous
* requests.
+ *
+ * @note When handling IPC request callbacks, the reply must be sent even in
+ * cases of error handling. Simply returning success or failure will *not*
+ * send a response to the requestor.
+ * Implementation of error signalling mechanism is up to the application.
+ *
+ * @note No memory allocations should take place inside the callback.
*/
typedef int (*rte_mp_async_reply_t)(const struct rte_mp_msg *request,
const struct rte_mp_reply *reply);
@@ -287,7 +301,7 @@ rte_mp_action_unregister(const char *name);
*
* Send a message to the peer process.
*
- * This function will send a message which will be responsed by the action
+ * This function will send a message which will be responded by the action
* identified by name in the peer process.
*
* @param msg
@@ -311,6 +325,9 @@ rte_mp_sendmsg(struct rte_mp_msg *msg);
*
* @note The caller is responsible to free reply->replies.
*
+ * @note This API must not be used inside memory-related or IPC callbacks, and
+ * no memory allocations should take place inside such callback.
+ *
* @param req
* The req argument contains the customized request message.
*
@@ -364,6 +381,11 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
* This function will send a reply message in response to a request message
* received previously.
*
+ * @note When handling IPC request callbacks, the reply must be sent even in
+ * cases of error handling. Simply returning success or failure will *not*
+ * send a response to the requestor.
+ * Implementation of error signalling mechanism is up to the application.
+ *
* @param msg
* The msg argument contains the customized message.
*
@@ -424,7 +446,7 @@ rte_set_application_usage_hook(rte_usage_hook_t usage_func);
#define RTE_EAL_TAILQ_RWLOCK (&rte_eal_get_configuration()->mem_config->qlock)
/**
- * macro to get the multiple lock of mempool shared by mutiple-instance
+ * macro to get the multiple lock of mempool shared by multiple-instance
*/
#define RTE_EAL_MEMPOOL_RWLOCK (&rte_eal_get_configuration()->mem_config->mplock)
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
index 6e09d918..dea17f50 100644
--- a/lib/librte_eal/common/include/rte_lcore.h
+++ b/lib/librte_eal/common/include/rte_lcore.h
@@ -23,10 +23,18 @@ extern "C" {
#define LCORE_ID_ANY UINT32_MAX /**< Any lcore. */
#if defined(__linux__)
- typedef cpu_set_t rte_cpuset_t;
+typedef cpu_set_t rte_cpuset_t;
+#define RTE_CPU_AND(dst, src1, src2) CPU_AND(dst, src1, src2)
#elif defined(__FreeBSD__)
#include <pthread_np.h>
- typedef cpuset_t rte_cpuset_t;
+typedef cpuset_t rte_cpuset_t;
+#define RTE_CPU_AND(dst, src1, src2) do \
+{ \
+ cpuset_t tmp; \
+ CPU_COPY(src1, &tmp); \
+ CPU_AND(&tmp, src2); \
+ CPU_COPY(&tmp, dst); \
+} while (0)
#endif
/**
@@ -280,8 +288,9 @@ int rte_thread_setname(pthread_t id, const char *name);
* Create a control thread.
*
* Wrapper to pthread_create(), pthread_setname_np() and
- * pthread_setaffinity_np(). The dataplane and service lcores are
- * excluded from the affinity of the new thread.
+ * pthread_setaffinity_np(). The affinity of the new thread is based
+ * on the CPU affinity retrieved at the time rte_eal_init() was called,
+ * the dataplane and service lcores are then excluded.
*
* @param thread
* Filled with the thread id of the new created thread.
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 2f789cb9..213043c4 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -36,7 +36,7 @@ struct rte_logs {
struct rte_log_dynamic_type *dynamic_types;
};
-/** Global log informations */
+/** Global log information */
extern struct rte_logs rte_logs;
/* SDK log type */
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index 54a12467..e0be13ca 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -111,7 +111,7 @@ rte_calloc(const char *type, size_t num, size_t size, unsigned align);
/**
* Replacement function for realloc(), using huge-page memory. Reserved area
* memory is resized, preserving contents. In NUMA systems, the new area
- * resides on the same NUMA socket as the old area.
+ * may not reside on the same NUMA node as the old one.
*
* @param ptr
* Pointer to already allocated memory
diff --git a/lib/librte_eal/common/include/rte_service.h b/lib/librte_eal/common/include/rte_service.h
index 34b41aff..11f67350 100644
--- a/lib/librte_eal/common/include/rte_service.h
+++ b/lib/librte_eal/common/include/rte_service.h
@@ -337,7 +337,7 @@ int32_t rte_service_set_stats_enable(uint32_t id, int32_t enable);
int32_t rte_service_lcore_list(uint32_t array[], uint32_t n);
/**
- * Get the numer of services running on the supplied lcore.
+ * Get the number of services running on the supplied lcore.
*
* @param lcore Id of the service core.
* @retval >=0 Number of services registered to this core.
diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h
index 9a2a1ff9..35c6b003 100644
--- a/lib/librte_eal/common/include/rte_string_fns.h
+++ b/lib/librte_eal/common/include/rte_string_fns.h
@@ -59,10 +59,25 @@ rte_strlcpy(char *dst, const char *src, size_t size)
return (size_t)snprintf(dst, size, "%s", src);
}
+/**
+ * @internal
+ * DPDK-specific version of strlcat for systems without
+ * libc or libbsd copies of the function
+ */
+static inline size_t
+rte_strlcat(char *dst, const char *src, size_t size)
+{
+ size_t l = strnlen(dst, size);
+ if (l < size)
+ return l + rte_strlcpy(&dst[l], src, size - l);
+ return l + strlen(src);
+}
+
/* pull in a strlcpy function */
#ifdef RTE_EXEC_ENV_BSDAPP
#ifndef __BSD_VISIBLE /* non-standard functions are hidden */
#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size)
+#define strlcat(dst, src, size) rte_strlcat(dst, src, size)
#endif
#else /* non-BSD platforms */
@@ -71,6 +86,7 @@ rte_strlcpy(char *dst, const char *src, size_t size)
#else /* no BSD header files, create own */
#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size)
+#define strlcat(dst, src, size) rte_strlcat(dst, src, size)
#endif /* RTE_USE_LIBBSD */
#endif /* BSDAPP */
diff --git a/lib/librte_eal/common/include/rte_tailq.h b/lib/librte_eal/common/include/rte_tailq.h
index 9b01abb2..b6fe4e5f 100644
--- a/lib/librte_eal/common/include/rte_tailq.h
+++ b/lib/librte_eal/common/include/rte_tailq.h
@@ -53,7 +53,7 @@ struct rte_tailq_elem {
};
/**
- * Return the first tailq entry casted to the right struct.
+ * Return the first tailq entry cast to the right struct.
*/
#define RTE_TAILQ_CAST(tailq_entry, struct_name) \
(struct struct_name *)&(tailq_entry)->tailq_head
diff --git a/lib/librte_eal/common/include/rte_uuid.h b/lib/librte_eal/common/include/rte_uuid.h
index 2c846b5f..16bbed32 100644
--- a/lib/librte_eal/common/include/rte_uuid.h
+++ b/lib/librte_eal/common/include/rte_uuid.h
@@ -43,7 +43,7 @@ extern "C" {
#include <stdbool.h>
/**
- * Struct describing a Universal Unique Identifer
+ * Struct describing a Universal Unique Identifier
*/
typedef unsigned char rte_uuid_t[16];
@@ -105,7 +105,7 @@ int rte_uuid_compare(const rte_uuid_t a, const rte_uuid_t b);
* @param uu
* Destination UUID
* @return
- * Returns 0 on succes, and -1 if string is not a valid UUID.
+ * Returns 0 on success, and -1 if string is not a valid UUID.
*/
int rte_uuid_parse(const char *in, rte_uuid_t uu);
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index b4c6dd3c..7c0b13b5 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -37,7 +37,7 @@ extern "C" {
/**
* Patch level number i.e. the z in yy.mm.z
*/
-#define RTE_VER_MINOR 1
+#define RTE_VER_MINOR 2
/**
* Extra string to be appended to version number
diff --git a/lib/librte_eal/common/include/rte_vfio.h b/lib/librte_eal/common/include/rte_vfio.h
index cae96fab..d837f1e7 100644
--- a/lib/librte_eal/common/include/rte_vfio.h
+++ b/lib/librte_eal/common/include/rte_vfio.h
@@ -178,7 +178,7 @@ int rte_vfio_noiommu_is_enabled(void);
* an error on BSD.
*
* @param vfio_group_fd
- * VFIO Grouup FD.
+ * VFIO Group FD.
*
* @return
* 0 on success.
@@ -291,6 +291,10 @@ rte_vfio_get_group_fd(int iommu_group_num);
* containers by default, user needs to manage DMA mappings for
* any container created by this API.
*
+ * @note When creating containers using this API, the container will only be
+ * available in the process that has created it. Sharing containers and
+ * devices between multiple processes is not supported.
+ *
* @return
* the container fd if successful
* <0 if failed
diff --git a/lib/librte_eal/common/malloc_mp.c b/lib/librte_eal/common/malloc_mp.c
index f3a13353..b470565e 100644
--- a/lib/librte_eal/common/malloc_mp.c
+++ b/lib/librte_eal/common/malloc_mp.c
@@ -501,7 +501,7 @@ handle_rollback_response(const struct rte_mp_msg *request,
/* lock the request */
pthread_mutex_lock(&mp_request_list.lock);
- memset(&msg, 0, sizeof(0));
+ memset(&msg, 0, sizeof(msg));
entry = find_request_by_id(mpreq->id);
if (entry == NULL) {
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 30138b63..7a08cf1e 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -320,7 +320,7 @@ rte_eal_config_create(void)
rte_mem_cfg_addr = NULL;
if (mem_cfg_fd < 0){
- mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
+ mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0600);
if (mem_cfg_fd < 0)
rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
}
@@ -1000,6 +1000,12 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ if (rte_eal_alarm_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread");
+ /* rte_eal_alarm_init sets rte_errno on failure. */
+ return -1;
+ }
+
/* Put mp channel init before bus scan so that we can init the vdev
* bus through mp channel in the secondary process before the bus scan.
*/
@@ -1120,12 +1126,6 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- if (rte_eal_alarm_init() < 0) {
- rte_eal_init_alert("Cannot init interrupt-handling thread");
- /* rte_eal_alarm_init sets rte_errno on failure. */
- return -1;
- }
-
if (rte_eal_timer_init() < 0) {
rte_eal_init_alert("Cannot init HPET or TSC timers");
rte_errno = ENOTSUP;
@@ -1214,8 +1214,11 @@ rte_eal_init(int argc, char **argv)
* whether we are primary or secondary process, but we cannot remove
* primary process' files because secondary should be able to run even
* if primary process is dead.
+ *
+ * In no_shconf mode, no runtime directory is created in the first
+ * place, so no cleanup needed.
*/
- if (eal_clean_runtime_dir() < 0) {
+ if (!internal_config.no_shconf && eal_clean_runtime_dir() < 0) {
rte_eal_init_alert("Cannot clear runtime directory\n");
return -1;
}
diff --git a/lib/librte_eal/linuxapp/eal/eal_dev.c b/lib/librte_eal/linuxapp/eal/eal_dev.c
index 2830c868..c4180938 100644
--- a/lib/librte_eal/linuxapp/eal/eal_dev.c
+++ b/lib/librte_eal/linuxapp/eal/eal_dev.c
@@ -66,8 +66,8 @@ static void sigbus_handler(int signum, siginfo_t *info,
{
int ret;
- RTE_LOG(DEBUG, EAL, "Thread[%d] catch SIGBUS, fault address:%p\n",
- (int)pthread_self(), info->si_addr);
+ RTE_LOG(DEBUG, EAL, "Thread catch SIGBUS, fault address:%p\n",
+ info->si_addr);
rte_spinlock_lock(&failure_handle_lock);
ret = rte_bus_sigbus_handler(info->si_addr);
diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
index 0eab1cf7..6e898c24 100644
--- a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
@@ -45,7 +45,7 @@ static void *
map_shared_memory(const char *filename, const size_t mem_size, int flags)
{
void *retval;
- int fd = open(filename, flags, 0666);
+ int fd = open(filename, flags, 0600);
if (fd < 0)
return NULL;
if (ftruncate(fd, mem_size) < 0) {
diff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
index f63d9ca6..81b441a9 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memalloc.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
@@ -740,6 +740,10 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
__func__, socket_id, cur_socket_id);
goto mapped;
}
+#else
+ if (rte_socket_count() > 1)
+ RTE_LOG(DEBUG, EAL, "%s(): not checking hugepage NUMA node.\n",
+ __func__);
#endif
ms->addr = addr;
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e05da74c..898bdb77 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -46,6 +46,7 @@
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
+#include "eal_options.h"
#define PFN_MASK_SIZE 8
@@ -110,7 +111,7 @@ rte_mem_virt2phy(const void *virtaddr)
fd = open("/proc/self/pagemap", O_RDONLY);
if (fd < 0) {
- RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
+ RTE_LOG(INFO, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
__func__, strerror(errno));
return RTE_BAD_IOVA;
}
@@ -118,7 +119,7 @@ rte_mem_virt2phy(const void *virtaddr)
virt_pfn = (unsigned long)virtaddr / page_size;
offset = sizeof(uint64_t) * virt_pfn;
if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
- RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
+ RTE_LOG(INFO, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
__func__, strerror(errno));
close(fd);
return RTE_BAD_IOVA;
@@ -127,11 +128,11 @@ rte_mem_virt2phy(const void *virtaddr)
retval = read(fd, &page, PFN_MASK_SIZE);
close(fd);
if (retval < 0) {
- RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
+ RTE_LOG(INFO, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
__func__, strerror(errno));
return RTE_BAD_IOVA;
} else if (retval != PFN_MASK_SIZE) {
- RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
+ RTE_LOG(INFO, EAL, "%s(): read %d bytes from /proc/self/pagemap "
"but expected %d:\n",
__func__, retval, PFN_MASK_SIZE);
return RTE_BAD_IOVA;
@@ -536,7 +537,7 @@ create_shared_memory(const char *filename, const size_t mem_size)
return retval;
}
- fd = open(filename, O_CREAT | O_RDWR, 0666);
+ fd = open(filename, O_CREAT | O_RDWR, 0600);
if (fd < 0)
return NULL;
if (ftruncate(fd, mem_size) < 0) {
@@ -1392,7 +1393,7 @@ eal_legacy_hugepage_init(void)
if (mcfg->dma_maskbits &&
rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
RTE_LOG(ERR, EAL,
- "%s(): couldnt allocate memory due to IOVA exceeding limits of current DMA mask.\n",
+ "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
__func__);
if (rte_eal_iova_mode() == RTE_IOVA_VA &&
rte_eal_using_phys_addrs())
@@ -2038,7 +2039,8 @@ memseg_primary_init_32(void)
socket_id = rte_socket_id_by_idx(i);
#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
- if (socket_id > 0)
+ /* we can still sort pages by socket in legacy mode */
+ if (!internal_config.legacy_mem && socket_id > 0)
break;
#endif
@@ -2219,7 +2221,8 @@ memseg_primary_init(void)
int socket_id = rte_socket_id_by_idx(i);
#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
- if (socket_id > 0)
+ /* we can still sort pages by socket in legacy mode */
+ if (!internal_config.legacy_mem && socket_id > 0)
break;
#endif
memtypes[cur_type].page_sz = hugepage_sz;
@@ -2378,6 +2381,13 @@ rte_eal_memseg_init(void)
} else {
RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
}
+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (!internal_config.legacy_mem && rte_socket_count() > 1) {
+ RTE_LOG(WARNING, EAL, "DPDK is running on a NUMA system, but is compiled without NUMA support.\n");
+ RTE_LOG(WARNING, EAL, "This will have adverse consequences for performance and usability.\n");
+ RTE_LOG(WARNING, EAL, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.\n");
+ }
+#endif
return rte_eal_process_type() == RTE_PROC_PRIMARY ?
#ifndef RTE_ARCH_64
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 5afa0871..5db5a133 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -127,7 +127,7 @@ struct rte_kni_device_info {
/* mbuf size */
unsigned mbuf_size;
unsigned int mtu;
- char mac_addr[6];
+ uint8_t mac_addr[6];
};
#define KNI_DEVICE "kni"
diff --git a/lib/librte_efd/rte_efd.h b/lib/librte_efd/rte_efd.h
index 2ace008e..c2be4c09 100644
--- a/lib/librte_efd/rte_efd.h
+++ b/lib/librte_efd/rte_efd.h
@@ -191,7 +191,7 @@ rte_efd_find_existing(const char *name);
* This operation was still successful, and entry contains a valid update
* RTE_EFD_UPDATE_FAILED
* Either the EFD failed to find a suitable perfect hash or the group was full
- * This is a fatal error, and the table is now in an indeterminite state
+ * This is a fatal error, and the table is now in an indeterminate state
* RTE_EFD_UPDATE_NO_CHANGE
* Operation resulted in no change to the table (same value already exists)
* 0 - success
diff --git a/lib/librte_ethdev/rte_eth_ctrl.h b/lib/librte_ethdev/rte_eth_ctrl.h
index 5ea8ae24..925a63f2 100644
--- a/lib/librte_ethdev/rte_eth_ctrl.h
+++ b/lib/librte_ethdev/rte_eth_ctrl.h
@@ -589,7 +589,7 @@ struct rte_eth_fdir_masks {
uint16_t vlan_tci_mask; /**< Bit mask for vlan_tci in big endian */
/** Bit mask for ipv4 flow in big endian. */
struct rte_eth_ipv4_flow ipv4_mask;
- /** Bit maks for ipv6 flow in big endian. */
+ /** Bit mask for ipv6 flow in big endian. */
struct rte_eth_ipv6_flow ipv6_mask;
/** Bit mask for L4 source port in big endian. */
uint16_t src_port_mask;
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 9d5107dc..191658da 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -48,7 +48,6 @@ int rte_eth_dev_logtype;
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static uint16_t eth_dev_last_created_port;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -431,8 +430,6 @@ eth_dev_get(uint16_t port_id)
eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
- eth_dev_last_created_port = port_id;
-
return eth_dev;
}
@@ -1646,7 +1643,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
local_conf.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
- "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
+ "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
port_id, rx_queue_id, local_conf.offloads,
dev_info.rx_queue_offload_capa,
__func__);
@@ -1750,7 +1747,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
local_conf.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
- "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
+ "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
port_id, tx_queue_id, local_conf.offloads,
dev_info.tx_queue_offload_capa,
__func__);
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index a3c864a1..0e353619 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -1135,7 +1135,7 @@ struct rte_eth_dev_info {
/**
* Ethernet device RX queue information structure.
- * Used to retieve information about configured queue.
+ * Used to retrieve information about configured queue.
*/
struct rte_eth_rxq_info {
struct rte_mempool *mp; /**< mempool used by that queue. */
@@ -1551,14 +1551,14 @@ const char *rte_eth_dev_tx_offload_name(uint64_t offload);
* Applications should set the ignore_bitfield_offloads bit on *rxmode*
* structure and use offloads field to set per-port offloads instead.
* - Any offloading set in eth_conf->[rt]xmode.offloads must be within
- * the [rt]x_offload_capa returned from rte_eth_dev_infos_get().
+ * the [rt]x_offload_capa returned from rte_eth_dev_info_get().
* Any type of device supported offloading set in the input argument
* eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
* on all queues and it can't be disabled in rte_eth_[rt]x_queue_setup()
* - the Receive Side Scaling (RSS) configuration when using multiple RX
* queues per port. Any RSS hash function set in eth_conf->rss_conf.rss_hf
* must be within the flow_type_rss_offloads provided by drivers via
- * rte_eth_dev_infos_get() API.
+ * rte_eth_dev_info_get() API.
*
* Embedding all configuration information in a single data structure
* is the more flexible method that allows the addition of new features
@@ -2101,7 +2101,7 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
* A pointer to an ids array passed by application. This tells which
* statistics values function should retrieve. This parameter
* can be set to NULL if size is 0. In this case function will retrieve
- * all avalible statistics.
+ * all available statistics.
* @param values
* A pointer to a table to be filled with device statistics values.
* @param size
@@ -2542,7 +2542,7 @@ rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
/**
* Request the driver to free mbufs currently cached by the driver. The
* driver will only free the mbuf if it is no longer in use. It is the
- * application's responsibity to ensure rte_eth_tx_buffer_flush(..) is
+ * application's responsibility to ensure rte_eth_tx_buffer_flush(..) is
* called if needed.
*
* @param port_id
diff --git a/lib/librte_ethdev/rte_ethdev_core.h b/lib/librte_ethdev/rte_ethdev_core.h
index 8f03f83f..16300b14 100644
--- a/lib/librte_ethdev/rte_ethdev_core.h
+++ b/lib/librte_ethdev/rte_ethdev_core.h
@@ -105,7 +105,7 @@ typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-/**< @internal Get specific informations of an Ethernet device. */
+/**< @internal Get specific information of an Ethernet device. */
typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
/**< @internal Get supported ptypes of an Ethernet device. */
@@ -367,7 +367,7 @@ typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
typedef int (*eth_mtr_ops_get_t)(struct rte_eth_dev *dev, void *ops);
-/**< @internal Get Trafffic Metering and Policing (MTR) operations */
+/**< @internal Get Traffic Metering and Policing (MTR) operations */
typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
diff --git a/lib/librte_ethdev/rte_ethdev_driver.h b/lib/librte_ethdev/rte_ethdev_driver.h
index c2ac2632..f2274809 100644
--- a/lib/librte_ethdev/rte_ethdev_driver.h
+++ b/lib/librte_ethdev/rte_ethdev_driver.h
@@ -317,7 +317,7 @@ typedef int (*ethdev_uninit_t)(struct rte_eth_dev *ethdev);
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
- * PMD helper function for cleaing up the resources of a ethdev port on it's
+ * PMD helper function for cleaning up the resources of a ethdev port on it's
* destruction.
*
* @param ethdev
diff --git a/lib/librte_ethdev/rte_tm.h b/lib/librte_ethdev/rte_tm.h
index 646ef388..af23c0f5 100644
--- a/lib/librte_ethdev/rte_tm.h
+++ b/lib/librte_ethdev/rte_tm.h
@@ -378,7 +378,7 @@ struct rte_tm_capabilities {
uint32_t sched_wfq_weight_max;
/** WRED packet mode support. When non-zero, this parameter indicates
- * that there is atleast one leaf node that supports the WRED packet
+ * that there is at least one leaf node that supports the WRED packet
* mode, which might not be true for all the leaf nodes. In packet
* mode, the WRED thresholds specify the queue length in packets, as
* opposed to bytes.
@@ -386,7 +386,7 @@ struct rte_tm_capabilities {
int cman_wred_packet_mode_supported;
/** WRED byte mode support. When non-zero, this parameter indicates that
- * there is atleast one leaf node that supports the WRED byte mode,
+ * there is at least one leaf node that supports the WRED byte mode,
* which might not be true for all the leaf nodes. In byte mode, the
* WRED thresholds specify the queue length in bytes, as opposed to
* packets.
@@ -645,7 +645,7 @@ struct rte_tm_level_capabilities {
uint32_t shaper_shared_n_max;
/** WRED packet mode support. When non-zero, this
- * parameter indicates that there is atleast one leaf
+ * parameter indicates that there is at least one leaf
* node on this level that supports the WRED packet
* mode, which might not be true for all the leaf
* nodes. In packet mode, the WRED thresholds specify
@@ -654,7 +654,7 @@ struct rte_tm_level_capabilities {
int cman_wred_packet_mode_supported;
/** WRED byte mode support. When non-zero, this
- * parameter indicates that there is atleast one leaf
+ * parameter indicates that there is at least one leaf
* node on this level that supports the WRED byte mode,
* which might not be true for all the leaf nodes. In
* byte mode, the WRED thresholds specify the queue
diff --git a/lib/librte_eventdev/rte_event_crypto_adapter.c b/lib/librte_eventdev/rte_event_crypto_adapter.c
index 11b28ca9..5faf3c90 100644
--- a/lib/librte_eventdev/rte_event_crypto_adapter.c
+++ b/lib/librte_eventdev/rte_event_crypto_adapter.c
@@ -159,6 +159,9 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id,
struct rte_event_port_conf *port_conf = arg;
struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
dev = &rte_eventdevs[adapter->eventdev_id];
dev_conf = dev->data->dev_conf;
@@ -353,7 +356,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
cdev_id = m_data->request_info.cdev_id;
qp_id = m_data->request_info.queue_pair_id;
qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
- if (qp_info == NULL) {
+ if (!qp_info->qp_enabled) {
rte_pktmbuf_free(crypto_op->sym->m_src);
rte_crypto_op_free(crypto_op);
continue;
@@ -369,7 +372,7 @@ eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
cdev_id = m_data->request_info.cdev_id;
qp_id = m_data->request_info.queue_pair_id;
qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
- if (qp_info == NULL) {
+ if (!qp_info->qp_enabled) {
rte_pktmbuf_free(crypto_op->sym->m_src);
rte_crypto_op_free(crypto_op);
continue;
@@ -427,10 +430,9 @@ eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
ret = 0;
for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
curr_dev = &adapter->cdevs[cdev_id];
- if (curr_dev == NULL)
- continue;
dev = curr_dev->dev;
-
+ if (dev == NULL)
+ continue;
for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
curr_queue = &curr_dev->qpairs[qp];
@@ -579,9 +581,9 @@ eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
for (cdev_id = adapter->next_cdev_id;
cdev_id < num_cdev; cdev_id++) {
curr_dev = &adapter->cdevs[cdev_id];
- if (curr_dev == NULL)
- continue;
dev = curr_dev->dev;
+ if (dev == NULL)
+ continue;
dev_qps = dev->data->nb_queue_pairs;
for (qp = curr_dev->next_queue_pair_id;
diff --git a/lib/librte_eventdev/rte_event_crypto_adapter.h b/lib/librte_eventdev/rte_event_crypto_adapter.h
index d367309c..9ac8e6f4 100644
--- a/lib/librte_eventdev/rte_event_crypto_adapter.h
+++ b/lib/librte_eventdev/rte_event_crypto_adapter.h
@@ -139,7 +139,7 @@
* - rte_event_crypto_adapter_stats_get()
* - rte_event_crypto_adapter_stats_reset()
- * The applicaton creates an instance using rte_event_crypto_adapter_create()
+ * The application creates an instance using rte_event_crypto_adapter_create()
* or rte_event_crypto_adapter_create_ext().
*
* Cryptodev queue pair addition/deletion is done using the
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index 8d178be1..627875a9 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -872,7 +872,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
break;
}
- if (buf->count >= BATCH_SIZE)
+ if (buf->count > 0)
rxa_flush_event_buffer(rx_adapter);
return nb_rx;
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
index 863b72a1..bb14bb2d 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -66,9 +66,9 @@
* For SW based packet transfers, i.e., when the
* RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is not set in the adapter's
* capabilities flags for a particular ethernet device, the service function
- * temporarily enqueues mbufs to an event buffer before batch enqueueing these
+ * temporarily enqueues mbufs to an event buffer before batch enqueuing these
* to the event device. If the buffer fills up, the service function stops
- * dequeueing packets from the ethernet device. The application may want to
+ * dequeuing packets from the ethernet device. The application may want to
* monitor the buffer fill level and instruct the service function to
* selectively buffer packets. The application may also use some other
* criteria to decide which packets should enter the event device even when
diff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.h b/lib/librte_eventdev/rte_event_eth_tx_adapter.h
index 81456d4a..7a4a01fa 100644
--- a/lib/librte_eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.h
@@ -365,7 +365,8 @@ rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
* which contain the event object enqueue operations to be processed.
* @param nb_events
* The number of event objects to enqueue, typically number of
- * rte_event_port_enqueue_depth() available for this port.
+ * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ * available for this port.
*
* @return
* The number of event objects actually enqueued on the event device. The
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index ef10a855..38608114 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -1155,7 +1155,7 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
*/
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
-/**< Flag indicates HW/SW suports a mechanism to store and retrieve
+/**< Flag indicates HW/SW supports a mechanism to store and retrieve
* the private data information along with the crypto session.
*/
@@ -1366,7 +1366,8 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
* which contain the event object enqueue operations to be processed.
* @param nb_events
* The number of event objects to enqueue, typically number of
- * rte_event_port_enqueue_depth() available for this port.
+ * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ * available for this port.
*
* @return
* The number of event objects actually enqueued on the event device. The
@@ -1381,7 +1382,7 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
* - -ENOSPC The event port was backpressured and unable to enqueue
* one or more events. This error code is only applicable to
* closed systems.
- * @see rte_event_port_enqueue_depth()
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
*/
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
@@ -1415,7 +1416,8 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
* which contain the event object enqueue operations to be processed.
* @param nb_events
* The number of event objects to enqueue, typically number of
- * rte_event_port_enqueue_depth() available for this port.
+ * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ * available for this port.
*
* @return
* The number of event objects actually enqueued on the event device. The
@@ -1430,7 +1432,8 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
* - -ENOSPC The event port was backpressured and unable to enqueue
* one or more events. This error code is only applicable to
* closed systems.
- * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
*/
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
@@ -1464,7 +1467,8 @@ rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
* which contain the event object enqueue operations to be processed.
* @param nb_events
* The number of event objects to enqueue, typically number of
- * rte_event_port_enqueue_depth() available for this port.
+ * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ * available for this port.
*
* @return
* The number of event objects actually enqueued on the event device. The
@@ -1479,7 +1483,8 @@ rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
* - -ENOSPC The event port was backpressured and unable to enqueue
* one or more events. This error code is only applicable to
* closed systems.
- * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
*/
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
@@ -1737,7 +1742,7 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
* @see rte_event_port_unlink() to issue unlink requests.
*
* @param dev_id
- * The indentifier of the device.
+ * The identifier of the device.
*
* @param port_id
* Event port identifier to select port to check for unlinks in progress.
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 1a01326b..d118b9e5 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -873,7 +873,7 @@ typedef int (*eventdev_eth_tx_adapter_free_t)(uint8_t id,
* Ethernet device pointer
*
* @param tx_queue_id
- * Transmt queue index
+ * Transmit queue index
*
* @return
* - 0: Success.
diff --git a/lib/librte_flow_classify/rte_flow_classify.h b/lib/librte_flow_classify/rte_flow_classify.h
index 56e06353..01e88e54 100644
--- a/lib/librte_flow_classify/rte_flow_classify.h
+++ b/lib/librte_flow_classify/rte_flow_classify.h
@@ -208,7 +208,7 @@ rte_flow_classify_validate(struct rte_flow_classifier *cls,
struct rte_flow_error *error);
/**
- * Add a flow classify rule to the flow_classifer table.
+ * Add a flow classify rule to the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
@@ -235,7 +235,7 @@ rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
struct rte_flow_error *error);
/**
- * Delete a flow classify rule from the flow_classifer table.
+ * Delete a flow classify rule from the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index c01489ba..d7a5f4c2 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1536,14 +1536,19 @@ int __rte_experimental
rte_hash_free_key_with_position(const struct rte_hash *h,
const int32_t position)
{
- RETURN_IF_TRUE(((h == NULL) || (position == EMPTY_SLOT)), -EINVAL);
+ /* Key index where key is stored, adding the first dummy index */
+ uint32_t key_idx = position + 1;
+
+ RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
unsigned int lcore_id, n_slots;
struct lcore_cache *cached_free_slots;
- const int32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+ const uint32_t total_entries = h->use_local_cache ?
+ h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
+ : h->entries + 1;
/* Out of bounds */
- if (position >= total_entries)
+ if (key_idx >= total_entries)
return -EINVAL;
if (h->use_local_cache) {
@@ -1560,11 +1565,11 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
}
/* Put index of new free slot in cache. */
cached_free_slots->objs[cached_free_slots->len] =
- (void *)((uintptr_t)position);
+ (void *)((uintptr_t)key_idx);
cached_free_slots->len++;
} else {
rte_ring_sp_enqueue(h->free_slots,
- (void *)((uintptr_t)position));
+ (void *)((uintptr_t)key_idx));
}
return 0;
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index c93d1a13..4432aef7 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -39,7 +39,7 @@ extern "C" {
/** Flag to support reader writer concurrency */
#define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY 0x04
-/** Flag to indicate the extendabe bucket table feature should be used */
+/** Flag to indicate the extendable bucket table feature should be used */
#define RTE_HASH_EXTRA_FLAGS_EXT_TABLE 0x08
/** Flag to disable freeing of key index on hash delete.
@@ -463,7 +463,7 @@ rte_hash_lookup_with_hash(const struct rte_hash *h,
/**
* Calc a hash value by key.
- * This operation is not multi-thread safe.
+ * This operation is not multi-process safe.
*
* @param h
* Hash table to look in.
diff --git a/lib/librte_ip_frag/rte_ip_frag.h b/lib/librte_ip_frag/rte_ip_frag.h
index 04fd9df5..bc4c100f 100644
--- a/lib/librte_ip_frag/rte_ip_frag.h
+++ b/lib/librte_ip_frag/rte_ip_frag.h
@@ -274,7 +274,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
/**
* This function implements reassembly of fragmented IPv4 packets.
- * Incoming mbufs should have its l2_len/l3_len fields setup correclty.
+ * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
*
* @param tbl
* Table where to lookup/add the fragmented packet.
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index 02ca43b4..d44496c7 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -68,7 +68,7 @@ struct rte_kni_conf {
__extension__
uint8_t force_bind : 1; /* Flag to bind kernel thread */
- char mac_addr[ETHER_ADDR_LEN]; /* MAC address assigned to KNI */
+ uint8_t mac_addr[ETHER_ADDR_LEN]; /* MAC address assigned to KNI */
uint16_t mtu;
};
diff --git a/lib/librte_latencystats/rte_latencystats.h b/lib/librte_latencystats/rte_latencystats.h
index efcfa028..67120729 100644
--- a/lib/librte_latencystats/rte_latencystats.h
+++ b/lib/librte_latencystats/rte_latencystats.h
@@ -24,7 +24,7 @@ extern "C" {
* Note: This function pointer is for future flow based latency stats
* implementation.
*
- * Function type used for identifting flow types of a Rx packet.
+ * Function type used for identifying flow types of a Rx packet.
*
* The callback function is called on Rx for each packet.
* This function is used for flow based latency calculations.
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index 21550444..b886f54b 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -442,7 +442,7 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
* @param hop
* Next hop of the most specific rule found for IP (valid on lookup hit only).
* This is an 4 elements array of two byte values.
- * If the lookup was succesfull for the given IP, then least significant byte
+ * If the lookup was successful for the given IP, then least significant byte
* of the corresponding element is the actual next hop and the most
* significant byte is zero.
* If the lookup for the given IP failed, then corresponding element would
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 3dbc6695..8eab5a83 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -209,7 +209,7 @@ extern "C" {
/**
* Outer UDP checksum offload flag. This flag is used for enabling
* outer UDP checksum in PMD. To use outer UDP checksum, the user needs to
- * 1) Enable the following in mbuff,
+ * 1) Enable the following in mbuf,
* a) Fill outer_l2_len and outer_l3_len in mbuf.
* b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
* c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
@@ -279,9 +279,11 @@ extern "C" {
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
/**
- * Second VLAN insertion (QinQ) flag.
+ * Double VLAN insertion (QinQ) request to driver, driver may offload the
+ * insertion based on device capability.
+ * mbuf 'vlan_tci' & 'vlan_tci_outer' must be valid when this flag is set.
*/
-#define PKT_TX_QINQ (1ULL << 49) /**< TX packet with double VLAN inserted. */
+#define PKT_TX_QINQ (1ULL << 49)
/* this old name is deprecated */
#define PKT_TX_QINQ_PKT PKT_TX_QINQ
@@ -337,7 +339,9 @@ extern "C" {
#define PKT_TX_IPV6 (1ULL << 56)
/**
- * TX packet is a 802.1q VLAN packet.
+ * VLAN tag insertion request to driver, driver may offload the insertion
+ * based on the device capability.
+ * mbuf 'vlan_tci' field must be valid when this flag is set.
*/
#define PKT_TX_VLAN (1ULL << 57)
/* this old name is deprecated */
@@ -913,7 +917,7 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
/*
* The atomic_add is an expensive operation, so we don't want to
- * call it in the case where we know we are the uniq holder of
+ * call it in the case where we know we are the unique holder of
* this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
* operation has to be used because concurrent accesses on the
* reference counter can occur.
@@ -1286,7 +1290,7 @@ static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
* The given mbuf must have only one segment.
*
* @param m
- * The packet mbuf to be resetted.
+ * The packet mbuf to be reset.
*/
#define MBUF_INVALID_PORT UINT16_MAX
@@ -1459,7 +1463,7 @@ rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
* ``rte_pktmbuf_detach()``.
*
* Memory for shared data must be provided and user must initialize all of
- * the content properly, escpecially free callback and refcnt. The pointer
+ * the content properly, especially free callback and refcnt. The pointer
* of shared data will be stored in m->shinfo.
* ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few
* bytes at the end of buffer for the shared data, store free callback and
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index 23bc635f..17a2dd35 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -426,7 +426,7 @@ extern "C" {
*/
#define RTE_PTYPE_TUNNEL_ESP 0x00009000
/**
- * L2TP (Layer 2 Tunneling Protocol) tunnleing packet type.
+ * L2TP (Layer 2 Tunneling Protocol) tunneling packet type.
*
* Packet format:
* <'ether type'=0x0800
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 7c9cd9a2..fe2f3335 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -427,7 +427,7 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
- * Dequeue a number of contiquous object blocks from the external pool.
+ * Dequeue a number of contiguous object blocks from the external pool.
*/
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
void **first_obj_table, unsigned int n);
@@ -1364,7 +1364,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
&cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
/*
- * In the offchance that we are buffer constrained,
+ * In the off chance that we are buffer constrained,
* where we are not able to allocate cache + n, go to
* the ring directly. If that fails, we are truly out of
* buffers.
diff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h
index c2c5e249..e0d83111 100644
--- a/lib/librte_net/rte_ether.h
+++ b/lib/librte_net/rte_ether.h
@@ -408,7 +408,7 @@ static inline int rte_vlan_insert(struct rte_mbuf **m)
vh = (struct vlan_hdr *) (nh + 1);
vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
- (*m)->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+ (*m)->ol_flags &= ~(PKT_RX_VLAN_STRIPPED | PKT_TX_VLAN);
return 0;
}
diff --git a/lib/librte_power/power_acpi_cpufreq.c b/lib/librte_power/power_acpi_cpufreq.c
index cd5978d5..f7d3f9ca 100644
--- a/lib/librte_power/power_acpi_cpufreq.c
+++ b/lib/librte_power/power_acpi_cpufreq.c
@@ -12,8 +12,9 @@
#include <signal.h>
#include <limits.h>
-#include <rte_memcpy.h>
#include <rte_atomic.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
#include "power_acpi_cpufreq.h"
#include "power_common.h"
@@ -147,6 +148,8 @@ power_set_governor_userspace(struct rte_power_info *pi)
s = fgets(buf, sizeof(buf), f);
FOPS_OR_NULL_GOTO(s, out);
+ /* Strip off terminating '\n' */
+ strtok(buf, "\n");
/* Check if current governor is userspace */
if (strncmp(buf, POWER_GOVERNOR_USERSPACE,
diff --git a/lib/librte_power/rte_power.c b/lib/librte_power/rte_power.c
index 208b7919..cc05b0fa 100644
--- a/lib/librte_power/rte_power.c
+++ b/lib/librte_power/rte_power.c
@@ -2,7 +2,7 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
-#include <rte_atomic.h>
+#include <rte_spinlock.h>
#include "rte_power.h"
#include "power_acpi_cpufreq.h"
@@ -11,7 +11,7 @@
enum power_management_env global_default_env = PM_ENV_NOT_SET;
-volatile uint32_t global_env_cfg_status = 0;
+static rte_spinlock_t global_env_cfg_lock = RTE_SPINLOCK_INITIALIZER;
/* function pointers */
rte_power_freqs_t rte_power_freqs = NULL;
@@ -29,9 +29,15 @@ rte_power_get_capabilities_t rte_power_get_capabilities;
int
rte_power_set_env(enum power_management_env env)
{
- if (rte_atomic32_cmpset(&global_env_cfg_status, 0, 1) == 0) {
+ rte_spinlock_lock(&global_env_cfg_lock);
+
+ if (global_default_env != PM_ENV_NOT_SET) {
+ rte_spinlock_unlock(&global_env_cfg_lock);
return 0;
}
+
+ int ret = 0;
+
if (env == PM_ENV_ACPI_CPUFREQ) {
rte_power_freqs = power_acpi_cpufreq_freqs;
rte_power_get_freq = power_acpi_cpufreq_get_freq;
@@ -59,19 +65,25 @@ rte_power_set_env(enum power_management_env env)
} else {
RTE_LOG(ERR, POWER, "Invalid Power Management Environment(%d) set\n",
env);
- rte_power_unset_env();
- return -1;
+ ret = -1;
}
- global_default_env = env;
- return 0;
+
+ if (ret == 0)
+ global_default_env = env;
+ else
+ global_default_env = PM_ENV_NOT_SET;
+
+ rte_spinlock_unlock(&global_env_cfg_lock);
+ return ret;
}
void
rte_power_unset_env(void)
{
- if (rte_atomic32_cmpset(&global_env_cfg_status, 1, 0) != 0)
- global_default_env = PM_ENV_NOT_SET;
+ rte_spinlock_lock(&global_env_cfg_lock);
+ global_default_env = PM_ENV_NOT_SET;
+ rte_spinlock_unlock(&global_env_cfg_lock);
}
enum power_management_env
diff --git a/lib/librte_power/rte_power.h b/lib/librte_power/rte_power.h
index d70bc0b3..d7542c3f 100644
--- a/lib/librte_power/rte_power.h
+++ b/lib/librte_power/rte_power.h
@@ -25,7 +25,7 @@ enum power_management_env {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM};
/**
* Set the default power management implementation. If this is not called prior
* to rte_power_init(), then auto-detect of the environment will take place.
- * It is not thread safe.
+ * It is thread safe.
*
* @param env
* env. The environment in which to initialise Power Management for.
diff --git a/lib/librte_power/rte_power_empty_poll.c b/lib/librte_power/rte_power_empty_poll.c
index e6145462..15d4f050 100644
--- a/lib/librte_power/rte_power_empty_poll.c
+++ b/lib/librte_power/rte_power_empty_poll.c
@@ -156,11 +156,8 @@ update_training_stats(struct priority_worker *poll_stats,
{
RTE_SET_USED(specific_freq);
- char pfi_str[32];
uint64_t p0_empty_deq;
- sprintf(pfi_str, "%02d", freq);
-
if (poll_stats->cur_freq == freq &&
poll_stats->thresh[freq].trained == false) {
if (poll_stats->thresh[freq].cur_train_iter == 0) {
diff --git a/lib/librte_power/rte_power_empty_poll.h b/lib/librte_power/rte_power_empty_poll.h
index c1ad5c24..33f24e0c 100644
--- a/lib/librte_power/rte_power_empty_poll.h
+++ b/lib/librte_power/rte_power_empty_poll.h
@@ -59,7 +59,7 @@ struct freq_threshold {
uint32_t cur_train_iter;
};
-/* Each Worder Thread Empty Poll Stats */
+/* Each Worker Thread Empty Poll Stats */
struct priority_worker {
/* Current dequeue and throughput counts */
diff --git a/lib/librte_rawdev/rte_rawdev.h b/lib/librte_rawdev/rte_rawdev.h
index 684bfdb8..ed011ca2 100644
--- a/lib/librte_rawdev/rte_rawdev.h
+++ b/lib/librte_rawdev/rte_rawdev.h
@@ -25,7 +25,7 @@ extern "C" {
#include <rte_memory.h>
#include <rte_errno.h>
-/* Rawdevice object - essentially a void to be typecasted by implementation */
+/* Rawdevice object - essentially a void to be typecast by implementation */
typedef void *rte_rawdev_obj_t;
/**
@@ -244,7 +244,7 @@ rte_rawdev_close(uint16_t dev_id);
* @param dev_id
* Raw device identifiers
* @return
- * 0 for sucessful reset,
+ * 0 for successful reset,
* !0 for failure in resetting
*/
int
@@ -373,7 +373,7 @@ rte_rawdev_set_attr(uint16_t dev_id,
* @param dev_id
* The identifier of the device to configure.
* @param buffers
- * Collection of buffers for enqueueing
+ * Collection of buffers for enqueuing
* @param count
* Count of buffers to enqueue
* @param context
diff --git a/lib/librte_rawdev/rte_rawdev_pmd.h b/lib/librte_rawdev/rte_rawdev_pmd.h
index 811e51d0..5e6cf1d1 100644
--- a/lib/librte_rawdev/rte_rawdev_pmd.h
+++ b/lib/librte_rawdev/rte_rawdev_pmd.h
@@ -282,7 +282,7 @@ typedef uint16_t (*rawdev_queue_count_t)(struct rte_rawdev *dev);
* an opaque object representing context of the call; for example, an
* application can pass information about the queues on which enqueue needs
* to be done. Or, the enqueue operation might be passed reference to an
- * object containing a callback (agreed upon between applicatio and driver).
+ * object containing a callback (agreed upon between application and driver).
*
* @return
* >=0 Count of buffers successfully enqueued (0: no buffers enqueued)
@@ -463,7 +463,7 @@ typedef int (*rawdev_firmware_version_get_t)(struct rte_rawdev *dev,
rte_rawdev_obj_t version_info);
/**
- * Load firwmare from a buffer (DMA'able)
+ * Load firmware from a buffer (DMA'able)
*
* @param dev
* Raw device pointer
@@ -480,7 +480,7 @@ typedef int (*rawdev_firmware_load_t)(struct rte_rawdev *dev,
rte_rawdev_obj_t firmware_buf);
/**
- * Unload firwmare
+ * Unload firmware
*
* @param dev
* Raw device pointer
@@ -548,7 +548,7 @@ struct rte_rawdev_ops {
/**< Reset the statistics values in xstats. */
rawdev_xstats_reset_t xstats_reset;
- /**< Obtainer firmware status */
+ /**< Obtain firmware status */
rawdev_firmware_status_get_t firmware_status_get;
/**< Obtain firmware version information */
rawdev_firmware_version_get_t firmware_version_get;
diff --git a/lib/librte_reorder/rte_reorder.h b/lib/librte_reorder/rte_reorder.h
index 1bcc2e32..6d397100 100644
--- a/lib/librte_reorder/rte_reorder.h
+++ b/lib/librte_reorder/rte_reorder.h
@@ -70,7 +70,7 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
* and return a pointer to it.
*
* @param name
- * Name of the reorder buffer instacne as passed to rte_reorder_create()
+ * Name of the reorder buffer instance as passed to rte_reorder_create()
* @return
* Pointer to reorder buffer instance or NULL if object not found with rte_errno
* set appropriately. Possible rte_errno values include:
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index d215acec..550549db 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -189,7 +189,8 @@ rte_ring_free(struct rte_ring *r)
* therefore, there is no memzone to free.
*/
if (r->memzone == NULL) {
- RTE_LOG(ERR, RING, "Cannot free ring (not created with rte_ring_create()");
+ RTE_LOG(ERR, RING,
+ "Cannot free ring, not created with rte_ring_create()\n");
return;
}
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index af5444a9..e265e947 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -57,7 +57,7 @@ enum rte_ring_queue_behavior {
};
#define RTE_RING_MZ_PREFIX "RG_"
-/**< The maximum length of a ring name. */
+/** The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)
@@ -302,7 +302,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* (powerpc/arm).
* There are 2 choices for the users
* 1.use rmb() memory barrier
- * 2.use one-direcion load_acquire/store_release barrier,defined by
+ * 2.use one-direction load_acquire/store_release barrier,defined by
* CONFIG_RTE_USE_C11_MEM_MODEL=y
* It depends on performance test results.
* By default, move common functions to rte_ring_generic.h
diff --git a/lib/librte_ring/rte_ring_generic.h b/lib/librte_ring/rte_ring_generic.h
index ea7dbe5b..953cdbbd 100644
--- a/lib/librte_ring/rte_ring_generic.h
+++ b/lib/librte_ring/rte_ring_generic.h
@@ -158,11 +158,14 @@ __rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
return 0;
*new_head = *old_head + n;
- if (is_sc)
- r->cons.head = *new_head, success = 1;
- else
+ if (is_sc) {
+ r->cons.head = *new_head;
+ rte_smp_rmb();
+ success = 1;
+ } else {
success = rte_atomic32_cmpset(&r->cons.head, *old_head,
*new_head);
+ }
} while (unlikely(success == 0));
return n;
}
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 84fa896d..c1bdb1df 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -33,7 +33,7 @@ extern "C" {
* classes of the same subport;
* - When any subport traffic class is oversubscribed
* (configuration time event), the usage of subport member
- * pipes with high demand for thattraffic class pipes is
+ * pipes with high demand for that traffic class pipes is
* truncated to a dynamically adjusted value with no
* impact to low demand pipes;
* 3. Pipe:
diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h
index 718147e0..7e6ced4e 100644
--- a/lib/librte_security/rte_security.h
+++ b/lib/librte_security/rte_security.h
@@ -534,7 +534,7 @@ struct rte_security_capability {
enum rte_security_pdcp_domain domain;
/**< PDCP mode of operation: Control or data */
uint32_t capa_flags;
- /**< Capabilitity flags, see RTE_SECURITY_PDCP_* */
+ /**< Capability flags, see RTE_SECURITY_PDCP_* */
} pdcp;
/**< PDCP capability */
};
@@ -566,7 +566,7 @@ struct rte_security_capability {
#define RTE_SECURITY_TX_HW_TRAILER_OFFLOAD 0x00000002
/**< HW constructs trailer of packets
* Transmitted packets will have the trailer added to them
- * by hardawre. The next protocol field will be based on
+ * by hardware. The next protocol field will be based on
* the mbuf->inner_esp_next_proto field.
*/
#define RTE_SECURITY_RX_HW_TRAILER_OFFLOAD 0x00010000
diff --git a/lib/librte_table/rte_table_hash.h b/lib/librte_table/rte_table_hash.h
index 6f55bd57..61a0eed6 100644
--- a/lib/librte_table/rte_table_hash.h
+++ b/lib/librte_table/rte_table_hash.h
@@ -29,7 +29,7 @@ extern "C" {
* be picked and dropped, the most likely candidate for drop, i.e. the
* current LRU key, is always picked. The LRU logic requires maintaining
* specific data structures per each bucket. Use-cases: flow cache, etc.
- * b. Extendible bucket (ext): The bucket is extended with space for 4 more
+ * b. Extendable bucket (ext): The bucket is extended with space for 4 more
* keys. This is done by allocating additional memory at table init time,
* which is used to create a pool of free keys (the size of this pool is
* configurable and always a multiple of 4). On key add operation, the
@@ -41,7 +41,7 @@ extern "C" {
* current bucket is in extended state and a match is not found in the
* first group of 4 keys, the search continues beyond the first group of
* 4 keys, potentially until all keys in this bucket are examined. The
- * extendible bucket logic requires maintaining specific data structures
+ * extendable bucket logic requires maintaining specific data structures
* per table and per each bucket. Use-cases: flow table, etc.
* 2. Key size:
* a. Configurable key size
@@ -86,7 +86,7 @@ struct rte_table_hash_params {
uint64_t seed;
};
-/** Extendible bucket hash table operations */
+/** Extendable bucket hash table operations */
extern struct rte_table_ops rte_table_hash_ext_ops;
extern struct rte_table_ops rte_table_hash_key8_ext_ops;
extern struct rte_table_ops rte_table_hash_key16_ext_ops;
diff --git a/lib/librte_table/rte_table_hash_func.h b/lib/librte_table/rte_table_hash_func.h
index 02296eab..11ea5a90 100644
--- a/lib/librte_table/rte_table_hash_func.h
+++ b/lib/librte_table/rte_table_hash_func.h
@@ -40,7 +40,7 @@ rte_crc32_u64(uint64_t crc, uint64_t v)
return _mm_crc32_u64(crc, v);
}
-#elif defined(RTE_ARCH_ARM64)
+#elif defined(RTE_ARCH_ARM64) && defined(RTE_MACHINE_CPUFLAG_CRC32)
#include "rte_table_hash_func_arm64.h"
#else
diff --git a/lib/librte_telemetry/Makefile b/lib/librte_telemetry/Makefile
index 1a050691..ef73a4e7 100644
--- a/lib/librte_telemetry/Makefile
+++ b/lib/librte_telemetry/Makefile
@@ -19,6 +19,11 @@ EXPORT_MAP := rte_telemetry_version.map
LIBABIVER := 1
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_rte_telemetry.o += -Wno-address-of-packed-member
+CFLAGS_rte_telemetry_parser.o += -Wno-address-of-packed-member
+endif
+
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_TELEMETRY) := rte_telemetry.c
SRCS-$(CONFIG_RTE_LIBRTE_TELEMETRY) += rte_telemetry_parser.c
diff --git a/lib/librte_telemetry/rte_telemetry_parser.c b/lib/librte_telemetry/rte_telemetry_parser.c
index 03a58a2f..9bc16eef 100644
--- a/lib/librte_telemetry/rte_telemetry_parser.c
+++ b/lib/librte_telemetry/rte_telemetry_parser.c
@@ -256,7 +256,7 @@ rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry,
int action, json_t *data)
{
int ret, num_metrics, i, p;
- struct rte_metric_name *names;
+ struct rte_metric_value *values;
uint64_t num_port_ids = 0;
uint32_t port_ids[RTE_MAX_ETHPORTS];
@@ -281,7 +281,7 @@ rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry,
return -1;
}
- num_metrics = rte_metrics_get_names(NULL, 0);
+ num_metrics = rte_metrics_get_values(0, NULL, 0);
if (num_metrics < 0) {
TELEMETRY_LOG_ERR("Cannot get metrics count");
@@ -300,8 +300,8 @@ rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry,
return -1;
}
- names = malloc(sizeof(struct rte_metric_name) * num_metrics);
- if (names == NULL) {
+ values = malloc(sizeof(struct rte_metric_value) * num_metrics);
+ if (values == NULL) {
TELEMETRY_LOG_ERR("Cannot allocate memory");
ret = rte_telemetry_send_error_response(telemetry,
-ENOMEM);
@@ -310,7 +310,6 @@ rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry,
return -1;
}
- const char *stat_names[num_metrics];
uint32_t stat_ids[num_metrics];
RTE_ETH_FOREACH_DEV(p) {
@@ -328,16 +327,13 @@ rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry,
goto fail;
}
- ret = rte_metrics_get_names(names, num_metrics);
- for (i = 0; i < num_metrics; i++)
- stat_names[i] = names[i].name;
-
- ret = rte_telemetry_stat_names_to_ids(telemetry, stat_names, stat_ids,
- num_metrics);
+ ret = rte_metrics_get_values(port_ids[0], values, num_metrics);
if (ret < 0) {
- TELEMETRY_LOG_ERR("Could not convert stat names to IDs");
+ TELEMETRY_LOG_ERR("Could not get stat values");
goto fail;
}
+ for (i = 0; i < num_metrics; i++)
+ stat_ids[i] = values[i].key;
ret = rte_telemetry_send_ports_stats_values(stat_ids, num_metrics,
port_ids, num_port_ids, telemetry);
@@ -349,7 +345,7 @@ rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry,
return 0;
fail:
- free(names);
+ free(values);
return -1;
}
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index d280ac42..5905e240 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -488,7 +488,7 @@ int rte_vhost_get_ifname(int vid, char *buf, size_t len);
* virtio queue index
*
* @return
- * num of avail entires left
+ * num of avail entries left
*/
uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
@@ -536,7 +536,7 @@ uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
/**
* Get guest mem table: a list of memory regions.
*
- * An rte_vhost_vhost_memory object will be allocated internaly, to hold the
+ * An rte_vhost_vhost_memory object will be allocated internally, to hold the
* guest memory regions. Application should free it at destroy_device()
* callback.
*
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 9883b049..09799516 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -240,7 +240,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
RTE_LOG(ERR, VHOST_CONFIG,
"failed to add vhost user connection with fd %d\n",
fd);
- goto err;
+ goto err_cleanup;
}
}
@@ -257,7 +257,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
if (vsocket->notify_ops->destroy_connection)
vsocket->notify_ops->destroy_connection(conn->vid);
- goto err;
+ goto err_cleanup;
}
pthread_mutex_lock(&vsocket->conn_mutex);
@@ -267,6 +267,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
fdset_pipe_notify(&vhost_user.fdset);
return;
+err_cleanup:
+ vhost_destroy_device(vid);
err:
free(conn);
close(fd);
@@ -295,13 +297,19 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
ret = vhost_user_msg_handler(conn->vid, connfd);
if (ret < 0) {
+ struct virtio_net *dev = get_device(conn->vid);
+
close(connfd);
*remove = 1;
- vhost_destroy_device(conn->vid);
+
+ if (dev)
+ vhost_destroy_device_notify(dev);
if (vsocket->notify_ops->destroy_connection)
vsocket->notify_ops->destroy_connection(conn->vid);
+ vhost_destroy_device(conn->vid);
+
pthread_mutex_lock(&vsocket->conn_mutex);
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
pthread_mutex_unlock(&vsocket->conn_mutex);
@@ -547,6 +555,9 @@ find_vhost_user_socket(const char *path)
{
int i;
+ if (path == NULL)
+ return NULL;
+
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
@@ -562,7 +573,7 @@ rte_vhost_driver_attach_vdpa_device(const char *path, int did)
{
struct vhost_user_socket *vsocket;
- if (rte_vdpa_get_device(did) == NULL)
+ if (rte_vdpa_get_device(did) == NULL || path == NULL)
return -1;
pthread_mutex_lock(&vhost_user.mutex);
@@ -961,6 +972,9 @@ rte_vhost_driver_unregister(const char *path)
int count;
struct vhost_user_connection *conn, *next;
+ if (path == NULL)
+ return -1;
+
again:
pthread_mutex_lock(&vhost_user.mutex);
diff --git a/lib/librte_vhost/vdpa.c b/lib/librte_vhost/vdpa.c
index e7d849ee..f560419b 100644
--- a/lib/librte_vhost/vdpa.c
+++ b/lib/librte_vhost/vdpa.c
@@ -49,7 +49,7 @@ rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
char device_name[MAX_VDPA_NAME_LEN];
int i;
- if (vdpa_device_num >= MAX_VHOST_DEVICE)
+ if (vdpa_device_num >= MAX_VHOST_DEVICE || addr == NULL || ops == NULL)
return -1;
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
@@ -66,7 +66,7 @@ rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
if (i == MAX_VHOST_DEVICE)
return -1;
- sprintf(device_name, "vdpa-dev-%d", i);
+ snprintf(device_name, sizeof(device_name), "vdpa-dev-%d", i);
dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
RTE_CACHE_LINE_SIZE);
if (!dev)
@@ -99,6 +99,9 @@ rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
struct rte_vdpa_device *dev;
int i;
+ if (addr == NULL)
+ return -1;
+
for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
dev = vdpa_devices[i];
if (dev && is_same_vdpa_device(&dev->addr, addr))
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 70ac6bc9..488cf169 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -460,7 +460,7 @@ rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || mtu == NULL)
return -ENODEV;
if (!(dev->flags & VIRTIO_DEV_READY))
@@ -528,7 +528,7 @@ rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
struct virtio_net *dev = get_device(vid);
- if (dev == NULL)
+ if (dev == NULL || buf == NULL)
return -1;
len = RTE_MIN(len, sizeof(dev->ifname));
@@ -545,7 +545,7 @@ rte_vhost_get_negotiated_features(int vid, uint64_t *features)
struct virtio_net *dev;
dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || features == NULL)
return -1;
*features = dev->features;
@@ -560,7 +560,7 @@ rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
size_t size;
dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || mem == NULL)
return -1;
size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
@@ -583,7 +583,7 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
struct vhost_virtqueue *vq;
dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || vring == NULL)
return -1;
if (vring_idx >= VHOST_MAX_VRING)
@@ -776,7 +776,7 @@ int rte_vhost_get_log_base(int vid, uint64_t *log_base,
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || log_base == NULL || log_size == NULL)
return -1;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
@@ -797,7 +797,7 @@ int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
return -1;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
@@ -818,7 +818,7 @@ int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
+ if (dev == NULL)
return -1;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 552b9298..bb9cff9f 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -686,16 +686,20 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
uint16_t old = vq->signalled_used;
uint16_t new = vq->last_used_idx;
+ bool signalled_used_valid = vq->signalled_used_valid;
+
+ vq->signalled_used = new;
+ vq->signalled_used_valid = true;
VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
__func__,
vhost_used_event(vq),
old, new);
- if (vhost_need_event(vhost_used_event(vq), new, old)
- && (vq->callfd >= 0)) {
- vq->signalled_used = vq->last_used_idx;
+
+ if ((vhost_need_event(vhost_used_event(vq), new, old) &&
+ (vq->callfd >= 0)) ||
+ unlikely(!signalled_used_valid))
eventfd_write(vq->callfd, (eventfd_t) 1);
- }
} else {
/* Kick the guest if necessary. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
@@ -755,4 +759,38 @@ kick:
eventfd_write(vq->callfd, (eventfd_t)1);
}
+static __rte_always_inline void
+restore_mbuf(struct rte_mbuf *m)
+{
+ uint32_t mbuf_size, priv_size;
+
+ while (m) {
+ priv_size = rte_pktmbuf_priv_size(m->pool);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ /* start of buffer is after mbuf structure and priv data */
+
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ m = m->next;
+ }
+}
+
+static __rte_always_inline bool
+mbuf_is_consumed(struct rte_mbuf *m)
+{
+ while (m) {
+ if (rte_mbuf_refcnt_read(m) > 1)
+ return false;
+ m = m->next;
+ }
+
+ return true;
+}
+
+static __rte_always_inline void
+put_zmbuf(struct zcopy_mbuf *zmbuf)
+{
+ zmbuf->in_use = 0;
+}
+
#endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 0694c0a7..fc362ba9 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -1102,7 +1102,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
chain->para.hash_result_len,
- nb_descs, vq_size)) < 0) {
+ nb_descs, vq_size) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1617,7 +1617,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
op->sym->m_src->data_off = 0;
if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
- op, head, desc_idx)) < 0)
+ op, head, desc_idx) < 0))
break;
}
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 19e04c95..5552f8bb 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -93,15 +93,47 @@ get_blk_size(int fd)
return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
+/*
+ * Reclaim all the outstanding zmbufs for a virtqueue.
+ */
+static void
+drain_zmbuf_list(struct vhost_virtqueue *vq)
+{
+ struct zcopy_mbuf *zmbuf, *next;
+
+ for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+ zmbuf != NULL; zmbuf = next) {
+ next = TAILQ_NEXT(zmbuf, next);
+
+ while (!mbuf_is_consumed(zmbuf->mbuf))
+ usleep(1000);
+
+ TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+ restore_mbuf(zmbuf->mbuf);
+ rte_pktmbuf_free(zmbuf->mbuf);
+ put_zmbuf(zmbuf);
+ vq->nr_zmbuf -= 1;
+ }
+}
+
static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
struct rte_vhost_mem_region *reg;
+ struct vhost_virtqueue *vq;
if (!dev || !dev->mem)
return;
+ if (dev->dequeue_zero_copy) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ vq = dev->virtqueue[i];
+ if (vq)
+ drain_zmbuf_list(vq);
+ }
+ }
+
for (i = 0; i < dev->mem->nregions; i++) {
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
@@ -1199,8 +1231,12 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
* the ring starts already enabled. Otherwise, it is enabled via
* the SET_VRING_ENABLE message.
*/
- if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
+ if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
vq->enabled = 1;
+ if (dev->notify_ops->vring_state_changed)
+ dev->notify_ops->vring_state_changed(
+ dev->vid, file.index, 1);
+ }
if (vq->kickfd >= 0)
close(vq->kickfd);
@@ -1212,15 +1248,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
static void
free_zmbufs(struct vhost_virtqueue *vq)
{
- struct zcopy_mbuf *zmbuf, *next;
-
- for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
- zmbuf != NULL; zmbuf = next) {
- next = TAILQ_NEXT(zmbuf, next);
-
- rte_pktmbuf_free(zmbuf->mbuf);
- TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
- }
+ drain_zmbuf_list(vq);
rte_free(vq->zmbufs);
}
@@ -1274,6 +1302,8 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ vq->signalled_used_valid = false;
+
if (dev->dequeue_zero_copy)
free_zmbufs(vq);
if (vq_is_packed(dev)) {
@@ -1321,6 +1351,10 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
dev->notify_ops->vring_state_changed(dev->vid,
index, enable);
+ /* On disable, rings have to be stopped being processed. */
+ if (!enable && dev->dequeue_zero_copy)
+ drain_zmbuf_list(dev->virtqueue[index]);
+
dev->virtqueue[index]->enabled = enable;
return VH_RESULT_OK;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 15d682c3..a6576891 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1088,12 +1088,6 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
}
}
-static __rte_always_inline void
-put_zmbuf(struct zcopy_mbuf *zmbuf)
-{
- zmbuf->in_use = 0;
-}
-
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec, uint16_t nr_vec,
@@ -1331,34 +1325,6 @@ again:
return NULL;
}
-static __rte_always_inline bool
-mbuf_is_consumed(struct rte_mbuf *m)
-{
- while (m) {
- if (rte_mbuf_refcnt_read(m) > 1)
- return false;
- m = m->next;
- }
-
- return true;
-}
-
-static __rte_always_inline void
-restore_mbuf(struct rte_mbuf *m)
-{
- uint32_t mbuf_size, priv_size;
-
- while (m) {
- priv_size = rte_pktmbuf_priv_size(m->pool);
- mbuf_size = sizeof(struct rte_mbuf) + priv_size;
- /* start of buffer is after mbuf structure and priv data */
-
- m->buf_addr = (char *)m + mbuf_size;
- m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
- m = m->next;
- }
-}
-
static __rte_always_inline uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
diff --git a/meson.build b/meson.build
index c48ecb8a..63b32c6e 100644
--- a/meson.build
+++ b/meson.build
@@ -2,7 +2,7 @@
# Copyright(c) 2017 Intel Corporation
project('DPDK', 'C',
- version: '18.11.1',
+ version: '18.11.2',
license: 'BSD',
default_options: ['buildtype=release', 'default_library=static'],
meson_version: '>= 0.41'
@@ -69,13 +69,6 @@ configure_file(output: build_cfg,
# them.
dpdk_drivers = ['-Wl,--whole-archive'] + dpdk_drivers + ['-Wl,--no-whole-archive']
-# driver .so files often depend upon the bus drivers for their connect bus,
-# e.g. ixgbe depends on librte_bus_pci. This means that the bus drivers need
-# to be in the library path, so symlink the drivers from the main lib directory.
-meson.add_install_script('buildtools/symlink-drivers-solibs.sh',
- driver_install_path,
- get_option('libdir'))
-
pkg = import('pkgconfig')
pkg.generate(name: meson.project_name(),
filebase: 'lib' + meson.project_name().to_lower(),
diff --git a/mk/exec-env/linuxapp/rte.vars.mk b/mk/exec-env/linuxapp/rte.vars.mk
index 3129edc8..57ee8215 100644
--- a/mk/exec-env/linuxapp/rte.vars.mk
+++ b/mk/exec-env/linuxapp/rte.vars.mk
@@ -24,6 +24,8 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
EXECENV_LDLIBS += -lgcc_s
endif
+EXECENV_LDLIBS-$(CONFIG_RTE_USE_LIBBSD) += -lbsd
+
# force applications to link with gcc/icc instead of using ld
LINK_USING_CC := 1
@@ -32,4 +34,8 @@ EXECENV_LDFLAGS += -export-dynamic
# Add library to the group to resolve symbols
EXECENV_LDLIBS += -ldl
+# EXECENV_LDLIBS-y applies to lib.so and app linking
+# while EXECENV_LDLIBS applies only to app linking.
+EXECENV_LDLIBS += $(EXECENV_LDLIBS-y)
+
export EXECENV_CFLAGS EXECENV_LDFLAGS EXECENV_ASFLAGS EXECENV_LDLIBS
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 5699d979..c539bb9c 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -303,9 +303,6 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
endif
-ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_USE_LIBBSD),yy)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lbsd
-endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMBER) += -lm
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
index c696a217..4df8849a 100644
--- a/mk/rte.lib.mk
+++ b/mk/rte.lib.mk
@@ -35,6 +35,8 @@ PREINSTALL = $(SYMLINK-FILES-y)
_INSTALL = $(INSTALL-FILES-y) $(RTE_OUTPUT)/lib/$(LIB)
_CLEAN = doclean
+LDLIBS += $(EXECENV_LDLIBS-y)
+
.PHONY: all
all: install
diff --git a/mk/toolchain/gcc/rte.toolchain-compat.mk b/mk/toolchain/gcc/rte.toolchain-compat.mk
index dbddc986..ea40a11c 100644
--- a/mk/toolchain/gcc/rte.toolchain-compat.mk
+++ b/mk/toolchain/gcc/rte.toolchain-compat.mk
@@ -22,11 +22,20 @@ HOST_GCC_VERSION = $(HOST_GCC_MAJOR)$(HOST_GCC_MINOR)
LD_VERSION = $(shell $(LD) -v)
# disable AVX512F support for GCC & binutils 2.30 as a workaround for Bug 97
+ifeq ($(CONFIG_RTE_ARCH_X86), y)
ifneq ($(filter 2.30%,$(LD_VERSION)),)
FORCE_DISABLE_AVX512 := y
# print warning only once for librte_eal
ifneq ($(filter %librte_eal,$(CURDIR)),)
-$(warning AVX512 support disabled because of ld 2.30. See Bug 97)
+$(warning AVX512 support disabled because of binutils 2.30. See Bug 97)
+endif
+endif
+ifneq ($(filter 2.31%,$(LD_VERSION)),)
+FORCE_DISABLE_AVX512 := y
+# print warning only once for librte_eal
+ifneq ($(filter %librte_eal,$(CURDIR)),)
+$(warning AVX512 support disabled because of binutils 2.31. See Bug 249)
+endif
endif
endif
diff --git a/mk/toolchain/gcc/rte.vars.mk b/mk/toolchain/gcc/rte.vars.mk
index d8b99faf..b852fcfd 100644
--- a/mk/toolchain/gcc/rte.vars.mk
+++ b/mk/toolchain/gcc/rte.vars.mk
@@ -87,5 +87,8 @@ WERROR_FLAGS += -Wimplicit-fallthrough=2
WERROR_FLAGS += -Wno-format-truncation
endif
+# disable packed member unalign warnings
+WERROR_FLAGS += -Wno-address-of-packed-member
+
export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF
export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index 36c7ee82..8d3da901 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -2,7 +2,7 @@
# Copyright 2014 6WIND S.A.
Name: dpdk
-Version: 18.11.1
+Version: 18.11.2
Release: 1
Packager: packaging@6wind.com
URL: http://dpdk.org
diff --git a/test/test/autotest_data.py b/test/test/autotest_data.py
index 0fb7866d..d878f954 100644
--- a/test/test/autotest_data.py
+++ b/test/test/autotest_data.py
@@ -297,12 +297,6 @@ parallel_test_list = [
"Report": None,
},
{
- "Name": "Devargs autotest",
- "Command": "devargs_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
"Name": "Link bonding autotest",
"Command": "link_bonding_autotest",
"Func": default_autotest,
diff --git a/test/test/commands.c b/test/test/commands.c
index 94fbc310..8d5a03a9 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -44,6 +44,7 @@
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
#include <cmdline.h>
+#include <rte_string_fns.h>
#include "test.h"
@@ -365,23 +366,22 @@ cmdline_parse_ctx_t main_ctx[] = {
int commands_init(void)
{
struct test_command *t;
- char *commands, *ptr;
+ char *commands;
int commands_len = 0;
TAILQ_FOREACH(t, &commands_list, next) {
commands_len += strlen(t->command) + 1;
}
- commands = malloc(commands_len + 1);
+ commands = (char *)calloc(commands_len, sizeof(char));
if (!commands)
return -1;
- ptr = commands;
TAILQ_FOREACH(t, &commands_list, next) {
- ptr += sprintf(ptr, "%s#", t->command);
+ strlcat(commands, t->command, commands_len);
+ if (TAILQ_NEXT(t, next) != NULL)
+ strlcat(commands, "#", commands_len);
}
- ptr--;
- ptr[0] = '\0';
cmd_autotest_autotest.string_data.str = commands;
return 0;
diff --git a/test/test/meson.build b/test/test/meson.build
index 5a4816fe..8f03ddda 100644
--- a/test/test/meson.build
+++ b/test/test/meson.build
@@ -154,7 +154,6 @@ test_names = [
'cycles_autotest',
'debug_autotest',
'delay_us_sleep_autotest',
- 'devargs_autotest',
'distributor_autotest',
'distributor_perf_autotest',
'eal_flags_autotest',
@@ -263,7 +262,7 @@ if cc.has_argument('-Wno-format-truncation')
endif
# specify -D_GNU_SOURCE unconditionally
-default_cflags += '-D_GNU_SOURCE'
+cflags += '-D_GNU_SOURCE'
test_dep_objs = []
compress_test_dep = dependency('zlib', required: false)
diff --git a/test/test/test_barrier.c b/test/test/test_barrier.c
index 82b572c3..92f3d325 100644
--- a/test/test/test_barrier.c
+++ b/test/test/test_barrier.c
@@ -92,12 +92,14 @@ plock_lock(struct plock *l, uint32_t self)
other = self ^ 1;
l->flag[self] = 1;
+ rte_smp_wmb();
l->victim = self;
store_load_barrier(l->utype);
while (l->flag[other] == 1 && l->victim == self)
rte_pause();
+ rte_smp_rmb();
}
static void
@@ -202,7 +204,7 @@ plock_test(uint32_t iter, enum plock_use_type utype)
printf("%s(iter=%u, utype=%u) started on %u lcores\n",
__func__, iter, utype, n);
- if (pt == NULL || lpt == NULL) {
+ if (pt == NULL || lpt == NULL || sum == NULL) {
printf("%s: failed to allocate memory for %u lcores\n",
__func__, n);
free(pt);
@@ -252,7 +254,7 @@ plock_test(uint32_t iter, enum plock_use_type utype)
/* race condition occurred, lock doesn't work properly */
if (sum[i] != pt[i].val || 2 * iter != pt[i].iter) {
- printf("error: local and shared sums don't much\n");
+ printf("error: local and shared sums don't match\n");
rc = -1;
}
}
diff --git a/test/test/test_compressdev.c b/test/test/test_compressdev.c
index 5d5e5190..03334225 100644
--- a/test/test/test_compressdev.c
+++ b/test/test/test_compressdev.c
@@ -4,6 +4,7 @@
#include <string.h>
#include <zlib.h>
#include <math.h>
+#include <unistd.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 84065eb4..fbe8c21e 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -15,6 +15,7 @@
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
@@ -374,7 +375,7 @@ testsuite_setup(void)
snprintf(vdev_args, sizeof(vdev_args),
"%s%d", temp_str, i);
strcpy(temp_str, vdev_args);
- strcat(temp_str, ";");
+ strlcat(temp_str, ";", sizeof(temp_str));
slave_core_count++;
socket_id = lcore_config[i].socket_id;
}
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index f8bd8583..52c37764 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -27,10 +27,10 @@
#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC 0x0020 /* DPAA2_SEC flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC 0x0040 /* DPAA_SEC flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_MVSAM 0x0080 /* Marvell flag */
-#define BLOCKCIPHER_TEST_TARGET_PMD_CCP 0x0040 /* CCP flag */
-#define BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO 0x0200 /* VIRTIO flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_OCTEONTX 0x0100 /* OCTEON TX flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO 0x0200 /* VIRTIO flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_CAAM_JR 0x0400 /* CAAM_JR flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP 0x0800 /* CCP flag */
#define BLOCKCIPHER_TEST_OP_CIPHER (BLOCKCIPHER_TEST_OP_ENCRYPT | \
BLOCKCIPHER_TEST_OP_DECRYPT)
diff --git a/test/test/test_distributor.c b/test/test/test_distributor.c
index 98919ec0..da3348fd 100644
--- a/test/test/test_distributor.c
+++ b/test/test/test_distributor.c
@@ -11,6 +11,7 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_distributor.h>
+#include <rte_string_fns.h>
#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
#define BURST 32
@@ -642,9 +643,11 @@ test_distributor(void)
worker_params.dist = dist[i];
if (i)
- sprintf(worker_params.name, "burst");
+ strlcpy(worker_params.name, "burst",
+ sizeof(worker_params.name));
else
- sprintf(worker_params.name, "single");
+ strlcpy(worker_params.name, "single",
+ sizeof(worker_params.name));
rte_eal_mp_remote_launch(handle_work,
&worker_params, SKIP_MASTER);
diff --git a/test/test/test_eal_flags.c b/test/test/test_eal_flags.c
index 81e345b8..775ccd3d 100644
--- a/test/test/test_eal_flags.c
+++ b/test/test/test_eal_flags.c
@@ -17,6 +17,7 @@
#include <sys/wait.h>
#include <sys/file.h>
#include <limits.h>
+#include <fcntl.h>
#include <rte_per_lcore.h>
#include <rte_debug.h>
diff --git a/test/test/test_event_eth_rx_adapter.c b/test/test/test_event_eth_rx_adapter.c
index 1d3be82b..38f5c039 100644
--- a/test/test/test_event_eth_rx_adapter.c
+++ b/test/test/test_event_eth_rx_adapter.c
@@ -479,7 +479,8 @@ adapter_multi_eth_add_del(void)
/* add the max port for rx_adapter */
port_index = rte_eth_dev_count_total();
for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
- sprintf(driver_name, "%s%u", "net_null", drv_id);
+ snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
+ drv_id);
err = rte_vdev_init(driver_name, NULL);
TEST_ASSERT(err == 0, "Failed driver %s got %d",
driver_name, err);
diff --git a/test/test/test_hash_perf.c b/test/test/test_hash_perf.c
index 52521118..5648fce0 100644
--- a/test/test/test_hash_perf.c
+++ b/test/test/test_hash_perf.c
@@ -85,9 +85,11 @@ create_table(unsigned int with_data, unsigned int table_index,
if (with_data)
/* Table will store 8-byte data */
- sprintf(name, "test_hash%d_data", hashtest_key_lens[table_index]);
+ snprintf(name, sizeof(name), "test_hash%u_data",
+ hashtest_key_lens[table_index]);
else
- sprintf(name, "test_hash%d", hashtest_key_lens[table_index]);
+ snprintf(name, sizeof(name), "test_hash%u",
+ hashtest_key_lens[table_index]);
if (with_locks)
diff --git a/test/test/test_link_bonding.c b/test/test/test_link_bonding.c
index 0fe1d78e..0438e76c 100644
--- a/test/test/test_link_bonding.c
+++ b/test/test/test_link_bonding.c
@@ -201,6 +201,7 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
}
static int slaves_initialized;
+static int mac_slaves_initialized;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cvar = PTHREAD_COND_INITIALIZER;
@@ -873,10 +874,11 @@ test_set_explicit_bonded_mac(void)
static int
test_set_bonded_port_initialization_mac_assignment(void)
{
- int i, slave_count, bonded_port_id;
+ int i, slave_count;
uint16_t slaves[RTE_MAX_ETHPORTS];
- int slave_port_ids[BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT];
+ static int bonded_port_id = -1;
+ static int slave_port_ids[BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT];
struct ether_addr slave_mac_addr, bonded_mac_addr, read_mac_addr;
@@ -887,29 +889,36 @@ test_set_bonded_port_initialization_mac_assignment(void)
/*
* 1. a - Create / configure bonded / slave ethdevs
*/
- bonded_port_id = rte_eth_bond_create("net_bonding_mac_ass_test",
- BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
- TEST_ASSERT(bonded_port_id > 0, "failed to create bonded device");
+ if (bonded_port_id == -1) {
+ bonded_port_id = rte_eth_bond_create("net_bonding_mac_ass_test",
+ BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
+ TEST_ASSERT(bonded_port_id > 0, "failed to create bonded device");
- TEST_ASSERT_SUCCESS(configure_ethdev(bonded_port_id, 0, 0),
- "Failed to configure bonded ethdev");
+ TEST_ASSERT_SUCCESS(configure_ethdev(bonded_port_id, 0, 0),
+ "Failed to configure bonded ethdev");
+ }
- for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) {
- char pmd_name[RTE_ETH_NAME_MAX_LEN];
+ if (!mac_slaves_initialized) {
+ for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) {
+ char pmd_name[RTE_ETH_NAME_MAX_LEN];
- slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = i + 100;
+ slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = i + 100;
- snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, "eth_slave_%d", i);
+ snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN,
+ "eth_slave_%d", i);
- slave_port_ids[i] = virtual_ethdev_create(pmd_name,
- &slave_mac_addr, rte_socket_id(), 1);
+ slave_port_ids[i] = virtual_ethdev_create(pmd_name,
+ &slave_mac_addr, rte_socket_id(), 1);
- TEST_ASSERT(slave_port_ids[i] >= 0,
- "Failed to create slave ethdev %s", pmd_name);
+ TEST_ASSERT(slave_port_ids[i] >= 0,
+ "Failed to create slave ethdev %s",
+ pmd_name);
- TEST_ASSERT_SUCCESS(configure_ethdev(slave_port_ids[i], 1, 0),
- "Failed to configure virtual ethdev %s",
- pmd_name);
+ TEST_ASSERT_SUCCESS(configure_ethdev(slave_port_ids[i], 1, 0),
+ "Failed to configure virtual ethdev %s",
+ pmd_name);
+ }
+ mac_slaves_initialized = 1;
}
diff --git a/test/test/test_pmd_perf.c b/test/test/test_pmd_perf.c
index f5095c87..ed8524a1 100644
--- a/test/test/test_pmd_perf.c
+++ b/test/test/test_pmd_perf.c
@@ -493,16 +493,21 @@ main_loop(__rte_unused void *args)
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- int nb_free = pkt_per_port;
+ int nb_free = 0;
+ uint64_t timeout = 10000;
do { /* dry out */
nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
nb_tx = 0;
while (nb_tx < nb_rx)
rte_pktmbuf_free(pkts_burst[nb_tx++]);
- nb_free -= nb_rx;
- } while (nb_free != 0);
- printf("free %d mbuf left in port %u\n", pkt_per_port, portid);
+ nb_free += nb_rx;
+
+ if (unlikely(nb_rx == 0))
+ timeout--;
+ } while (nb_free != pkt_per_port && timeout != 0);
+ printf("free %d (expected %d) mbuf left in port %u\n", nb_free,
+ pkt_per_port, portid);
}
if (count == 0)
diff --git a/test/test/test_spinlock.c b/test/test/test_spinlock.c
index 73bff128..6ac74959 100644
--- a/test/test/test_spinlock.c
+++ b/test/test/test_spinlock.c
@@ -96,16 +96,16 @@ test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
}
static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
-#define TIME_MS 100
+#define MAX_LOOP 10000
static int
load_loop_fn(void *func_param)
{
uint64_t time_diff = 0, begin;
uint64_t hz = rte_get_timer_hz();
- uint64_t lcount = 0;
+ volatile uint64_t lcount = 0;
const int use_lock = *(int*)func_param;
const unsigned lcore = rte_lcore_id();
@@ -114,17 +114,15 @@ load_loop_fn(void *func_param)
while (rte_atomic32_read(&synchro) == 0);
begin = rte_get_timer_cycles();
- while (time_diff < hz * TIME_MS / 1000) {
+ while (lcount < MAX_LOOP) {
if (use_lock)
rte_spinlock_lock(&lk);
lcount++;
if (use_lock)
rte_spinlock_unlock(&lk);
- /* delay to make lock duty cycle slighlty realistic */
- rte_delay_us(1);
- time_diff = rte_get_timer_cycles() - begin;
}
- lock_count[lcore] = lcount;
+ time_diff = rte_get_timer_cycles() - begin;
+ time_count[lcore] = time_diff * 1000000 / hz;
return 0;
}
@@ -138,14 +136,16 @@ test_spinlock_perf(void)
printf("\nTest with no lock on single core...\n");
load_loop_fn(&lock);
- printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
- memset(lock_count, 0, sizeof(lock_count));
+ printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+ time_count[lcore]);
+ memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
lock = 1;
load_loop_fn(&lock);
- printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
- memset(lock_count, 0, sizeof(lock_count));
+ printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+ time_count[lcore]);
+ memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
@@ -160,11 +160,12 @@ test_spinlock_perf(void)
rte_eal_mp_wait_lcore();
RTE_LCORE_FOREACH(i) {
- printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
- total += lock_count[i];
+ printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
+ time_count[i]);
+ total += time_count[i];
}
- printf("Total count = %"PRIu64"\n", total);
+ printf("Total Cost Time = %"PRIu64" us\n", total);
return 0;
}
diff --git a/test/test/test_string_fns.c b/test/test/test_string_fns.c
index 3f091ab9..5e105d2b 100644
--- a/test/test/test_string_fns.c
+++ b/test/test/test_string_fns.c
@@ -130,10 +130,55 @@ test_rte_strsplit(void)
}
static int
+test_rte_strlcat(void)
+{
+ /* only run actual unit tests if we have system-provided strlcat */
+#if defined(__BSD_VISIBLE) || defined(RTE_USE_LIBBSD)
+#define BUF_LEN 32
+ const char dst[BUF_LEN] = "Test string";
+ const char src[] = " appended";
+ char bsd_dst[BUF_LEN];
+ char rte_dst[BUF_LEN];
+ size_t i, bsd_ret, rte_ret;
+
+ LOG("dst = '%s', strlen(dst) = %zu\n", dst, strlen(dst));
+ LOG("src = '%s', strlen(src) = %zu\n", src, strlen(src));
+ LOG("---\n");
+
+ for (i = 0; i < BUF_LEN; i++) {
+ /* initialize destination buffers */
+ memcpy(bsd_dst, dst, BUF_LEN);
+ memcpy(rte_dst, dst, BUF_LEN);
+ /* compare implementations */
+ bsd_ret = strlcat(bsd_dst, src, i);
+ rte_ret = rte_strlcat(rte_dst, src, i);
+ if (bsd_ret != rte_ret) {
+ LOG("Incorrect retval for buf length = %zu\n", i);
+ LOG("BSD: '%zu', rte: '%zu'\n", bsd_ret, rte_ret);
+ return -1;
+ }
+ if (memcmp(bsd_dst, rte_dst, BUF_LEN) != 0) {
+ LOG("Resulting buffers don't match\n");
+ LOG("BSD: '%s', rte: '%s'\n", bsd_dst, rte_dst);
+ return -1;
+ }
+ LOG("buffer size = %zu: dst = '%s', ret = %zu\n",
+ i, rte_dst, rte_ret);
+ }
+ LOG("Checked %zu combinations\n", i);
+#undef BUF_LEN
+#endif /* defined(__BSD_VISIBLE) || defined(RTE_USE_LIBBSD) */
+
+ return 0;
+}
+
+static int
test_string_fns(void)
{
if (test_rte_strsplit() < 0)
return -1;
+ if (test_rte_strlcat() < 0)
+ return -1;
return 0;
}