author     Luca Boccassi <luca.boccassi@gmail.com>  2017-08-16 18:42:05 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>  2017-08-16 18:46:04 +0100
commit     f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree       a153a3125c6e183c73871a8ecaa4b285fed5fbd5 /drivers
parent     bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/bus/Makefile3
-rw-r--r--drivers/bus/fslmc/Makefile9
-rw-r--r--drivers/bus/fslmc/fslmc_bus.c25
-rw-r--r--drivers/bus/fslmc/fslmc_logs.h2
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.c165
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.h52
-rw-r--r--drivers/bus/fslmc/mc/dpbp.c2
-rw-r--r--drivers/bus/fslmc/mc/dpci.c307
-rw-r--r--drivers/bus/fslmc/mc/dpcon.c230
-rw-r--r--drivers/bus/fslmc/mc/dpio.c46
-rw-r--r--drivers/bus/fslmc/mc/dpmng.c88
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp.h2
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp_cmd.h2
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpci.h404
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpci_cmd.h147
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpcon.h238
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpcon_cmd.h175
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpio.h32
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpio_cmd.h2
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpmng.h106
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpmng_cmd.h61
-rw-r--r--drivers/bus/fslmc/mc/fsl_mc_cmd.h2
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c46
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpci.c179
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.c209
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.h7
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_pvt.h114
-rw-r--r--drivers/bus/fslmc/qbman/include/compat.h2
-rw-r--r--drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h46
-rw-r--r--drivers/bus/fslmc/qbman/qbman_portal.c98
-rw-r--r--drivers/bus/fslmc/rte_bus_fslmc_version.map28
-rw-r--r--drivers/bus/fslmc/rte_fslmc.h6
-rw-r--r--drivers/crypto/aesni_gcm/Makefile9
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_ops.h97
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd.c416
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c94
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h46
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c124
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c100
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h8
-rw-r--r--drivers/crypto/armv8/Makefile4
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd.c154
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_ops.c73
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_private.h20
-rw-r--r--drivers/crypto/dpaa2_sec/Makefile8
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c588
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h2
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h146
-rw-r--r--drivers/crypto/dpaa2_sec/hw/compat.h6
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/algo.h230
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/common.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/ipsec.h21
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h2
-rw-r--r--drivers/crypto/dpaa2_sec/mc/dpseci.c2
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h2
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h2
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd.c156
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd_ops.c63
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd_private.h6
-rw-r--r--drivers/crypto/null/null_crypto_pmd.c71
-rw-r--r--drivers/crypto/null/null_crypto_pmd_ops.c65
-rw-r--r--drivers/crypto/null/null_crypto_pmd_private.h3
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd.c283
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd_ops.c174
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd_private.h18
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs.h31
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs_build_desc.c23
-rw-r--r--drivers/crypto/qat/qat_crypto.c629
-rw-r--r--drivers/crypto/qat/qat_crypto.h47
-rw-r--r--drivers/crypto/qat/qat_crypto_capabilities.h102
-rw-r--r--drivers/crypto/qat/qat_qp.c15
-rw-r--r--drivers/crypto/qat/rte_qat_cryptodev.c55
-rw-r--r--drivers/crypto/scheduler/Makefile1
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.c56
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.h42
-rw-r--r--drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map2
-rw-r--r--drivers/crypto/scheduler/scheduler_failover.c47
-rw-r--r--drivers/crypto/scheduler/scheduler_multicore.c415
-rw-r--r--drivers/crypto/scheduler/scheduler_pkt_size_distr.c18
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd.c118
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_ops.c83
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_private.h19
-rw-r--r--drivers/crypto/scheduler/scheduler_roundrobin.c41
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd.c151
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd_ops.c59
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd_private.h7
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd.c131
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd_ops.c61
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd_private.h7
-rw-r--r--drivers/event/Makefile6
-rw-r--r--drivers/event/dpaa2/Makefile60
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.c692
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.h114
-rw-r--r--drivers/event/dpaa2/dpaa2_hw_dpcon.c139
-rw-r--r--drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map3
-rw-r--r--drivers/event/octeontx/Makefile4
-rw-r--r--drivers/event/octeontx/rte_pmd_octeontx_ssovf.h4
-rw-r--r--drivers/event/octeontx/ssovf_evdev.c11
-rw-r--r--drivers/event/octeontx/ssovf_evdev.h10
-rw-r--r--drivers/event/octeontx/ssovf_mbox.c6
-rw-r--r--drivers/event/octeontx/ssovf_probe.c4
-rw-r--r--drivers/event/octeontx/ssovf_worker.c49
-rw-r--r--drivers/event/octeontx/ssovf_worker.h27
-rw-r--r--drivers/event/skeleton/Makefile4
-rw-r--r--drivers/event/skeleton/skeleton_eventdev.c38
-rw-r--r--drivers/event/skeleton/skeleton_eventdev.h7
-rw-r--r--drivers/event/sw/event_ring.h14
-rw-r--r--drivers/event/sw/iq_ring.h20
-rw-r--r--drivers/event/sw/sw_evdev.c103
-rw-r--r--drivers/event/sw/sw_evdev.h10
-rw-r--r--drivers/event/sw/sw_evdev_scheduler.c24
-rw-r--r--drivers/event/sw/sw_evdev_worker.c33
-rw-r--r--drivers/event/sw/sw_evdev_xstats.c28
-rw-r--r--drivers/mempool/Makefile3
-rw-r--r--drivers/mempool/dpaa2/Makefile3
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.c28
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.h2
-rw-r--r--drivers/mempool/ring/Makefile3
-rw-r--r--drivers/mempool/stack/Makefile3
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/af_packet/rte_eth_af_packet.c9
-rw-r--r--drivers/net/ark/ark_ethdev.c78
-rw-r--r--drivers/net/ark/ark_ethdev.h4
-rw-r--r--drivers/net/ark/ark_ext.h4
-rw-r--r--drivers/net/ark/ark_global.h5
-rw-r--r--drivers/net/avp/avp_ethdev.c31
-rw-r--r--drivers/net/bnx2x/bnx2x.c14
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c8
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c10
-rw-r--r--drivers/net/bnx2x/bnx2x_vfpf.h2
-rw-r--r--drivers/net/bnx2x/ecore_hsi.h12
-rw-r--r--drivers/net/bnx2x/ecore_init.h2
-rw-r--r--drivers/net/bnx2x/ecore_sp.c4
-rw-r--r--drivers/net/bnx2x/ecore_sp.h22
-rw-r--r--drivers/net/bnx2x/elink.c16
-rw-r--r--drivers/net/bnxt/Makefile4
-rw-r--r--drivers/net/bnxt/bnxt.h131
-rw-r--r--drivers/net/bnxt/bnxt_cpr.c134
-rw-r--r--drivers/net/bnxt/bnxt_cpr.h17
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c846
-rw-r--r--drivers/net/bnxt/bnxt_filter.c54
-rw-r--r--drivers/net/bnxt/bnxt_filter.h3
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.c1794
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.h70
-rw-r--r--drivers/net/bnxt/bnxt_irq.c23
-rw-r--r--drivers/net/bnxt/bnxt_ring.c159
-rw-r--r--drivers/net/bnxt/bnxt_ring.h4
-rw-r--r--drivers/net/bnxt/bnxt_rxq.c56
-rw-r--r--drivers/net/bnxt/bnxt_rxq.h3
-rw-r--r--drivers/net/bnxt/bnxt_rxr.c393
-rw-r--r--drivers/net/bnxt/bnxt_rxr.h46
-rw-r--r--drivers/net/bnxt/bnxt_stats.c322
-rw-r--r--drivers/net/bnxt/bnxt_stats.h10
-rw-r--r--drivers/net/bnxt/bnxt_txr.c3
-rw-r--r--drivers/net/bnxt/bnxt_vnic.c68
-rw-r--r--drivers/net/bnxt/bnxt_vnic.h20
-rw-r--r--drivers/net/bnxt/hsi_struct_def_dpdk.h5959
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt.c843
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt.h354
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt_version.map20
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c372
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.h74
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad_private.h31
-rw-r--r--drivers/net/bonding/rte_eth_bond_alb.c5
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c55
-rw-r--r--drivers/net/bonding/rte_eth_bond_args.c44
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c688
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h38
-rw-r--r--drivers/net/bonding/rte_eth_bond_version.map13
-rw-r--r--drivers/net/cxgbe/base/adapter.h9
-rw-r--r--drivers/net/cxgbe/base/common.h30
-rw-r--r--drivers/net/cxgbe/base/t4_chip_type.h11
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c1141
-rw-r--r--drivers/net/cxgbe/base/t4_hw.h18
-rw-r--r--drivers/net/cxgbe/base/t4_msg.h16
-rw-r--r--drivers/net/cxgbe/base/t4_pci_id_tbl.h15
-rw-r--r--drivers/net/cxgbe/base/t4_regs.h33
-rw-r--r--drivers/net/cxgbe/base/t4_regs_values.h9
-rw-r--r--drivers/net/cxgbe/base/t4fw_interface.h31
-rw-r--r--drivers/net/cxgbe/cxgbe.h1
-rw-r--r--drivers/net/cxgbe/cxgbe_compat.h11
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c16
-rw-r--r--drivers/net/cxgbe/cxgbe_main.c239
-rw-r--r--drivers/net/cxgbe/sge.c346
-rw-r--r--drivers/net/dpaa2/Makefile2
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni.c39
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h2
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.c783
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.h31
-rw-r--r--drivers/net/dpaa2/dpaa2_rxtx.c342
-rw-r--r--drivers/net/dpaa2/mc/dpni.c301
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpkg.h2
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni.h367
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni_cmd.h144
-rw-r--r--drivers/net/e1000/Makefile1
-rw-r--r--drivers/net/e1000/e1000_ethdev.h99
-rw-r--r--drivers/net/e1000/em_ethdev.c17
-rw-r--r--drivers/net/e1000/igb_ethdev.c684
-rw-r--r--drivers/net/e1000/igb_flow.c1707
-rw-r--r--drivers/net/e1000/igb_pf.c2
-rw-r--r--drivers/net/ena/base/ena_plat_dpdk.h2
-rw-r--r--drivers/net/ena/ena_ethdev.c16
-rw-r--r--drivers/net/enic/Makefile1
-rw-r--r--drivers/net/enic/base/cq_enet_desc.h13
-rw-r--r--drivers/net/enic/base/vnic_dev.c162
-rw-r--r--drivers/net/enic/base/vnic_dev.h5
-rw-r--r--drivers/net/enic/base/vnic_devcmd.h81
-rw-r--r--drivers/net/enic/enic.h23
-rw-r--r--drivers/net/enic/enic_clsf.c18
-rw-r--r--drivers/net/enic/enic_ethdev.c22
-rw-r--r--drivers/net/enic/enic_flow.c1544
-rw-r--r--drivers/net/enic/enic_main.c7
-rw-r--r--drivers/net/enic/enic_res.c15
-rw-r--r--drivers/net/enic/enic_rxtx.c19
-rw-r--r--drivers/net/failsafe/Makefile62
-rw-r--r--drivers/net/failsafe/failsafe.c299
-rw-r--r--drivers/net/failsafe/failsafe_args.c474
-rw-r--r--drivers/net/failsafe/failsafe_eal.c118
-rw-r--r--drivers/net/failsafe/failsafe_ether.c437
-rw-r--r--drivers/net/failsafe/failsafe_flow.c244
-rw-r--r--drivers/net/failsafe/failsafe_ops.c867
-rw-r--r--drivers/net/failsafe/failsafe_private.h359
-rw-r--r--drivers/net/failsafe/failsafe_rxtx.c203
-rw-r--r--drivers/net/failsafe/rte_pmd_failsafe_version.map4
-rw-r--r--drivers/net/fm10k/fm10k_ethdev.c21
-rw-r--r--drivers/net/fm10k/fm10k_rxtx_vec.c19
-rw-r--r--drivers/net/i40e/Makefile6
-rw-r--r--drivers/net/i40e/base/README2
-rw-r--r--drivers/net/i40e/base/i40e_adminq.c12
-rw-r--r--drivers/net/i40e/base/i40e_adminq_cmd.h71
-rw-r--r--drivers/net/i40e/base/i40e_common.c549
-rw-r--r--drivers/net/i40e/base/i40e_devids.h1
-rw-r--r--drivers/net/i40e/base/i40e_nvm.c20
-rw-r--r--drivers/net/i40e/base/i40e_prototype.h33
-rw-r--r--drivers/net/i40e/base/i40e_register.h2
-rw-r--r--drivers/net/i40e/base/i40e_type.h49
-rw-r--r--drivers/net/i40e/base/i40e_virtchnl.h445
-rw-r--r--drivers/net/i40e/base/virtchnl.h772
-rw-r--r--drivers/net/i40e/i40e_ethdev.c322
-rw-r--r--drivers/net/i40e/i40e_ethdev.h130
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c324
-rw-r--r--drivers/net/i40e/i40e_fdir.c56
-rw-r--r--drivers/net/i40e/i40e_flow.c2780
-rw-r--r--drivers/net/i40e/i40e_pf.c277
-rw-r--r--drivers/net/i40e/i40e_pf.h37
-rw-r--r--drivers/net/i40e/i40e_rxtx.c10
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_common.h4
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_sse.c38
-rw-r--r--drivers/net/i40e/i40e_tm.c976
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.c258
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.h54
-rw-r--r--drivers/net/i40e/rte_pmd_i40e_version.map7
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/base/README2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.c3
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.c30
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass_api.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass_defines.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.c328
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.h93
-rw-r--r--drivers/net/ixgbe/ixgbe_fdir.c37
-rw-r--r--drivers/net/ixgbe/ixgbe_flow.c668
-rw-r--r--drivers/net/ixgbe/ixgbe_pf.c26
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c544
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.h7
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_common.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c108
-rw-r--r--drivers/net/ixgbe/ixgbe_tm.c1043
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.c153
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.h215
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe_version.map14
-rw-r--r--drivers/net/kni/rte_eth_kni.c2
-rw-r--r--drivers/net/liquidio/base/lio_hw_defs.h6
-rw-r--r--drivers/net/liquidio/lio_ethdev.c21
-rw-r--r--drivers/net/liquidio/lio_rxtx.c37
-rw-r--r--drivers/net/liquidio/lio_struct.h2
-rw-r--r--drivers/net/mlx4/Makefile6
-rw-r--r--drivers/net/mlx4/mlx4.c682
-rw-r--r--drivers/net/mlx4/mlx4.h31
-rw-r--r--drivers/net/mlx4/mlx4_flow.c238
-rw-r--r--drivers/net/mlx4/mlx4_flow.h8
-rw-r--r--drivers/net/mlx5/Makefile10
-rw-r--r--drivers/net/mlx5/mlx5.c28
-rw-r--r--drivers/net/mlx5/mlx5.h5
-rw-r--r--drivers/net/mlx5/mlx5_defs.h18
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c200
-rw-r--r--drivers/net/mlx5/mlx5_fdir.c1
-rw-r--r--drivers/net/mlx5/mlx5_flow.c50
-rw-r--r--drivers/net/mlx5/mlx5_mac.c2
-rw-r--r--drivers/net/mlx5/mlx5_mr.c17
-rw-r--r--drivers/net/mlx5/mlx5_rxmode.c2
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c263
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c667
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h297
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_sse.c1417
-rw-r--r--drivers/net/mlx5/mlx5_trigger.c19
-rw-r--r--drivers/net/mlx5/mlx5_txq.c24
-rw-r--r--drivers/net/nfp/nfp_net.c26
-rw-r--r--drivers/net/null/rte_eth_null.c3
-rw-r--r--drivers/net/qede/Makefile1
-rw-r--r--drivers/net/qede/base/bcm_osal.c89
-rw-r--r--drivers/net/qede/base/bcm_osal.h15
-rw-r--r--drivers/net/qede/base/common_hsi.h68
-rw-r--r--drivers/net/qede/base/ecore.h4
-rw-r--r--drivers/net/qede/base/ecore_dev.c474
-rw-r--r--drivers/net/qede/base/ecore_dev_api.h55
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h45
-rw-r--r--drivers/net/qede/base/ecore_hsi_debug_tools.h24
-rw-r--r--drivers/net/qede/base/ecore_hsi_init_func.h4
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.c94
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.h64
-rw-r--r--drivers/net/qede/base/ecore_int.c146
-rw-r--r--drivers/net/qede/base/ecore_int.h3
-rw-r--r--drivers/net/qede/base/ecore_iro_values.h12
-rw-r--r--drivers/net/qede/base/ecore_l2.c188
-rw-r--r--drivers/net/qede/base/ecore_mcp.c48
-rw-r--r--drivers/net/qede/base/ecore_mcp.h11
-rw-r--r--drivers/net/qede/base/ecore_mcp_api.h11
-rw-r--r--drivers/net/qede/base/ecore_rt_defs.h649
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.c23
-rw-r--r--drivers/net/qede/base/eth_common.h4
-rw-r--r--drivers/net/qede/base/mcp_public.h58
-rw-r--r--drivers/net/qede/base/reg_addr.h5
-rw-r--r--drivers/net/qede/qede_eth_if.c318
-rw-r--r--drivers/net/qede/qede_eth_if.h132
-rw-r--r--drivers/net/qede/qede_ethdev.c990
-rw-r--r--drivers/net/qede/qede_ethdev.h41
-rw-r--r--drivers/net/qede/qede_if.h81
-rw-r--r--drivers/net/qede/qede_logs.h23
-rw-r--r--drivers/net/qede/qede_main.c52
-rw-r--r--drivers/net/qede/qede_rxtx.c1439
-rw-r--r--drivers/net/qede/qede_rxtx.h38
-rw-r--r--drivers/net/ring/rte_eth_ring.c141
-rw-r--r--drivers/net/sfc/sfc.c4
-rw-r--r--drivers/net/sfc/sfc.h23
-rw-r--r--drivers/net/sfc/sfc_debug.h9
-rw-r--r--drivers/net/sfc/sfc_dp_rx.h1
-rw-r--r--drivers/net/sfc/sfc_dp_tx.h1
-rw-r--r--drivers/net/sfc/sfc_ef10_rx.c2
-rw-r--r--drivers/net/sfc/sfc_ef10_tx.c72
-rw-r--r--drivers/net/sfc/sfc_ethdev.c290
-rw-r--r--drivers/net/sfc/sfc_ev.c3
-rw-r--r--drivers/net/sfc/sfc_flow.c23
-rw-r--r--drivers/net/sfc/sfc_intr.c16
-rw-r--r--drivers/net/sfc/sfc_log.h13
-rw-r--r--drivers/net/sfc/sfc_port.c59
-rw-r--r--drivers/net/sfc/sfc_rx.c14
-rw-r--r--drivers/net/sfc/sfc_tx.c9
-rw-r--r--drivers/net/szedata2/Makefile1
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.c128
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.h344
-rw-r--r--drivers/net/szedata2/szedata2_iobuf.c203
-rw-r--r--drivers/net/szedata2/szedata2_iobuf.h356
-rw-r--r--drivers/net/tap/rte_eth_tap.c521
-rw-r--r--drivers/net/tap/rte_eth_tap.h5
-rw-r--r--drivers/net/tap/tap_flow.c186
-rw-r--r--drivers/net/tap/tap_flow.h1
-rw-r--r--drivers/net/thunderx/Makefile4
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.c4
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.h4
-rw-r--r--drivers/net/thunderx/base/nicvf_hw.c13
-rw-r--r--drivers/net/thunderx/base/nicvf_hw.h4
-rw-r--r--drivers/net/thunderx/base/nicvf_hw_defs.h10
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.c4
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.h4
-rw-r--r--drivers/net/thunderx/base/nicvf_plat.h7
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.c25
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.h4
-rw-r--r--drivers/net/thunderx/nicvf_logs.h4
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.c4
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.h4
-rw-r--r--drivers/net/thunderx/nicvf_struct.h4
-rw-r--r--drivers/net/thunderx/nicvf_svf.c4
-rw-r--r--drivers/net/thunderx/nicvf_svf.h4
-rw-r--r--drivers/net/vhost/rte_eth_vhost.c22
-rw-r--r--drivers/net/virtio/virtio_ethdev.c55
-rw-r--r--drivers/net/virtio/virtio_pci.c20
-rw-r--r--drivers/net/virtio/virtio_rxtx.c5
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple_neon.c4
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c1
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c363
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.h4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_rxtx.c69
-rw-r--r--drivers/net/xenvirt/rte_eth_xenvirt.c1
-rw-r--r--drivers/net/xenvirt/virtqueue.h12
404 files changed, 46080 insertions, 10669 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index a04a01fb..7fef66d7 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -39,5 +39,6 @@ DEPDIRS-net := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
DEPDIRS-crypto := mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
+DEPDIRS-event := bus
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 1e5b281c..02242148 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 NXP. All rights reserved.
-# All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index 973d279b..d1b790b5 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 NXP.
-# All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -63,12 +62,16 @@ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
qbman/qbman_portal.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ mc/dpmng.c \
mc/dpbp.c \
mc/dpio.c \
- mc/mc_sys.c
+ mc/mc_sys.c \
+ mc/dpcon.c \
+ mc/dpci.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpci.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_vfio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_bus.c
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index b24642dd..f71598d5 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +32,7 @@
#include <string.h>
#include <dirent.h>
+#include <stdbool.h>
#include <rte_log.h>
#include <rte_bus.h>
@@ -105,6 +106,25 @@ rte_fslmc_probe(void)
return ret;
}
+static struct rte_device *
+rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_dpaa2_device *dev;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ if (start && &dev->device == start) {
+ start = NULL; /* starting point found */
+ continue;
+ }
+
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
/*register a fslmc bus based dpaa2 driver */
void
rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
@@ -133,9 +153,10 @@ struct rte_fslmc_bus rte_fslmc_bus = {
.bus = {
.scan = rte_fslmc_scan,
.probe = rte_fslmc_probe,
+ .find_device = rte_fslmc_find_device,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.driver_list),
};
-RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
+RTE_REGISTER_BUS(fslmc, rte_fslmc_bus.bus);
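
The find_device hook added above walks the fslmc device list and returns the first entry for which the caller-supplied rte_dev_cmp_t comparator returns 0. A minimal sketch of such a comparator, matching a device by name, is shown below; the helper name is hypothetical and not part of this patch.

#include <string.h>
#include <rte_dev.h>

/* Sketch only: comparator usable with the fslmc find_device hook above.
 * Returns 0 when the device name equals the string passed as 'data'. */
static int
example_fslmc_cmp_name(const struct rte_device *dev, const void *data)
{
	const char *name = data;

	return strcmp(dev->name, name);
}
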
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
index a890e6c3..1f7c24b3 100644
--- a/drivers/bus/fslmc/fslmc_logs.h
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 5d4ac67c..45e59277 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,6 @@
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <libgen.h>
@@ -55,7 +54,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <rte_bus.h>
#include "rte_fslmc.h"
@@ -80,6 +78,17 @@ static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);
static uint32_t mcp_id;
static int is_dma_done;
+static struct rte_fslmc_object_list fslmc_obj_list =
+ TAILQ_HEAD_INITIALIZER(fslmc_obj_list);
+
+/*register a fslmc bus based dpaa2 driver */
+void
+rte_fslmc_object_register(struct rte_dpaa2_object *object)
+{
+ RTE_VERIFY(object);
+
+ TAILQ_INSERT_TAIL(&fslmc_obj_list, object, next);
+}
static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
{
@@ -91,9 +100,9 @@ static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
container = &vfio_containers[i];
if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER,
&container->fd)) {
- FSLMC_VFIO_LOG(INFO, "Container pre-exists with"
- " FD[0x%x] for this group",
- container->fd);
+ FSLMC_VFIO_LOG(INFO,
+ "Container pre-exists with FD[0x%x] for this group",
+ container->fd);
vfio_group->container = container;
return 0;
}
@@ -132,7 +141,6 @@ static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
if (vfio_containers[i].used)
continue;
- FSLMC_VFIO_LOG(DEBUG, "Unused container at index %d", i);
container = &vfio_containers[i];
}
if (!container) {
@@ -178,29 +186,6 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
return -errno;
}
-int vfio_dmamap_mem_region(uint64_t vaddr,
- uint64_t iova,
- uint64_t size)
-{
- struct fslmc_vfio_group *group;
- struct vfio_iommu_type1_dma_map dma_map = {
- .argsz = sizeof(dma_map),
- .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
- };
-
- dma_map.vaddr = vaddr;
- dma_map.size = size;
- dma_map.iova = iova;
-
- /* SET DMA MAP for IOMMU */
- group = &vfio_groups[0];
- if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) {
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA (errno = %d)", errno);
- return -1;
- }
- return 0;
-}
-
int rte_fslmc_vfio_dmamap(void)
{
int ret;
@@ -215,17 +200,18 @@ int rte_fslmc_vfio_dmamap(void)
if (is_dma_done)
return 0;
- is_dma_done = 1;
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- memseg = rte_eal_get_physmem_layout();
- if (memseg == NULL) {
- FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
- return -ENODEV;
- }
+ memseg = rte_eal_get_physmem_layout();
+ if (memseg == NULL) {
+ FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
+ return -ENODEV;
+ }
- if (memseg[i].addr == NULL && memseg[i].len == 0)
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if (memseg[i].addr == NULL && memseg[i].len == 0) {
+ FSLMC_VFIO_LOG(DEBUG, "Total %d segments found.", i);
break;
+ }
dma_map.size = memseg[i].len;
dma_map.vaddr = memseg[i].addr_64;
@@ -245,16 +231,20 @@ int rte_fslmc_vfio_dmamap(void)
FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX",
dma_map.vaddr);
- FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX\n", dma_map.size);
+ FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX", dma_map.size);
ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
&dma_map);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API"
- "(errno = %d)", errno);
+ FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API(errno = %d)",
+ errno);
return ret;
}
- FSLMC_VFIO_LOG(DEBUG, "-----> dma_map.vaddr = 0x%llX",
- dma_map.vaddr);
+ }
+
+ /* Verifying that at least single segment is available */
+ if (i <= 0) {
+ FSLMC_VFIO_LOG(ERR, "No Segments found for VFIO Mapping");
+ return -1;
}
/* TODO - This is a W.A. as VFIO currently does not add the mapping of
@@ -263,6 +253,8 @@ int rte_fslmc_vfio_dmamap(void)
*/
vfio_map_irq_region(group);
+ is_dma_done = 1;
+
return 0;
}
@@ -277,8 +269,8 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
/* getting the mcp object's fd*/
mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
if (mc_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO get device %s fd from group"
- " %d", mcp_obj, group->fd);
+ FSLMC_VFIO_LOG(ERR, "error in VFIO get dev %s fd from group %d",
+ mcp_obj, group->fd);
return v_addr;
}
@@ -297,7 +289,7 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
}
FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx",
- reg_info.offset, reg_info.size);
+ reg_info.offset, reg_info.size);
v_addr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
@@ -351,6 +343,40 @@ fslmc_bus_add_device(struct rte_dpaa2_device *dev)
}
}
+#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle,
+ uint32_t index)
+{
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int *fd_ptr, fd, ret;
+
+ /* Prepare vfio_irq_set structure and SET the IRQ in VFIO */
+ /* Give the eventfd to VFIO */
+ fd = eventfd(0, 0);
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ *fd_ptr = fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret < 0) {
+ FSLMC_VFIO_LOG(ERR, "Unable to set IRQ in VFIO, ret: %d\n",
+ ret);
+ return -1;
+ }
+
+ /* Set the FD and update the flags */
+ intr_handle->fd = fd;
+ return 0;
+}
+
/* Following function shall fetch total available list of MC devices
* from VFIO container & populate private list of devices and other
* data structures
@@ -366,14 +392,13 @@ int fslmc_vfio_process_group(void)
char path[PATH_MAX];
int64_t v_addr;
int ndev_count;
- int dpio_count = 0, dpbp_count = 0;
struct fslmc_vfio_group *group = &vfio_groups[0];
static int process_once;
/* if already done once */
if (process_once) {
- FSLMC_VFIO_LOG(DEBUG, "Already scanned once - re-scan "
- "not supported");
+ FSLMC_VFIO_LOG(DEBUG,
+ "Already scanned once - re-scan not supported");
return 0;
}
process_once = 0;
@@ -397,8 +422,8 @@ int fslmc_vfio_process_group(void)
free(mcp_obj);
mcp_obj = malloc(sizeof(dir->d_name));
if (!mcp_obj) {
- FSLMC_VFIO_LOG(ERR, "mcp obj:Unable to"
- " allocate memory");
+ FSLMC_VFIO_LOG(ERR,
+ "mcp obj:alloc failed");
closedir(d);
return -ENOMEM;
}
@@ -441,8 +466,6 @@ int fslmc_vfio_process_group(void)
goto FAILURE;
}
- FSLMC_VFIO_LOG(DEBUG, "DPAA2 MC has VIR_ADD = %ld", v_addr);
-
rte_mcp_ptr_list[0] = (void *)v_addr;
d = opendir(path);
@@ -452,7 +475,6 @@ int fslmc_vfio_process_group(void)
}
i = 0;
- FSLMC_VFIO_LOG(DEBUG, "DPAA2 - Parsing devices:");
/* Parsing each object and initiating them*/
while ((dir = readdir(d)) != NULL) {
if (dir->d_type != DT_LNK)
@@ -469,14 +491,13 @@ int fslmc_vfio_process_group(void)
object_type = strtok(dir->d_name, ".");
temp_obj = strtok(NULL, ".");
sscanf(temp_obj, "%d", &object_id);
- FSLMC_VFIO_LOG(DEBUG, " - %s ", dev_name);
/* getting the device fd*/
dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
if (dev_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "VFIO_GROUP_GET_DEVICE_FD error"
- " Device fd: %s, Group: %d",
- dev_name, group->fd);
+ FSLMC_VFIO_LOG(ERR,
+ "GET_DEVICE_FD error fd: %s, Group: %d",
+ dev_name, group->fd);
free(dev_name);
goto FAILURE;
}
@@ -505,22 +526,22 @@ int fslmc_vfio_process_group(void)
dev->dev_type = (strcmp(object_type, "dpseci")) ?
DPAA2_MC_DPNI_DEVID : DPAA2_MC_DPSECI_DEVID;
- FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added [%s-%d]\n",
- object_type, object_id);
+ sprintf(dev->name, "%s.%d", object_type, object_id);
+ dev->device.name = dev->name;
fslmc_bus_add_device(dev);
- }
- if (!strcmp(object_type, "dpio")) {
- ret = dpaa2_create_dpio_device(vdev,
- &device_info,
+ FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added %s", dev->name);
+ } else {
+ /* Parse all other objects */
+ struct rte_dpaa2_object *object;
+
+ TAILQ_FOREACH(object, &fslmc_obj_list, next) {
+ if (!strcmp(object_type, object->name))
+ object->create(vdev, &device_info,
object_id);
- if (!ret)
- dpio_count++;
- }
- if (!strcmp(object_type, "dpbp")) {
- ret = dpaa2_create_dpbp_device(object_id);
- if (!ret)
- dpbp_count++;
+ else
+ continue;
+ }
}
}
closedir(d);
@@ -529,8 +550,6 @@ int fslmc_vfio_process_group(void)
if (ret)
FSLMC_VFIO_LOG(DEBUG, "Error in affining qbman swp %d", ret);
- FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added dpbp_count = %d dpio_count=%d\n",
- dpbp_count, dpio_count);
return 0;
FAILURE:
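
rte_dpaa2_intr_enable() above hands VFIO an eventfd through VFIO_DEVICE_SET_IRQS and stores the descriptor in intr_handle->fd; whoever waits on that descriptor acknowledges an interrupt by reading the 8-byte eventfd counter. A minimal sketch of that consumer side follows, relying only on standard eventfd(2) semantics; the function name is hypothetical and not part of this patch.

#include <stdint.h>
#include <unistd.h>

/* Sketch only: drain one interrupt notification from the eventfd that
 * rte_dpaa2_intr_enable() installed.  Returns 0 on success, -1 on error. */
static int
example_ack_dpaa2_irq(int efd)
{
	uint64_t count;

	/* eventfd reads are always 8 bytes; the value is the number of
	 * trigger events seen since the last read. */
	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return -1;

	return 0;
}
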
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index 53dd0b74..0aff9b1f 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,6 +39,10 @@
#define DPAA2_VENDOR_ID 0x1957
#define DPAA2_MC_DPNI_DEVID 7
#define DPAA2_MC_DPSECI_DEVID 3
+#define DPAA2_MC_DPCON_DEVID 5
+#define DPAA2_MC_DPIO_DEVID 9
+#define DPAA2_MC_DPBP_DEVID 10
+#define DPAA2_MC_DPCI_DEVID 11
#define VFIO_MAX_GRP 1
@@ -63,20 +67,48 @@ typedef struct fslmc_vfio_container {
struct fslmc_vfio_group *group_list[VFIO_MAX_GRP];
} fslmc_vfio_container;
-int vfio_dmamap_mem_region(
- uint64_t vaddr,
- uint64_t iova,
- uint64_t size);
+struct rte_dpaa2_object;
+
+TAILQ_HEAD(rte_fslmc_object_list, rte_dpaa2_object);
+
+typedef int (*rte_fslmc_obj_create_t)(struct fslmc_vfio_device *vdev,
+ struct vfio_device_info *obj_info,
+ int object_id);
+
+/**
+ * A structure describing a DPAA2 driver.
+ */
+struct rte_dpaa2_object {
+ TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */
+ const char *name; /**< Name of Object. */
+ uint16_t object_id; /**< DPAA2 Object ID */
+ rte_fslmc_obj_create_t create;
+};
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle,
+ uint32_t index);
int fslmc_vfio_setup_group(void);
int fslmc_vfio_process_group(void);
int rte_fslmc_vfio_dmamap(void);
-/* create dpio device */
-int dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
- struct vfio_device_info *obj_info,
- int object_id);
+/**
+ * Register a DPAA2 MC Object driver.
+ *
+ * @param mc_object
+ * A pointer to a rte_dpaa_object structure describing the mc object
+ * to be registered.
+ */
+void rte_fslmc_object_register(struct rte_dpaa2_object *object);
-int dpaa2_create_dpbp_device(int dpbp_id);
+/** Helper for DPAA2 object registration */
+#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
+RTE_INIT(dpaa2objinitfn_ ##nm); \
+static void dpaa2objinitfn_ ##nm(void) \
+{\
+ (dpaa2_obj).name = RTE_STR(nm);\
+ rte_fslmc_object_register(&dpaa2_obj); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
#endif /* _FSLMC_VFIO_H_ */
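
With this header, MC object drivers register a create callback instead of being hard-wired into the VFIO scan (the old dpaa2_create_dpio_device/dpaa2_create_dpbp_device prototypes are gone). A hypothetical object driver would plug in as sketched below; the "dpfoo" name and the empty create body are illustrative only, and the include list is abbreviated. The struct and the registration macro are the ones defined above.

#include <rte_dev.h>
#include <fslmc_vfio.h>

/* Sketch only: create callback invoked by fslmc_vfio_process_group()
 * when an object of the registered type is found in the container. */
static int
dpfoo_create(struct fslmc_vfio_device *vdev,
	     struct vfio_device_info *obj_info,
	     int object_id)
{
	(void)vdev;
	(void)obj_info;
	(void)object_id;
	return 0;
}

static struct rte_dpaa2_object rte_dpaa2_dpfoo_obj = {
	.create = dpfoo_create,
};

/* Expands to a constructor that sets .name to "dpfoo" and calls
 * rte_fslmc_object_register() before main(). */
RTE_PMD_REGISTER_DPAA2_OBJECT(dpfoo, rte_dpaa2_dpfoo_obj);
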
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index 12f9180a..fd9a52d9 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
new file mode 100644
index 00000000..0ea78379
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -0,0 +1,307 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpci.h>
+#include <fsl_dpci_cmd.h>
+
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPCI_CMD_OPEN(cmd, dpci_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPCI_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_GET_ATTRIBUTES(cmd, attr);
+
+ return 0;
+}
+
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPCI_CMD_GET_RX_QUEUE(cmd, priority);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_GET_RX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ DPCI_CMD_GET_TX_QUEUE(cmd, priority);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCI_RSP_GET_TX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPCI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
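
Every call in this new file follows the same MC command sequence: encode a header, pack any parameters, send it through the portal, and unpack the response. A typical caller chains open, query and close as sketched below; the helper name is hypothetical, and CMD_PRI_LOW is the low-priority command flag assumed to come from fsl_mc_cmd.h as elsewhere in this bus driver.

#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
#include <fsl_dpci.h>

/* Sketch only: open a DPCI object, query its attributes, close it again.
 * 'mc_io' must already point at an initialised MC portal. */
static int
example_query_dpci(struct fsl_mc_io *mc_io, int dpci_id,
		   struct dpci_attr *attr)
{
	uint16_t token;
	int err;

	err = dpci_open(mc_io, CMD_PRI_LOW, dpci_id, &token);
	if (err)
		return err;

	err = dpci_get_attributes(mc_io, CMD_PRI_LOW, token, attr);

	/* Always close the control session; keep the first error seen. */
	if (dpci_close(mc_io, CMD_PRI_LOW, token) && !err)
		err = -1;

	return err;
}
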
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
new file mode 100644
index 00000000..b078dff8
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -0,0 +1,230 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpcon.h>
+#include <fsl_dpcon_cmd.h>
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPCON_CMD_OPEN(cmd, dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPCON_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCON_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPCON_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPCON_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index d84232a3..608b57a7 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -257,6 +257,50 @@ int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
return 0;
}
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index);
+
+ return 0;
+}
+
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
+ cmd_flags,
+ token);
+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
int dpio_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
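
The two DPIO commands added above bind a DPCON to a software portal and return the channel index the portal should use for static dequeue. A minimal sketch of how a caller brackets that, with an assumed helper name and the same CMD_PRI_LOW flag as in the rest of the driver:

#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
#include <fsl_dpio.h>

/* Sketch only: attach a DPCON to an already-open DPIO, then detach it.
 * 'token' is the token returned by a prior dpio_open(). */
static int
example_static_dequeue(struct fsl_mc_io *mc_io, uint16_t token, int dpcon_id)
{
	uint8_t channel_index;
	int err;

	err = dpio_add_static_dequeue_channel(mc_io, CMD_PRI_LOW, token,
					      dpcon_id, &channel_index);
	if (err)
		return err;

	/* ... configure the portal's static dequeue with 'channel_index' ... */

	return dpio_remove_static_dequeue_channel(mc_io, CMD_PRI_LOW, token,
						  dpcon_id);
}
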
diff --git a/drivers/bus/fslmc/mc/dpmng.c b/drivers/bus/fslmc/mc/dpmng.c
new file mode 100644
index 00000000..dd1c3ac0
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpmng.c
@@ -0,0 +1,88 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpmng.h>
+#include <fsl_dpmng_cmd.h>
+
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPMNG_RSP_GET_VERSION(cmd, mc_ver_info);
+
+ return 0;
+}
+
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_SOC_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPMNG_RSP_GET_SOC_VERSION(cmd, mc_platform_info);
+
+ return 0;
+}
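
dpmng.c exposes the Management Complex firmware and SoC version queries through the same command pattern. A sketch of a caller that logs the firmware version follows; the helper name is hypothetical, CMD_PRI_LOW is assumed from fsl_mc_cmd.h, and major/minor/revision are the expected fields of struct mc_version in the fsl_dpmng.h added by this patch.

#include <stdio.h>
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
#include <fsl_dpmng.h>

/* Sketch only: query and print the MC firmware version. */
static int
example_print_mc_version(struct fsl_mc_io *mc_io)
{
	struct mc_version ver;
	int err;

	err = mc_get_version(mc_io, CMD_PRI_LOW, &ver);
	if (err)
		return err;

	printf("MC firmware %u.%u.%u\n", ver.major, ver.minor, ver.revision);

	return 0;
}
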
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index d14e25d1..32bb9aa3 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
index 1428dc6e..f0ee65a6 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
new file mode 100644
index 00000000..1e155dd7
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -0,0 +1,404 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPCI_H
+#define __FSL_DPCI_H
+
+/* Data Path Communication Interface API
+ * Contains initialization APIs and runtime control APIs for DPCI
+ */
+
+struct fsl_mc_io;
+
+/** General DPCI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPCI object
+ */
+#define DPCI_PRIO_NUM 2
+
+/**
+ * Indicates an invalid frame queue
+ */
+#define DPCI_FQID_NOT_VALID (uint32_t)(-1)
+
+/**
+ * All queues considered; see dpci_set_rx_queue()
+ */
+#define DPCI_ALL_QUEUES (uint8_t)(-1)
+
+/**
+ * dpci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpci_id: DPCI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token);
+
+/**
+ * dpci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * Enable the Order Restoration support
+ */
+#define DPCI_OPT_HAS_OPR 0x000040
+
+/**
+ * Order Point Records are shared for the entire DPCI
+ */
+#define DPCI_OPT_OPR_SHARED 0x000080
+
+/**
+ * struct dpci_cfg - Structure representing DPCI configuration
+ * @options: Any combination of the following options:
+ * DPCI_OPT_HAS_OPR
+ * DPCI_OPT_OPR_SHARED
+ * @num_of_priorities: Number of receive priorities (queues) for the DPCI;
+ * note, that the number of transmit priorities (queues)
+ * is determined by the number of receive priorities of
+ * the peer DPCI object
+ */
+struct dpci_cfg {
+ uint32_t options;
+ uint8_t num_of_priorities;
+};
+
+/**
+ * dpci_create() - Create the DPCI object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPCI object, allocate required resources and perform required
+ * initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpci_destroy() - Destroy the DPCI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpci_is_enabled() - Check if the DPCI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpci_reset() - Reset the DPCI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpci_attr - Structure representing DPCI attributes
+ * @id: DPCI object ID
+ * @num_of_priorities: Number of receive priorities
+ */
+struct dpci_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+/**
+ * dpci_get_attributes() - Retrieve DPCI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr);
+
+/**
+ * enum dpci_dest - DPCI destination types
+ * @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is
+ * expected to dequeue from the queue based on polling or
+ * other user-defined method
+ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected
+ * to dequeue from the queue only after notification is
+ * received
+ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified
+ * DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpci_dest {
+ DPCI_DEST_NONE = 0,
+ DPCI_DEST_DPIO = 1,
+ DPCI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpci_dest_cfg - Structure representing DPCI destination configuration
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPCI_DEST_NONE' option
+ */
+struct dpci_dest_cfg {
+ enum dpci_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/** DPCI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPCI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPCI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in
+ * 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpci_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+};
+
+/**
+ * dpci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation; use
+ * DPCI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpci_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+/**
+ * dpci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_rx_queue_attr *attr);
+
+/**
+ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to peer DPCI;
+ * returns 'DPCI_FQID_NOT_VALID' if no peer is connected or if
+ * the selected priority exceeds the number of priorities of the
+ * peer DPCI object
+ */
+struct dpci_tx_queue_attr {
+ uint32_t fqid;
+};
+
+/**
+ * dpci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities of the peer DPCI object
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr);
+
+/**
+ * dpci_get_api_version() - Get communication interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path communication interface API
+ * @minor_ver: Minor version of data path communication interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCI_H */
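A minimal sketch of how the DPCI control API declared above is typically driven: open the object, point its Rx queues at a DPIO, enable it, and read back the virtual FQIDs. The mc_io portal, dpci_id and dpio_id values are assumptions supplied by the caller; CMD_PRI_LOW is the command-priority flag used elsewhere in this patch.

static int dpci_bring_up(struct fsl_mc_io *mc_io, int dpci_id, int dpio_id)
{
	struct dpci_rx_queue_cfg rx_cfg = {0};
	struct dpci_rx_queue_attr rx_attr;
	struct dpci_tx_queue_attr tx_attr;
	uint16_t token;
	int err;

	err = dpci_open(mc_io, CMD_PRI_LOW, dpci_id, &token);
	if (err)
		return err;

	/* Route all Rx queues to a DPIO and attach a user context */
	rx_cfg.options = DPCI_QUEUE_OPT_DEST | DPCI_QUEUE_OPT_USER_CTX;
	rx_cfg.user_ctx = 0xcafe;
	rx_cfg.dest_cfg.dest_type = DPCI_DEST_DPIO;
	rx_cfg.dest_cfg.dest_id = dpio_id;
	rx_cfg.dest_cfg.priority = 0;
	err = dpci_set_rx_queue(mc_io, CMD_PRI_LOW, token,
				DPCI_ALL_QUEUES, &rx_cfg);
	if (err)
		goto out;

	err = dpci_enable(mc_io, CMD_PRI_LOW, token);
	if (err)
		goto out;

	/* Read back the FQIDs for priority 0; tx_attr.fqid is
	 * DPCI_FQID_NOT_VALID when no peer DPCI is connected.
	 */
	err = dpci_get_rx_queue(mc_io, CMD_PRI_LOW, token, 0, &rx_attr);
	if (!err)
		err = dpci_get_tx_queue(mc_io, CMD_PRI_LOW, token, 0, &tx_attr);

out:
	/* Release only the control session; the object keeps its state */
	dpci_close(mc_io, CMD_PRI_LOW, token);
	return err;
}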
diff --git a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
new file mode 100644
index 00000000..6d4e2730
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -0,0 +1,147 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPCI_CMD_H
+#define _FSL_DPCI_CMD_H
+
+/* DPCI Version */
+#define DPCI_VER_MAJOR 3
+#define DPCI_VER_MINOR 3
+
+/* Command IDs */
+#define DPCI_CMDID_CLOSE 0x8001
+#define DPCI_CMDID_OPEN 0x8071
+#define DPCI_CMDID_CREATE 0x9072
+#define DPCI_CMDID_DESTROY 0x9871
+#define DPCI_CMDID_GET_API_VERSION 0xa071
+
+#define DPCI_CMDID_ENABLE 0x0021
+#define DPCI_CMDID_DISABLE 0x0031
+#define DPCI_CMDID_GET_ATTR 0x0041
+#define DPCI_CMDID_RESET 0x0051
+#define DPCI_CMDID_IS_ENABLED 0x0061
+
+#define DPCI_CMDID_SET_IRQ_ENABLE 0x0121
+#define DPCI_CMDID_GET_IRQ_ENABLE 0x0131
+#define DPCI_CMDID_SET_IRQ_MASK 0x0141
+#define DPCI_CMDID_GET_IRQ_MASK 0x0151
+#define DPCI_CMDID_GET_IRQ_STATUS 0x0161
+#define DPCI_CMDID_CLEAR_IRQ_STATUS 0x0171
+
+#define DPCI_CMDID_SET_RX_QUEUE 0x0e01
+#define DPCI_CMDID_GET_LINK_STATE 0x0e11
+#define DPCI_CMDID_GET_PEER_ATTR 0x0e21
+#define DPCI_CMDID_GET_RX_QUEUE 0x0e31
+#define DPCI_CMDID_GET_TX_QUEUE 0x0e41
+#define DPCI_CMDID_SET_OPR 0x0e51
+#define DPCI_CMDID_GET_OPR 0x0e61
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_OPEN(cmd, dpci_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_of_priorities);\
+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_ATTRIBUTES(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, (attr)->id);\
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->num_of_priorities);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_PEER_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->peer_id);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_of_priorities);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_LINK_STATE(cmd, up) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, up)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority);\
+ MC_CMD_OP(cmd, 0, 48, 4, enum dpci_dest, cfg->dest_cfg.dest_type);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_GET_RX_QUEUE(cmd, priority) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_RX_QUEUE(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
+ MC_RSP_OP(cmd, 0, 48, 4, enum dpci_dest, attr->dest_cfg.dest_type);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_CMD_GET_TX_QUEUE(cmd, priority) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_TX_QUEUE(cmd, attr) \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCI_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+#endif /* _FSL_DPCI_CMD_H */
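These macros pair with the generic MC command path shown at the top of this patch: build a struct mc_command, issue it with mc_send_command(), then decode the reply with the matching _RSP_ macro. A sketch of how a wrapper such as dpci_get_api_version() in dpci.c would typically use them; mc_encode_cmd_header() is the header helper from fsl_mc_cmd.h and its exact use here is an assumption, since dpci.c itself is not shown in this hunk.

int dpci_get_api_version(struct fsl_mc_io *mc_io,
			 uint32_t cmd_flags,
			 uint16_t *major_ver,
			 uint16_t *minor_ver)
{
	struct mc_command cmd = { 0 };
	int err;

	/* prepare command: command ID and flags packed into the header */
	cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_API_VERSION,
					  cmd_flags, 0);

	/* send command to MC */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* retrieve response parameters */
	DPCI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);

	return 0;
}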
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
new file mode 100644
index 00000000..0ed9db56
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -0,0 +1,238 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPCON_H
+#define __FSL_DPCON_H
+
+/* Data Path Concentrator API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+struct fsl_mc_io;
+
+/** General DPCON macros */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token);
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_cfg - Structure representing DPCON configuration
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_cfg {
+ uint8_t num_priorities;
+};
+
+/**
+ * dpcon_create() - Create the DPCON object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPCON object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpcon_is_enabled() - Check if the DPCON is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+};
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr);
+
+/**
+ * dpcon_get_api_version() - Get Data Path Concentrator API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path concentrator API
+ * @minor_ver: Minor version of data path concentrator API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPCON_H */
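A short sketch tying DPCON to the DPCI API above: open the channel, read its attributes, and use the DPCON as the destination of a DPCI Rx queue. mc_io, dpcon_id and dpci_token are assumed to be supplied by the caller; CMD_PRI_LOW is the priority flag used elsewhere in this patch.

static int dpci_rx_to_dpcon(struct fsl_mc_io *mc_io, int dpcon_id,
			    uint16_t dpci_token)
{
	struct dpcon_attr ch_attr;
	struct dpci_rx_queue_cfg rx_cfg = {0};
	uint16_t ch_token;
	int err;

	err = dpcon_open(mc_io, CMD_PRI_LOW, dpcon_id, &ch_token);
	if (err)
		return err;

	err = dpcon_get_attributes(mc_io, CMD_PRI_LOW, ch_token, &ch_attr);
	if (err)
		goto out;

	err = dpcon_enable(mc_io, CMD_PRI_LOW, ch_token);
	if (err)
		goto out;

	/* Deliver DPCI Rx traffic into the DPCON channel */
	rx_cfg.options = DPCI_QUEUE_OPT_DEST;
	rx_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_cfg.dest_cfg.dest_id = ch_attr.id;
	rx_cfg.dest_cfg.priority = 0;	/* 0..num_priorities-1 of the channel */
	err = dpci_set_rx_queue(mc_io, CMD_PRI_LOW, dpci_token,
				DPCI_ALL_QUEUES, &rx_cfg);

out:
	dpcon_close(mc_io, CMD_PRI_LOW, ch_token);
	return err;
}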
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
new file mode 100644
index 00000000..f7f76902
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
@@ -0,0 +1,175 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPCON_CMD_H
+#define _FSL_DPCON_CMD_H
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 2
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE ((0x800 << 4) | (0x1))
+#define DPCON_CMDID_OPEN ((0x808 << 4) | (0x1))
+#define DPCON_CMDID_CREATE ((0x908 << 4) | (0x1))
+#define DPCON_CMDID_DESTROY ((0x988 << 4) | (0x1))
+#define DPCON_CMDID_GET_API_VERSION ((0xa08 << 4) | (0x1))
+
+#define DPCON_CMDID_ENABLE ((0x002 << 4) | (0x1))
+#define DPCON_CMDID_DISABLE ((0x003 << 4) | (0x1))
+#define DPCON_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
+#define DPCON_CMDID_RESET ((0x005 << 4) | (0x1))
+#define DPCON_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
+
+#define DPCON_CMDID_SET_IRQ ((0x010 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ ((0x011 << 4) | (0x1))
+#define DPCON_CMDID_SET_IRQ_ENABLE ((0x012 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ_ENABLE ((0x013 << 4) | (0x1))
+#define DPCON_CMDID_SET_IRQ_MASK ((0x014 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ_MASK ((0x015 << 4) | (0x1))
+#define DPCON_CMDID_GET_IRQ_STATUS ((0x016 << 4) | (0x1))
+#define DPCON_CMDID_CLEAR_IRQ_STATUS ((0x017 << 4) | (0x1))
+
+#define DPCON_CMDID_SET_NOTIFICATION ((0x100 << 4) | (0x1))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_OPEN(cmd, dpcon_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_CREATE(cmd, cfg) \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPCON_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+#endif /* _FSL_DPCON_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h
index 6d86f07d..4448cca5 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -230,6 +230,36 @@ int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
uint8_t *sdest);
/**
+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ * @channel_index: Returned channel index to be used in qbman API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index);
+
+/**
+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id);
+
+/**
* struct dpio_attr - Structure representing DPIO attributes
* @id: DPIO object ID
* @qbman_portal_ce_offset: offset of the software portal cache-enabled area
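The two calls added above pair a DPIO software portal with a DPCON channel for static dequeue. A sketch under stated assumptions: dpio_token was obtained earlier from dpio_open(), and qbman_swp_push_set() is the QBMan helper that enables the returned channel index on the portal (name assumed from this tree's QBMan API).

static int attach_channel(struct fsl_mc_io *mc_io, uint16_t dpio_token,
			  int dpcon_id, struct qbman_swp *swp)
{
	uint8_t ch_index;
	int err;

	err = dpio_add_static_dequeue_channel(mc_io, CMD_PRI_LOW,
					      dpio_token, dpcon_id, &ch_index);
	if (err)
		return err;

	/* Enable static dequeue from this channel on the software portal
	 * (QBMan call assumed); pass 0 instead of 1 to disable it before
	 * calling dpio_remove_static_dequeue_channel() at teardown.
	 */
	qbman_swp_push_set(swp, ch_index, 1);
	return 0;
}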
diff --git a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
index b1147de2..d7575077 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
new file mode 100644
index 00000000..c2ddde09
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -0,0 +1,106 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPMNG_H
+#define __FSL_DPMNG_H
+
+/* Management Complex General API
+ * Contains general API for the Management Complex firmware
+ */
+
+struct fsl_mc_io;
+
+/**
+ * Management Complex firmware version information
+ */
+#define MC_VER_MAJOR 10
+#define MC_VER_MINOR 1
+
+/**
+ * struct mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct mc_version {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t revision;
+};
+
+/**
+ * struct mc_soc_version
+ * @svr: system version (content of platform SVR register)
+ * @pvr: processor version (content of platform PVR register)
+ */
+struct mc_soc_version {
+ uint32_t svr;
+ uint32_t pvr;
+};
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info);
+
+/**
+ * mc_get_soc_version() - Retrieves the Management Complex SoC version
+ * information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_platform_info: Returned SoC version information structure. The structure
+ * contains the values of the SVR and PVR registers. Consult the
+ * platform-specific reference manual for detailed information.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int mc_get_soc_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_soc_version *mc_platform_info);
+
+#endif /* __FSL_DPMNG_H */
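A sketch of the intended use: query the firmware and SoC versions once at init and branch on the SVR, as dpaa2_configure_stashing() does later in this patch for LS108x. SVR_LS1080A and the 0xffff0000 mask are taken from that code; the mc_io portal is assumed to be set up by the caller.

static void dpaa2_report_mc_platform(struct fsl_mc_io *mc_io)
{
	struct mc_version ver = {0};
	struct mc_soc_version soc = {0};

	if (!mc_get_version(mc_io, CMD_PRI_LOW, &ver))
		PMD_INIT_LOG(DEBUG, "MC firmware %u.%u, revision %u",
			     ver.major, ver.minor, ver.revision);

	if (!mc_get_soc_version(mc_io, CMD_PRI_LOW, &soc) &&
	    (soc.svr & 0xffff0000) == SVR_LS1080A)
		PMD_INIT_LOG(DEBUG, "LS108x (A53) platform detected");
}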
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
new file mode 100644
index 00000000..3a36b6dc
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
@@ -0,0 +1,61 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPMNG_CMD_H
+#define __FSL_DPMNG_CMD_H
+
+/* Command IDs */
+#define DPMNG_CMDID_GET_VERSION 0x8311
+#define DPMNG_CMDID_GET_SOC_VERSION 0x8321
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_ver_info->revision); \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_ver_info->major); \
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPMNG_RSP_GET_SOC_VERSION(cmd, mc_soc_version) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_soc_version->svr); \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_soc_version->pvr); \
+} while (0)
+
+#endif /* __FSL_DPMNG_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
index d2252228..0ca4345c 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index 2fb285c1..33f9eedf 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -53,29 +53,20 @@
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-TAILQ_HEAD(dpbp_device_list, dpaa2_dpbp_dev);
-static struct dpbp_device_list *dpbp_dev_list; /*!< DPBP device list */
+TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
+static struct dpbp_dev_list dpbp_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
-int
-dpaa2_create_dpbp_device(
- int dpbp_id)
+static int
+dpaa2_create_dpbp_device(struct fslmc_vfio_device *vdev __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpbp_id)
{
struct dpaa2_dpbp_dev *dpbp_node;
int ret;
- if (!dpbp_dev_list) {
- dpbp_dev_list = malloc(sizeof(struct dpbp_device_list));
- if (!dpbp_dev_list) {
- PMD_INIT_LOG(ERR, "Memory alloc failed in DPBP list\n");
- return -1;
- }
- /* Initialize the DPBP List */
- TAILQ_INIT(dpbp_dev_list);
- }
-
/* Allocate DPAA2 dpbp handle */
- dpbp_node = (struct dpaa2_dpbp_dev *)
- malloc(sizeof(struct dpaa2_dpbp_dev));
+ dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0);
if (!dpbp_node) {
PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device");
return -1;
@@ -88,7 +79,7 @@ dpaa2_create_dpbp_device(
if (ret) {
PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
ret);
- free(dpbp_node);
+ rte_free(dpbp_node);
return -1;
}
@@ -98,16 +89,16 @@ dpaa2_create_dpbp_device(
PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with"
" error code %d\n", ret);
dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
- free(dpbp_node);
+ rte_free(dpbp_node);
return -1;
}
dpbp_node->dpbp_id = dpbp_id;
rte_atomic16_init(&dpbp_node->in_use);
- TAILQ_INSERT_HEAD(dpbp_dev_list, dpbp_node, next);
+ TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next);
- PMD_INIT_LOG(DEBUG, "Buffer pool resource initialized %d", dpbp_id);
+ PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpbp.%d]", dpbp_id);
return 0;
}
@@ -117,7 +108,7 @@ struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void)
struct dpaa2_dpbp_dev *dpbp_dev = NULL;
/* Get DPBP dev handle from list using index */
- TAILQ_FOREACH(dpbp_dev, dpbp_dev_list, next) {
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
if (dpbp_dev && rte_atomic16_test_and_set(&dpbp_dev->in_use))
break;
}
@@ -130,10 +121,17 @@ void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
struct dpaa2_dpbp_dev *dpbp_dev = NULL;
/* Match DPBP handle and mark it free */
- TAILQ_FOREACH(dpbp_dev, dpbp_dev_list, next) {
+ TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) {
if (dpbp_dev == dpbp) {
rte_atomic16_dec(&dpbp_dev->in_use);
return;
}
}
}
+
+static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
+ .object_id = DPAA2_MC_DPBP_DEVID,
+ .create = dpaa2_create_dpbp_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpbp, rte_dpaa2_dpbp_obj);
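With the list now statically initialized and the device registered through RTE_PMD_REGISTER_DPAA2_OBJECT, a PMD only consumes the allocator pair exported by this file. A minimal sketch; the dpbp and token members come from struct dpaa2_dpbp_dev in dpaa2_hw_pvt.h, as used above.

static int with_free_dpbp(void)
{
	struct dpaa2_dpbp_dev *dpbp = dpaa2_alloc_dpbp_dev();

	if (!dpbp)
		return -1;	/* every probed DPBP is already in use */

	/* use dpbp->dpbp and dpbp->token with the dpbp_* MC API ... */

	dpaa2_free_dpbp_dev(dpbp);
	return 0;
}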
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
new file mode 100644
index 00000000..478e4f7e
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -0,0 +1,179 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include <mc/fsl_dpci.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+TAILQ_HEAD(dpci_dev_list, dpaa2_dpci_dev);
+static struct dpci_dev_list dpci_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpci_dev_list); /*!< DPCI device list */
+
+static int
+rte_dpaa2_create_dpci_device(struct fslmc_vfio_device *vdev __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpci_id)
+{
+ struct dpaa2_dpci_dev *dpci_node;
+ struct dpci_attr attr;
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ struct dpci_rx_queue_attr rx_attr;
+ int ret, i;
+
+ /* Allocate DPAA2 dpci handle */
+ dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
+ if (!dpci_node) {
+ PMD_INIT_LOG(ERR, "Memory allocation failed for DPCI Device");
+ return -1;
+ }
+
+ /* Open the dpci object */
+ dpci_node->dpci.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpci_open(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_id, &dpci_node->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ /* Get the device attributes */
+ ret = dpci_get_attributes(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token, &attr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Reading device failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ /* Set up the Rx Queue */
+ memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
+ ret = dpci_set_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token,
+ 0, &rx_queue_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Setting Rx queue failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ /* Enable the device */
+ ret = dpci_enable(&dpci_node->dpci,
+ CMD_PRI_LOW, dpci_node->token);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Enabling device failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ /* Get the Rx FQID's */
+ ret = dpci_get_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &rx_attr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR,
+ "Reading device failed with err code: %d",
+ ret);
+ rte_free(dpci_node);
+ return -1;
+ }
+
+ dpci_node->queue[i].fqid = rx_attr.fqid;
+ }
+
+ dpci_node->dpci_id = dpci_id;
+ rte_atomic16_init(&dpci_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
+
+ PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpci.%d]", dpci_id);
+
+ return 0;
+}
+
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Get DPCI dev handle from list using index */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev && rte_atomic16_test_and_set(&dpci_dev->in_use))
+ break;
+ }
+
+ return dpci_dev;
+}
+
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci)
+{
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+
+ /* Match DPCI handle and mark it free */
+ TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
+ if (dpci_dev == dpci) {
+ rte_atomic16_dec(&dpci_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpci_obj = {
+ .object_id = DPAA2_MC_DPCI_DEVID,
+ .create = rte_dpaa2_create_dpci_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpci, rte_dpaa2_dpci_obj);
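The object-registry pattern used here for DPBP and DPCI is how any further MC object type would hook into the bus scan: register a create callback keyed by its object ID. A hypothetical skeleton; the DPFOO name, its DEVID macro and the body of the callback are illustrative only, while the struct fields and callback signature match those used above.

static int
dpaa2_create_dpfoo_device(struct fslmc_vfio_device *vdev __rte_unused,
			  struct vfio_device_info *obj_info __rte_unused,
			  int dpfoo_id)
{
	/* open the object via its MC API, cache what the PMDs need and
	 * add it to a driver-private list, as done for dpci above
	 */
	PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpfoo.%d]", dpfoo_id);
	return 0;
}

static struct rte_dpaa2_object rte_dpaa2_dpfoo_obj = {
	.object_id = DPAA2_MC_DPFOO_DEVID,	/* hypothetical DEVID */
	.create = dpaa2_create_dpfoo_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpfoo, rte_dpaa2_dpfoo_obj);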
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index a1a58b9c..283441b4 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -46,6 +46,8 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
@@ -55,20 +57,23 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include "dpaa2_hw_pvt.h"
#include "dpaa2_hw_dpio.h"
+#include <mc/fsl_dpmng.h>
#define NUM_HOST_CPUS RTE_MAX_LCORE
struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
-TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
-static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
+struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+TAILQ_HEAD(dpio_dev_list, dpaa2_dpio_dev);
+static struct dpio_dev_list dpio_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
static uint32_t io_space_count;
/*Stashing Macros default for LS208x*/
@@ -102,6 +107,95 @@ dpaa2_core_cluster_sdest(int cpu_id)
return dpaa2_core_cluster_base + x;
}
+static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
+{
+#define STRING_LEN 28
+#define COMMAND_LEN 50
+ uint32_t cpu_mask = 1;
+ int ret;
+ size_t len = 0;
+ char *temp = NULL, *token = NULL;
+ char string[STRING_LEN], command[COMMAND_LEN];
+ FILE *file;
+
+ snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
+ file = fopen("/proc/interrupts", "r");
+ if (!file) {
+ PMD_DRV_LOG(WARNING, "Failed to open /proc/interrupts file\n");
+ return;
+ }
+ while (getline(&temp, &len, file) != -1) {
+ if ((strstr(temp, string)) != NULL) {
+ token = strtok(temp, ":");
+ break;
+ }
+ }
+
+ if (!token) {
+ PMD_DRV_LOG(WARNING, "Failed to get interrupt id for dpio.%d\n",
+ dpio_id);
+ if (temp)
+ free(temp);
+ fclose(file);
+ return;
+ }
+
+ cpu_mask = cpu_mask << rte_lcore_id();
+ snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity",
+ cpu_mask, token);
+ ret = system(command);
+ if (ret < 0)
+ PMD_DRV_LOG(WARNING,
+ "Failed to affine interrupts on respective core\n");
+ else
+ PMD_DRV_LOG(WARNING, " %s command is executed\n", command);
+
+ free(temp);
+ fclose(file);
+}
+
+static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
+{
+ struct epoll_event epoll_ev;
+ int eventfd, dpio_epoll_fd, ret;
+ int threshold = 0x3, timeout = 0xFF;
+
+ dpio_epoll_fd = epoll_create(1);
+ ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Interrupt registration failed\n");
+ return -1;
+ }
+
+ if (getenv("DPAA2_PORTAL_INTR_THRESHOLD"))
+ threshold = atoi(getenv("DPAA2_PORTAL_INTR_THRESHOLD"));
+
+ if (getenv("DPAA2_PORTAL_INTR_TIMEOUT"))
+ sscanf(getenv("DPAA2_PORTAL_INTR_TIMEOUT"), "%x", &timeout);
+
+ qbman_swp_interrupt_set_trigger(dpio_dev->sw_portal,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(dpio_dev->sw_portal, 0xffffffff);
+ qbman_swp_interrupt_set_inhibit(dpio_dev->sw_portal, 0);
+ qbman_swp_dqrr_thrshld_write(dpio_dev->sw_portal, threshold);
+ qbman_swp_intr_timeout_write(dpio_dev->sw_portal, timeout);
+
+ eventfd = dpio_dev->intr_handle.fd;
+ epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
+ epoll_ev.data.fd = eventfd;
+
+ ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "epoll_ctl failed\n");
+ return -1;
+ }
+ dpio_dev->epoll_fd = dpio_epoll_fd;
+
+ dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id);
+
+ return 0;
+}
+
static int
configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
{
@@ -147,8 +241,6 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
}
PMD_INIT_LOG(DEBUG, "Qbman Portal ID %d", attr.qbman_portal_id);
- PMD_INIT_LOG(DEBUG, "Portal CE adr 0x%lX", attr.qbman_portal_ce_offset);
- PMD_INIT_LOG(DEBUG, "Portal CI adr 0x%lX", attr.qbman_portal_ci_offset);
/* Configure & setup SW portal */
p_des.block = NULL;
@@ -166,19 +258,31 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
return -1;
}
- PMD_INIT_LOG(DEBUG, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);
-
return 0;
}
static int
-dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
+dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
{
- int sdest;
- int cpu_id, ret;
+ int sdest, ret;
+ static int first_time;
+
+ /* find the SoC type for the first time */
+ if (!first_time) {
+ struct mc_soc_version mc_plat_info = {0};
+
+ if (mc_get_soc_version(dpio_dev->dpio,
+ CMD_PRI_LOW, &mc_plat_info)) {
+ PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
+ dpaa2_core_cluster_base = 0x02;
+ dpaa2_cluster_sz = 4;
+ PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected");
+ }
+ first_time = 1;
+ }
/* Set the Stashing Destination */
- cpu_id = rte_lcore_id();
if (cpu_id < 0) {
cpu_id = rte_get_master_lcore();
if (cpu_id < 0) {
@@ -188,8 +292,6 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
}
/* Set the STASH Destination depending on Current CPU ID.
* Valid values of SDEST are 4,5,6,7. Where,
- * CPU 0-1 will have SDEST 4
- * CPU 2-3 will have SDEST 5.....and so on.
*/
sdest = dpaa2_core_cluster_sdest(cpu_id);
@@ -203,16 +305,21 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
return -1;
}
+ if (dpaa2_dpio_intr_init(dpio_dev)) {
+ PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n");
+ return -1;
+ }
+
return 0;
}
-static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
{
struct dpaa2_dpio_dev *dpio_dev = NULL;
int ret;
/* Get DPIO dev handle from list using index */
- TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
+ TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
break;
}
@@ -222,7 +329,7 @@ static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu",
dpio_dev, dpio_dev->index, syscall(SYS_gettid));
- ret = dpaa2_configure_stashing(dpio_dev);
+ ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
if (ret)
PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed");
@@ -262,7 +369,7 @@ dpaa2_affine_qbman_swp(void)
}
/* Populate the dpaa2_io_portal structure */
- dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp();
+ dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);
if (dpaa2_io_portal[lcore_id].dpio_dev) {
RTE_PER_LCORE(_dpaa2_io).dpio_dev
@@ -308,7 +415,7 @@ dpaa2_affine_qbman_swp_sec(void)
}
/* Populate the dpaa2_io_portal structure */
- dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp();
+ dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp(lcore_id);
if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
@@ -320,13 +427,14 @@ dpaa2_affine_qbman_swp_sec(void)
}
}
-int
+static int
dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
struct vfio_device_info *obj_info,
- int object_id)
+ int object_id)
{
struct dpaa2_dpio_dev *dpio_dev;
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
+ int vfio_dev_fd;
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
@@ -334,80 +442,57 @@ dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
return -1;
}
- if (!dpio_dev_list) {
- dpio_dev_list = malloc(sizeof(struct dpio_device_list));
- if (!dpio_dev_list) {
- PMD_INIT_LOG(ERR, "Memory alloc failed in DPIO list\n");
- return -1;
- }
-
- /* Initialize the DPIO List */
- TAILQ_INIT(dpio_dev_list);
- }
-
- dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev));
+ dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
+ RTE_CACHE_LINE_SIZE);
if (!dpio_dev) {
PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n");
return -1;
}
- PMD_DRV_LOG(INFO, "\t Aloocated DPIO [%p]", dpio_dev);
dpio_dev->dpio = NULL;
dpio_dev->hw_id = object_id;
- dpio_dev->vfio_fd = vdev->fd;
+ dpio_dev->intr_handle.vfio_dev_fd = vdev->fd;
rte_atomic16_init(&dpio_dev->ref_count);
/* Using single portal for all devices */
dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
reg_info.index = 0;
- if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ vfio_dev_fd = dpio_dev->intr_handle.vfio_dev_fd;
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
- free(dpio_dev);
+ rte_free(dpio_dev);
return -1;
}
- PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
- PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
dpio_dev->ce_size = reg_info.size;
dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
- dpio_dev->vfio_fd, reg_info.offset);
-
- /* Create Mapping for QBMan Cache Enabled area. This is a fix for
- * SMMU fault for DQRR statshing transaction.
- */
- if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr,
- reg_info.offset, reg_info.size)) {
- PMD_INIT_LOG(ERR, "DMAMAP for Portal CE area failed.\n");
- free(dpio_dev);
- return -1;
- }
+ vfio_dev_fd, reg_info.offset);
reg_info.index = 1;
- if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
- free(dpio_dev);
+ rte_free(dpio_dev);
return -1;
}
- PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
- PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
dpio_dev->ci_size = reg_info.size;
dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
- dpio_dev->vfio_fd, reg_info.offset);
+ vfio_dev_fd, reg_info.offset);
if (configure_dpio_qbman_swp(dpio_dev)) {
PMD_INIT_LOG(ERR,
"Fail to configure the dpio qbman portal for %d\n",
dpio_dev->hw_id);
- free(dpio_dev);
+ rte_free(dpio_dev);
return -1;
}
io_space_count++;
dpio_dev->index = io_space_count;
- TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next);
+ TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
+ PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpio.%d]", object_id);
return 0;
}
@@ -437,9 +522,15 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
}
return 0;
fail:
- i -= 1;
- while (i >= 0)
+ while (--i >= 0)
rte_free(q_storage->dq_storage[i]);
return -1;
}
+
+static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
+ .object_id = DPAA2_MC_DPIO_DEVID,
+ .create = dpaa2_create_dpio_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj);
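With the DPIO creation routine now static, discovery is driven through the bus object list; the registration pattern it relies on looks roughly like the sketch below. This is a hedged illustration, not code from this patch: my_obj_create, my_obj and the placeholder device ID are invented, and the headers are assumed to expose struct rte_dpaa2_object and RTE_PMD_REGISTER_DPAA2_OBJECT as used above.

#include <rte_fslmc.h>      /* rte_dpaa2_object, RTE_PMD_REGISTER_DPAA2_OBJECT */
#include <fslmc_vfio.h>     /* struct fslmc_vfio_device (bus-internal header) */

/* Hypothetical create hook; the bus calls it once per matching MC object,
 * just as dpaa2_create_dpio_device() above is called for each dpio. */
static int
my_obj_create(struct fslmc_vfio_device *vdev,
	      struct vfio_device_info *obj_info,
	      int object_id)
{
	(void)vdev;
	(void)obj_info;
	/* allocate and initialise the per-object state here */
	return object_id >= 0 ? 0 : -1;
}

static struct rte_dpaa2_object my_obj = {
	.object_id = 42,        /* placeholder; real code uses a DPAA2_MC_<OBJ>_DEVID */
	.create = my_obj_create,
};

/* The constructor generated by this macro adds the object to the bus list. */
RTE_PMD_REGISTER_DPAA2_OBJECT(myobj, my_obj);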
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index f2e11680..e845340c 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,6 +42,7 @@ struct dpaa2_io_portal_t {
struct dpaa2_dpio_dev *sec_dpio_dev;
uint64_t net_tid;
uint64_t sec_tid;
+ void *eventdev;
};
/*! Global per thread DPIO portal */
@@ -53,6 +54,10 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
+extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
+
+struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
+
/* Affine a DPIO portal to current processing thread */
int dpaa2_affine_qbman_swp(void);
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index c0223734..5d7a8282 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,8 @@
#ifndef _DPAA2_HW_PVT_H_
#define _DPAA2_HW_PVT_H_
+#include <rte_eventdev.h>
+
#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
@@ -46,6 +48,10 @@
#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+#define SVR_LS1080A 0x87030000
+#define SVR_LS2080A 0x87010000
+#define SVR_LS2088A 0x87090000
+
#ifndef ETH_VLAN_HLEN
#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */
#endif
@@ -65,7 +71,7 @@
#define MAX_BPID 256
#define DPAA2_MBUF_HW_ANNOTATION 64
-#define DPAA2_FD_PTA_SIZE 64
+#define DPAA2_FD_PTA_SIZE 0
#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
@@ -75,6 +81,8 @@
#define DPAA2_HW_BUF_RESERVE 0
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
+#define DPAA2_DPCI_MAX_QUEUES 2
+
struct dpaa2_dpio_dev {
TAILQ_ENTRY(dpaa2_dpio_dev) next;
/**< Pointer to Next device instance */
@@ -93,8 +101,11 @@ struct dpaa2_dpio_dev {
uintptr_t qbman_portal_ci_paddr;
/**< Physical address of Cache Inhibit Area */
uintptr_t ci_size; /**< Size of the CI region */
- int32_t vfio_fd; /**< File descriptor received via VFIO */
+ struct rte_intr_handle intr_handle; /* Interrupt related info */
+ int32_t epoll_fd; /**< File descriptor created for interrupt polling */
int32_t hw_id; /**< An unique ID of this DPIO device instance */
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
};
struct dpaa2_dpbp_dev {
@@ -108,8 +119,16 @@ struct dpaa2_dpbp_dev {
struct queue_storage_info_t {
struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
+ struct qbman_result *active_dqs;
+ int active_dpio_id;
+ int toggle;
};
+typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev);
+
struct dpaa2_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
void *dev;
@@ -120,7 +139,30 @@ struct dpaa2_queue {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t err_pkts;
- struct queue_storage_info_t *q_storage;
+ union {
+ struct queue_storage_info_t *q_storage;
+ struct qbman_result *cscn;
+ };
+ dpaa2_queue_cb_dqrr_t *cb;
+};
+
+struct swp_active_dqs {
+ struct qbman_result *global_active_dqs;
+ uint64_t reserved[7];
+};
+
+#define NUM_MAX_SWP 64
+
+extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+
+struct dpaa2_dpci_dev {
+ TAILQ_ENTRY(dpaa2_dpci_dev) next;
+ /**< Pointer to Next device instance */
+ struct fsl_mc_io dpci; /**< Handle to DPCI portal object */
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpci_id; /**< HW ID for DPCI object */
+ struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
};
/*! Global MCP list */
@@ -137,6 +179,19 @@ struct qbman_fle {
uint32_t reserved[3]; /* Not used currently */
};
+struct qbman_sge {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t length;
+ uint32_t fin_bpid_offset;
+};
+
+/* There are three types of frames: Single, Scatter Gather and Frame Lists */
+enum qbman_fd_format {
+ qbman_fd_single = 0,
+ qbman_fd_list,
+ qbman_fd_sg
+};
/*Macros to define operations on FD*/
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
fd->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
@@ -163,10 +218,17 @@ struct qbman_fle {
fle->addr_lo = lower_32_bits((uint64_t)addr); \
fle->addr_hi = upper_32_bits((uint64_t)addr); \
} while (0)
+#define DPAA2_GET_FLE_CTXT(fle) \
+ (uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
+ (fle)->reserved[0])
+#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
+ fle->reserved[0] = lower_32_bits((uint64_t)addr); \
+ fle->reserved[1] = upper_32_bits((uint64_t)addr); \
+} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
-#define DPAA2_GET_FLE_BPID(fle, bpid) (fle->fin_bpid_offset & 0x000000ff)
+#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
#define DPAA2_SET_FLE_FIN(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
@@ -178,6 +240,7 @@ struct qbman_fle {
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
((fle->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
@@ -187,6 +250,17 @@ struct qbman_fle {
#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
+#define DPAA2_FD_SET_FORMAT(fd, format) do { \
+ (fd)->simple.bpid_offset &= 0xCFFFFFFF; \
+ (fd)->simple.bpid_offset |= (uint32_t)format << 28; \
+} while (0)
+#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)
+
+#define DPAA2_SG_SET_FINAL(sg, fin) do { \
+ (sg)->fin_bpid_offset &= 0x7FFFFFFF; \
+ (sg)->fin_bpid_offset |= (uint32_t)fin << 31; \
+} while (0)
+#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))
/* Only Enqueue Error responses will be
* pushed on FQID_ERR of Enqueue FQ
*/
@@ -231,7 +305,7 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
/**
* When we are using Physical addresses as IO Virtual Addresses,
* Need to call conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov
- * whereever required.
+ * wherever required.
* These routines are called with help of below MACRO's
*/
@@ -264,7 +338,35 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+static inline
+int check_swp_active_dqs(uint16_t dpio_index)
+{
+ if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
+ return 1;
+ return 0;
+}
+
+static inline
+void clear_swp_active_dqs(uint16_t dpio_index)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
+}
+
+static inline
+struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
+{
+ return rte_global_active_dqs_list[dpio_index].global_active_dqs;
+}
+
+static inline
+void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
+{
+ rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
+}
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
+struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
+void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);
+
#endif
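The new format and scatter-gather helpers above can be combined to describe an SG frame. A minimal sketch, assuming the frame descriptor and SG table are already allocated and DMA-mapped; build_sg_fd is an invented helper, not part of this patch.

#include "dpaa2_hw_pvt.h"   /* DPAA2_FD_SET_FORMAT, DPAA2_SG_SET_FINAL, qbman_fd_sg */

/* Mark an FD as scatter-gather and terminate a two-entry SG table. */
static void
build_sg_fd(struct qbman_fd *fd, struct qbman_sge *sgt)
{
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);   /* frame is described by an SG table */
	DPAA2_SG_SET_FINAL(&sgt[0], 0);         /* more entries follow */
	DPAA2_SG_SET_FINAL(&sgt[1], 1);         /* last entry of the table */
}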
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 41effe37..529f1ea3 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
- * All rights reserved.
+ * Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 77317723..9e9047e2 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -124,6 +124,36 @@ uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
/**
+ * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
+ * DQRR interrupt threshold register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_DQRR_ITR register.
+ */
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_intr_timeout_read_status() - Get the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ */
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_intr_timeout_write() - Set the data in software portal
+ * Interrupt Time-Out period register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ITPR register.
+ */
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
+
+/**
* qbman_swp_interrupt_get_trigger() - Get the data in software portal
* interrupt enable register.
* @p: the given software portal object.
@@ -349,7 +379,7 @@ void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
*
* Return dqrr index.
*/
-uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr);
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
/**
* qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
@@ -883,6 +913,20 @@ void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
*/
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct qbman_fd *fd);
+/**
+ * qbman_swp_enqueue_multiple_eqdesc() - Enqueue multiple frames with separate
+ * enqueue descriptors.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptors
+ * @fd: the frame descriptors to be enqueued.
+ * @num_frames: the number of frames to be enqueued.
+ *
+ * Return the number of frames enqueued, or 0 if the EQCR is not ready.
+ */
+int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
/* TODO:
* qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
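A hedged usage sketch for the new burst API follows; send_burst and its arguments are invented, the portal, enqueue descriptors and frame descriptors are assumed to be initialised by the caller, and one enqueue descriptor per frame is assumed, as the implementation in qbman_portal.c below expects.

/* Push nb frames, retrying whatever the EQCR could not accept yet. */
static void
send_burst(struct qbman_swp *swp, const struct qbman_eq_desc *descs,
	   const struct qbman_fd *fds, int nb)
{
	int sent = 0;

	while (sent < nb) {
		int ret = qbman_swp_enqueue_multiple_eqdesc(swp,
				&descs[sent], &fds[sent], nb - sent);
		if (ret > 0)
			sent += ret;
		/* a non-positive return means the EQCR is not ready; retry */
	}
}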
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 5d407cc0..dd62e9af 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -44,6 +44,8 @@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
@@ -218,6 +220,26 @@ void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}
+uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
+}
+
+void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
+}
+
+uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
+}
+
+void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
+}
+
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
@@ -288,7 +310,7 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
qbman_cena_invalidate_prefetch(&p->sys,
QBMAN_CENA_SWP_RR(p->mc.valid_bit));
ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
- /* Remove the valid-bit - command completed iff the rest is non-zero */
+ /* Remove the valid-bit - command completed if the rest is non-zero */
verb = ret[0] & ~QB_VALID_BIT;
if (!verb)
return NULL;
@@ -574,6 +596,76 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
return qbman_swp_enqueue_ring_mode(s, d, fd);
}
+int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi;
+ uint8_t diff;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ /* Point to the next enqueue descriptor */
+ cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ cl = qb_cl(d);
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ if (!(eqcr_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ /* Point to the next enqueue descriptor */
+ cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
+ }
+
+ /* Flush all the cachelines without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (uint64_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf((uint64_t *)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+ s->eqcr.pi = eqcr_pi;
+
+ return num_enqueued;
+}
+
/*************************/
/* Static (push) dequeue */
/*************************/
@@ -769,7 +861,7 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
*/
uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
- /* there are new entries iff pi != next_idx */
+ /* there are new entries if pi != next_idx */
if (pi == s->dqrr.next_idx)
return NULL;
/* if next_idx is/was the last ring index, and 'pi' is
@@ -1393,7 +1485,7 @@ int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1, ctx);
}
-uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
+uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
return QBMAN_IDX_FROM_DQRR(dqrr);
}
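The DQRR threshold and time-out registers added here are programmed from dpaa2_dpio_intr_init() earlier in this patch. Below is a hedged sketch of how a polling thread might then sleep on the portal's epoll fd; dpaa2_dpio_wait_intr is invented, and the eventfd-read rearm step is an assumption about the edge-triggered epoll setup above.

#include <sys/epoll.h>
#include <unistd.h>
#include <stdint.h>
#include "dpaa2_hw_pvt.h"        /* struct dpaa2_dpio_dev */
#include <fsl_qbman_portal.h>    /* qbman_swp_interrupt_clear_status() */

static int
dpaa2_dpio_wait_intr(struct dpaa2_dpio_dev *dpio_dev, int timeout_ms)
{
	struct epoll_event ev;
	uint64_t cnt;
	int n;

	n = epoll_wait(dpio_dev->epoll_fd, &ev, 1, timeout_ms);
	if (n > 0) {
		/* drain the eventfd so the edge-triggered fd can fire again */
		if (read(ev.data.fd, &cnt, sizeof(cnt)) < 0)
			return -1;
		qbman_swp_interrupt_clear_status(dpio_dev->sw_portal,
						 QBMAN_SWP_INTERRUPT_DQRI);
	}
	return n;
}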
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index 2db0fcef..3cdf14ef 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -49,3 +49,31 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_17.08 {
+ global:
+
+ dpaa2_io_portal;
+ dpaa2_get_qbman_swp;
+ dpci_set_rx_queue;
+ dpcon_open;
+ dpcon_get_attributes;
+ dpio_add_static_dequeue_channel;
+ dpio_remove_static_dequeue_channel;
+ mc_get_soc_version;
+ mc_get_version;
+ qbman_eq_desc_set_dca;
+ qbman_get_dqrr_from_idx;
+ qbman_get_dqrr_idx;
+ qbman_result_DQ_fqd_ctx;
+ qbman_result_SCN_state_in_mem;
+ qbman_swp_dqrr_consume;
+ qbman_swp_dqrr_next;
+ qbman_swp_enqueue_multiple_eqdesc;
+ qbman_swp_interrupt_clear_status;
+ qbman_swp_push_set;
+ rte_dpaa2_alloc_dpci_dev;
+ rte_fslmc_object_register;
+ rte_global_active_dqs_list;
+
+} DPDK_17.05;
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 040ab958..e60d6eba 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,9 +56,6 @@ extern "C" {
#include <rte_dev.h>
#include <rte_bus.h>
-/** Name of FSLMC Bus */
-#define FSLMC_BUS_NAME "FSLMC"
-
struct rte_dpaa2_driver;
/* DPAA2 Device and Driver lists for FSLMC bus */
@@ -81,6 +78,7 @@ struct rte_dpaa2_device {
uint16_t object_id; /**< DPAA2 Object ID */
struct rte_intr_handle intr_handle; /**< Interrupt handle */
struct rte_dpaa2_driver *driver; /**< Associated driver */
+ char name[32]; /**< DPAA2 Object name */
};
typedef int (*rte_dpaa2_probe_t)(struct rte_dpaa2_driver *dpaa2_drv,
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index 59a7c6a9..6fca5e1c 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -31,6 +31,9 @@
include $(RTE_SDK)/mk/rte.vars.mk
ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
endif
# library name
@@ -47,7 +50,9 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
# external library dependencies
-LDLIBS += -lisal_crypto
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index e9de6546..d6a18efc 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,26 +37,109 @@
#define LINUX
#endif
-#include <isa-l_crypto/aes_gcm.h>
+#include <gcm_defines.h>
+#include <aux_funcs.h>
-typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
- uint8_t *iv,
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+ RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+ RTE_AESNI_GCM_SSE,
+ RTE_AESNI_GCM_AVX,
+ RTE_AESNI_GCM_AVX2,
+ RTE_AESNI_GCM_VECTOR_NUM
+};
+
+enum aesni_gcm_key {
+ AESNI_GCM_KEY_128,
+ AESNI_GCM_KEY_192,
+ AESNI_GCM_KEY_256,
+ AESNI_GCM_KEY_NUM
+};
+
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data, uint8_t *out,
+ const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
+ const uint8_t *aad, uint64_t aad_len,
+ uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
+ const uint8_t *iv,
uint8_t const *aad,
uint64_t aad_len);
-typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
uint8_t *out,
const uint8_t *in,
uint64_t plaintext_len);
-typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+ struct gcm_context_data *gcm_ctx_data,
uint8_t *auth_tag,
uint64_t auth_tag_len);
+/** GCM library function pointer table */
struct aesni_gcm_ops {
+ aesni_gcm_t enc; /**< GCM encryption function pointer */
+ aesni_gcm_t dec; /**< GCM decryption function pointer */
+ aesni_gcm_precomp_t precomp; /**< GCM key pre-computation */
aesni_gcm_init_t init;
- aesni_gcm_update_t update;
+ aesni_gcm_update_t update_enc;
+ aesni_gcm_update_t update_dec;
aesni_gcm_finalize_t finalize;
};
+#define AES_GCM_FN(keylen, arch) \
+aes_gcm_enc_##keylen##_##arch,\
+aes_gcm_dec_##keylen##_##arch,\
+aes_gcm_pre_##keylen##_##arch,\
+aes_gcm_init_##keylen##_##arch,\
+aes_gcm_enc_##keylen##_update_##arch,\
+aes_gcm_dec_##keylen##_update_##arch,\
+aes_gcm_enc_##keylen##_finalize_##arch,
+
+static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
+ [RTE_AESNI_GCM_NOT_SUPPORTED] = {
+ [AESNI_GCM_KEY_128] = {NULL},
+ [AESNI_GCM_KEY_192] = {NULL},
+ [AESNI_GCM_KEY_256] = {NULL}
+ },
+ [RTE_AESNI_GCM_SSE] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, sse)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, sse)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, sse)
+ }
+ },
+ [RTE_AESNI_GCM_AVX] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen2)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen2)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen2)
+ }
+ },
+ [RTE_AESNI_GCM_AVX2] = {
+ [AESNI_GCM_KEY_128] = {
+ AES_GCM_FN(128, avx_gen4)
+ },
+ [AESNI_GCM_KEY_192] = {
+ AES_GCM_FN(192, avx_gen4)
+ },
+ [AESNI_GCM_KEY_256] = {
+ AES_GCM_FN(256, avx_gen4)
+ }
+ }
+};
#endif /* _AESNI_GCM_OPS_H_ */
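The table above lets the PMD pick an implementation once per queue pair. A hedged sketch of a direct, single-shot use of one entry; gcm128_sse_encrypt is invented, and the gcm_key_data/gcm_context_data types come from the IPSec_MB headers included at the top of this file.

#include "aesni_gcm_ops.h"

static void
gcm128_sse_encrypt(const uint8_t *key, const uint8_t *iv,
		   const uint8_t *aad, uint64_t aad_len,
		   const uint8_t *in, uint8_t *out, uint64_t len,
		   uint8_t *tag, uint64_t tag_len)
{
	const struct aesni_gcm_ops *ops =
		&gcm_ops[RTE_AESNI_GCM_SSE][AESNI_GCM_KEY_128];
	struct gcm_key_data key_data;
	struct gcm_context_data ctx;

	ops->precomp(key, &key_data);              /* expand/precompute the key */
	ops->enc(&key_data, &ctx, out, in, len,    /* one-shot encrypt + tag */
		 iv, aad, aad_len, tag, tag_len);
}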
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 101ef98b..d9c91d06 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -42,133 +43,155 @@
#include "aesni_gcm_pmd_private.h"
-/** GCM encode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_enc[] = {
- [AESNI_GCM_KEY_128] = {
- aesni_gcm128_init,
- aesni_gcm128_enc_update,
- aesni_gcm128_enc_finalize
- },
- [AESNI_GCM_KEY_256] = {
- aesni_gcm256_init,
- aesni_gcm256_enc_update,
- aesni_gcm256_enc_finalize
- }
-};
-
-/** GCM decode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_dec[] = {
- [AESNI_GCM_KEY_128] = {
- aesni_gcm128_init,
- aesni_gcm128_dec_update,
- aesni_gcm128_dec_finalize
- },
- [AESNI_GCM_KEY_256] = {
- aesni_gcm256_init,
- aesni_gcm256_dec_update,
- aesni_gcm256_dec_finalize
- }
-};
+static uint8_t cryptodev_driver_id;
/** Parse crypto xform chain and set private session parameters */
int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+ struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform)
{
const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
-
- if (xform->next == NULL || xform->next->next != NULL) {
- GCM_LOG_ERR("Two and only two chained xform required");
- return -EINVAL;
- }
+ const struct rte_crypto_sym_xform *aead_xform;
+ uint16_t digest_length;
+ uint8_t key_length;
+ uint8_t *key;
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
- auth_xform = xform->next;
- cipher_xform = xform;
- } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* AES-GMAC */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform;
- cipher_xform = xform->next;
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+ GCM_LOG_ERR("Only AES GMAC is supported as an "
+ "authentication only algorithm");
+ return -ENOTSUP;
+ }
+ /* Set IV parameters */
+ sess->iv.offset = auth_xform->auth.iv.offset;
+ sess->iv.length = auth_xform->auth.iv.length;
+
+ /* Select Crypto operation */
+ if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GMAC_OP_GENERATE;
+ else
+ sess->op = AESNI_GMAC_OP_VERIFY;
+
+ key_length = auth_xform->auth.key.length;
+ key = auth_xform->auth.key.data;
+ digest_length = auth_xform->auth.digest_length;
+
+ /* AES-GCM */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = xform;
+
+ if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+ GCM_LOG_ERR("The only combined operation "
+ "supported is AES GCM");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = aead_xform->aead.iv.offset;
+ sess->iv.length = aead_xform->aead.iv.length;
+
+ /* Select Crypto operation */
+ if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+ key_length = aead_xform->aead.key.length;
+ key = aead_xform->aead.key.data;
+
+ sess->aad_length = aead_xform->aead.aad_length;
+ digest_length = aead_xform->aead.digest_length;
} else {
- GCM_LOG_ERR("Cipher and auth xform required");
- return -EINVAL;
+ GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
+ return -ENOTSUP;
}
- if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
- (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
- auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
- GCM_LOG_ERR("We only support AES GCM and AES GMAC");
- return -EINVAL;
- }
- /* Select Crypto operation */
- if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
- auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
- sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
- else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
- auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
- sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
- else {
- GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
- " Decrypt/Verify are valid only");
+ /* IV check */
+ if (sess->iv.length != 16 && sess->iv.length != 12 &&
+ sess->iv.length != 0) {
+ GCM_LOG_ERR("Wrong IV length");
return -EINVAL;
}
/* Check key length and calculate GCM pre-compute. */
- switch (cipher_xform->cipher.key.length) {
+ switch (key_length) {
case 16:
- aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
sess->key = AESNI_GCM_KEY_128;
-
+ break;
+ case 24:
+ sess->key = AESNI_GCM_KEY_192;
break;
case 32:
- aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
sess->key = AESNI_GCM_KEY_256;
-
break;
default:
- GCM_LOG_ERR("Unsupported cipher key length");
+ GCM_LOG_ERR("Invalid key length");
+ return -EINVAL;
+ }
+
+ gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
+ /* Digest check */
+ if (digest_length != 16 &&
+ digest_length != 12 &&
+ digest_length != 8) {
+ GCM_LOG_ERR("digest");
return -EINVAL;
}
+ sess->digest_length = digest_length;
return 0;
}
/** Get gcm session */
static struct aesni_gcm_session *
-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
struct aesni_gcm_session *sess = NULL;
-
- if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->session->dev_type
- != RTE_CRYPTODEV_AESNI_GCM_PMD))
- return sess;
-
- sess = (struct aesni_gcm_session *)op->session->_private;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct aesni_gcm_session *)
+ get_session_private_data(
+ sym_op->session,
+ cryptodev_driver_id);
} else {
void *_sess;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
- if (rte_mempool_get(qp->sess_mp, &_sess))
- return sess;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
- sess = (struct aesni_gcm_session *)
- ((struct rte_cryptodev_sym_session *)_sess)->_private;
+ sess = (struct aesni_gcm_session *)_sess_private_data;
- if (unlikely(aesni_gcm_set_session_parameters(sess,
- op->xform) != 0)) {
+ if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+ sess, sym_op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
sess = NULL;
}
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(sym_op->session, cryptodev_driver_id,
+ _sess_private_data);
}
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
return sess;
}
/**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
*
* @param qp queue pair
* @param op symmetric crypto operation
@@ -178,14 +201,27 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
*
*/
static int
-process_gcm_crypto_op(struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
- struct rte_mbuf *m_src = op->m_src;
- uint32_t offset = op->cipher.data.offset;
+ uint8_t *iv_ptr;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_mbuf *m_src = sym_op->m_src;
+ uint32_t offset, data_offset, data_length;
uint32_t part_len, total_len, data_len;
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+ session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ offset = sym_op->aead.data.offset;
+ data_offset = offset;
+ data_length = sym_op->aead.data.length;
+ } else {
+ offset = sym_op->auth.data.offset;
+ data_offset = offset;
+ data_length = sym_op->auth.data.length;
+ }
+
RTE_ASSERT(m_src != NULL);
while (offset >= m_src->data_len) {
@@ -196,60 +232,50 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
}
data_len = m_src->data_len - offset;
- part_len = (data_len < op->cipher.data.length) ? data_len :
- op->cipher.data.length;
+ part_len = (data_len < data_length) ? data_len :
+ data_length;
/* Destination buffer is required when segmented source buffer */
- RTE_ASSERT((part_len == op->cipher.data.length) ||
- ((part_len != op->cipher.data.length) &&
- (op->m_dst != NULL)));
+ RTE_ASSERT((part_len == data_length) ||
+ ((part_len != data_length) &&
+ (sym_op->m_dst != NULL)));
/* Segmented destination buffer is not supported */
- RTE_ASSERT((op->m_dst == NULL) ||
- ((op->m_dst != NULL) &&
- rte_pktmbuf_is_contiguous(op->m_dst)));
+ RTE_ASSERT((sym_op->m_dst == NULL) ||
+ ((sym_op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(sym_op->m_dst)));
- dst = op->m_dst ?
- rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
- op->cipher.data.offset) :
- rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
- op->cipher.data.offset);
+ dst = sym_op->m_dst ?
+ rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+ data_offset) :
+ rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ data_offset);
src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
- /* sanity checks */
- if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
- op->cipher.iv.length != 0) {
- GCM_LOG_ERR("iv");
- return -1;
- }
-
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
/*
* GCM working in 12B IV mode => 16B pre-counter block we need
* to set BE LSB to 1, driver expects that 16B is allocated
*/
- if (op->cipher.iv.length == 12) {
- uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+ if (session->iv.length == 12) {
+ uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
*iv_padd = rte_bswap32(1);
}
- if (op->auth.digest.length != 16 &&
- op->auth.digest.length != 12 &&
- op->auth.digest.length != 8) {
- GCM_LOG_ERR("digest");
- return -1;
- }
-
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
- aesni_gcm_enc[session->key].init(&session->gdata,
- op->cipher.iv.data,
- op->auth.aad.data,
- (uint64_t)op->auth.aad.length);
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
- aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
- total_len = op->cipher.data.length - part_len;
+ total_len = data_length - part_len;
while (total_len) {
dst += part_len;
@@ -261,33 +287,36 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
- aesni_gcm_enc[session->key].update(&session->gdata,
- dst, src,
+ qp->ops[session->key].update_enc(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
- aesni_gcm_enc[session->key].finalize(&session->gdata,
- op->auth.digest.data,
- (uint64_t)op->auth.digest.length);
- } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
- op->m_dst : op->m_src,
- op->auth.digest.length);
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->aead.digest.data,
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src,
+ session->digest_length);
if (!auth_tag) {
GCM_LOG_ERR("auth_tag");
return -1;
}
- aesni_gcm_dec[session->key].init(&session->gdata,
- op->cipher.iv.data,
- op->auth.aad.data,
- (uint64_t)op->auth.aad.length);
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ sym_op->aead.aad.data,
+ (uint64_t)session->aad_length);
- aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx, dst, src,
(uint64_t)part_len);
- total_len = op->cipher.data.length - part_len;
+ total_len = data_length - part_len;
while (total_len) {
dst += part_len;
@@ -299,15 +328,47 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
part_len = (m_src->data_len < total_len) ?
m_src->data_len : total_len;
- aesni_gcm_dec[session->key].update(&session->gdata,
+ qp->ops[session->key].update_dec(&session->gdata_key,
+ &qp->gdata_ctx,
dst, src,
(uint64_t)part_len);
total_len -= part_len;
}
- aesni_gcm_dec[session->key].finalize(&session->gdata,
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
auth_tag,
- (uint64_t)op->auth.digest.length);
+ (uint64_t)session->digest_length);
+ } else if (session->op == AESNI_GMAC_OP_GENERATE) {
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ sym_op->auth.digest.data,
+ (uint64_t)session->digest_length);
+ } else { /* AESNI_GMAC_OP_VERIFY */
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src,
+ session->digest_length);
+
+ if (!auth_tag) {
+ GCM_LOG_ERR("auth_tag");
+ return -1;
+ }
+
+ qp->ops[session->key].init(&session->gdata_key,
+ &qp->gdata_ctx,
+ iv_ptr,
+ src,
+ (uint64_t)data_length);
+
+ qp->ops[session->key].finalize(&session->gdata_key,
+ &qp->gdata_ctx,
+ auth_tag,
+ (uint64_t)session->digest_length);
}
return 0;
@@ -324,34 +385,38 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
* - Returns NULL on invalid job
*/
static void
-post_process_gcm_crypto_op(struct rte_crypto_op *op)
+post_process_gcm_crypto_op(struct rte_crypto_op *op,
+ struct aesni_gcm_session *session)
{
struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
- struct aesni_gcm_session *session =
- (struct aesni_gcm_session *)op->sym->session->_private;
-
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Verify digest if required */
- if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+ session->op == AESNI_GMAC_OP_VERIFY) {
+ uint8_t *digest;
uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
- m->data_len - op->sym->auth.digest.length);
+ m->data_len - session->digest_length);
+
+ if (session->op == AESNI_GMAC_OP_VERIFY)
+ digest = op->sym->auth.digest.data;
+ else
+ digest = op->sym->aead.digest.data;
#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
rte_hexdump(stdout, "auth tag (orig):",
- op->sym->auth.digest.data, op->sym->auth.digest.length);
+ digest, session->digest_length);
rte_hexdump(stdout, "auth tag (calc):",
- tag, op->sym->auth.digest.length);
+ tag, session->digest_length);
#endif
- if (memcmp(tag, op->sym->auth.digest.data,
- op->sym->auth.digest.length) != 0)
+ if (memcmp(tag, digest, session->digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+ rte_pktmbuf_trim(m, session->digest_length);
}
}
@@ -359,6 +424,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
* Process a completed GCM request
*
* @param qp Queue Pair to process
+ * @param op Crypto operation
* @param job JOB_AES_HMAC job
*
* @return
@@ -366,12 +432,17 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
*/
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
- struct rte_crypto_op *op)
+ struct rte_crypto_op *op,
+ struct aesni_gcm_session *sess)
{
- post_process_gcm_crypto_op(op);
+ post_process_gcm_crypto_op(op, sess);
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_gcm_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -392,21 +463,21 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
for (i = 0; i < nb_dequeued; i++) {
- sess = aesni_gcm_get_session(qp, ops[i]->sym);
+ sess = aesni_gcm_get_session(qp, ops[i]);
if (unlikely(sess == NULL)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.dequeue_err_count++;
break;
}
- retval = process_gcm_crypto_op(ops[i]->sym, sess);
+ retval = process_gcm_crypto_op(qp, ops[i], sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
qp->qp_stats.dequeue_err_count++;
break;
}
- handle_completed_gcm_crypto_op(qp, ops[i]);
+ handle_completed_gcm_crypto_op(qp, ops[i], sess);
}
qp->qp_stats.dequeued_count += i;
@@ -438,6 +509,7 @@ aesni_gcm_create(const char *name,
{
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
+ enum aesni_gcm_vector_mode vector_mode;
if (init_params->name[0] == '\0')
snprintf(init_params->name, sizeof(init_params->name),
@@ -449,14 +521,23 @@ aesni_gcm_create(const char *name,
return -EFAULT;
}
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct aesni_gcm_private), init_params->socket_id);
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_GCM_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_GCM_AVX;
+ else
+ vector_mode = RTE_AESNI_GCM_SSE;
+
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct aesni_gcm_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
GCM_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_gcm_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -468,8 +549,24 @@ aesni_gcm_create(const char *name,
RTE_CRYPTODEV_FF_CPU_AESNI |
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ switch (vector_mode) {
+ case RTE_AESNI_GCM_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_GCM_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_GCM_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ default:
+ break;
+ }
+
internals = dev->data->dev_private;
+ internals->vector_mode = vector_mode;
+
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
@@ -498,7 +595,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -539,3 +636,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_pmd_drv, cryptodev_driver_id);
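Beyond the EAL --vdev option covered by the registered parameter string above, the device can also be instantiated programmatically. A hedged sketch assuming the 17.08 rte_vdev_init() API; the argument values are examples only.

#include <rte_vdev.h>

static int
create_gcm_vdev(void)
{
	/* same knobs as the registered parameter string above */
	return rte_vdev_init("crypto_aesni_gcm",
		"max_nb_queue_pairs=2,max_nb_sessions=128,socket_id=0");
}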
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 1fc047be..48400ac2 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -49,32 +49,32 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.key_size = {
.min = 16,
.max = 32,
- .increment = 16
+ .increment = 8
},
.digest_size = {
.min = 8,
.max = 16,
.increment = 4
},
- .aad_size = {
- .min = 0,
- .max = 65535,
- .increment = 1
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
}
}, }
}, }
},
- { /* AES GCM (AUTH) */
+ { /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
- .increment = 16
+ .increment = 8
},
.digest_size = {
.min = 8,
@@ -85,21 +85,6 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.min = 0,
.max = 65535,
.increment = 1
- }
- }, }
- }, }
- },
- { /* AES GCM (CIPHER) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 16
},
.iv_size = {
.min = 12,
@@ -181,9 +166,9 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
struct aesni_gcm_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
- dev_info->feature_flags = dev->feature_flags;
- dev_info->capabilities = aesni_gcm_pmd_capabilities;
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_gcm_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
@@ -244,9 +229,10 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct aesni_gcm_qp *qp = NULL;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
@@ -264,12 +250,14 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
goto qp_setup_cleanup;
+ qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+
qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
if (qp->processed_pkts == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -313,29 +301,57 @@ aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a aesni gcm session from a crypto xform chain */
-static void *
+static int
aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
if (unlikely(sess == NULL)) {
GCM_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
GCM_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- if (sess)
- memset(sess, 0, sizeof(struct aesni_gcm_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 0496b447..7e155729 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include "aesni_gcm_ops.h"
+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+/**< AES-NI GCM PMD device name */
+
#define GCM_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
@@ -58,6 +61,8 @@
/** private data structure for each virtual AESNI GCM device */
struct aesni_gcm_private {
+ enum aesni_gcm_vector_mode vector_mode;
+ /**< Vector mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
@@ -65,36 +70,46 @@ struct aesni_gcm_private {
};
struct aesni_gcm_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
- /**< Unique Queue Pair Name */
+ const struct aesni_gcm_ops *ops;
+ /**< Architecture-dependent function pointer table of the GCM APIs */
struct rte_ring *processed_pkts;
/**< Ring for placing process packets */
+ struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
+ /**< GCM parameters */
+ struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
+ /**< Queue pair statistics */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
- /**< Queue pair statistics */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
} __rte_cache_aligned;
enum aesni_gcm_operation {
AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
- AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
-};
-
-enum aesni_gcm_key {
- AESNI_GCM_KEY_128,
- AESNI_GCM_KEY_256
+ AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
+ AESNI_GMAC_OP_GENERATE,
+ AESNI_GMAC_OP_VERIFY
};
/** AESNI GCM private session structure */
struct aesni_gcm_session {
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< Digest length */
enum aesni_gcm_operation op;
/**< GCM operation type */
enum aesni_gcm_key key;
/**< GCM key type */
- struct gcm_data gdata __rte_cache_aligned;
+ struct gcm_key_data gdata_key;
/**< GCM parameters */
};
@@ -109,7 +124,8 @@ struct aesni_gcm_session {
* - On failure returns error code < 0
*/
extern int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+ struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform);
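The reworked session parser above consumes a single AEAD transform for AES-GCM. A hedged sketch of filling one in; fill_gcm_aead_xform is invented, and the field names follow the rte_crypto_sym.h layout this patch already relies on.

#include <string.h>
#include <rte_crypto_sym.h>

static void
fill_gcm_aead_xform(struct rte_crypto_sym_xform *xf,
		    uint8_t *key, uint16_t key_len, uint16_t iv_offset)
{
	memset(xf, 0, sizeof(*xf));
	xf->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xf->next = NULL;
	xf->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xf->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xf->aead.key.data = key;
	xf->aead.key.length = key_len;      /* 16, 24 or 32, per the checks above */
	xf->aead.iv.offset = iv_offset;     /* IV location within the crypto op */
	xf->aead.iv.length = 12;            /* 12 or 16 accepted above */
	xf->aead.digest_length = 16;        /* 8, 12 or 16 accepted above */
	xf->aead.aad_length = 0;
}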
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 45b25c9d..16e14512 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,12 +34,15 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include "rte_aesni_mb_pmd_private.h"
+static uint8_t cryptodev_driver_id;
+
typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
@@ -166,7 +169,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
break;
default:
MB_LOG_ERR("Unsupported authentication algorithm selection");
- return -1;
+ return -ENOTSUP;
}
/* Calculate Authentication precomputes */
@@ -194,7 +197,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
- return -1;
+ return -EINVAL;
}
/* Select cipher direction */
@@ -206,8 +209,8 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.direction = DECRYPT;
break;
default:
- MB_LOG_ERR("Unsupported cipher operation parameter");
- return -1;
+ MB_LOG_ERR("Invalid cipher operation parameter");
+ return -EINVAL;
}
/* Select cipher mode */
@@ -223,7 +226,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
break;
default:
MB_LOG_ERR("Unsupported cipher mode parameter");
- return -1;
+ return -ENOTSUP;
}
/* Check key length and choose key expansion function */
@@ -241,10 +244,14 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
break;
default:
- MB_LOG_ERR("Unsupported cipher key length");
- return -1;
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
}
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
/* Expanded cipher keys */
(*aes_keyexp_fn)(xform->cipher.key.data,
sess->cipher.expanded_aes_keys.encode,
@@ -261,6 +268,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
{
const struct rte_crypto_sym_xform *auth_xform = NULL;
const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ int ret;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
@@ -296,19 +304,25 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_NOT_SUPPORTED:
default:
MB_LOG_ERR("Unsupported operation chain order parameter");
- return -1;
+ return -ENOTSUP;
}
- if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
+ ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+ if (ret != 0) {
MB_LOG_ERR("Invalid/unsupported authentication parameters");
- return -1;
+ return ret;
}
- if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
- cipher_xform)) {
+ ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+ cipher_xform);
+ if (ret != 0) {
MB_LOG_ERR("Invalid/unsupported cipher parameters");
- return -1;
+ return ret;
}
+
return 0;
}
@@ -344,30 +358,38 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_AESNI_MB_PMD)) {
- return NULL;
- }
-
- sess = (struct aesni_mb_session *)op->sym->session->_private;
- } else {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct aesni_mb_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
void *_sess = NULL;
+ void *_sess_private_data = NULL;
if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_sym_session *)_sess)->_private;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct aesni_mb_session *)_sess_private_data;
if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
return sess;
}
@@ -396,7 +418,6 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
return -1;
}
- op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
/* Set crypto operation */
job->chain_order = session->chain_order;
@@ -470,8 +491,9 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = op->sym->cipher.iv.data;
- job->iv_len_in_bytes = op->sym->cipher.iv.length;
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ job->iv_len_in_bytes = session->iv.length;
/* Data Parameter */
job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
@@ -494,8 +516,6 @@ static inline void
verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
- RTE_ASSERT(m_dst == NULL);
-
/* Verify digest if required */
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
@@ -508,31 +528,28 @@ verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
/**
* Process a completed job and return rte_mbuf which job processed
*
+ * @param qp Queue Pair to process
* @param job JOB_AES_HMAC job to process
*
* @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
+ * - Returns processed crypto operation whose mbuf is trimmed of the output
+ * digest used in verification of the supplied digest.
* - Returns NULL on invalid job
*/
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+ struct aesni_mb_session *sess = get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
- struct aesni_mb_session *sess;
-
- RTE_ASSERT(op == NULL);
-
- if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+ if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
switch (job->status) {
case STS_COMPLETED:
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (job->hash_alg != NULL_HASH) {
- sess = (struct aesni_mb_session *)
- op->sym->session->_private;
-
if (sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
verify_digest(job, op);
@@ -544,7 +561,11 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct aesni_mb_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -569,7 +590,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
- while (job != NULL && processed_jobs < nb_ops) {
+ while (job != NULL) {
op = post_process_mb_job(qp, job);
if (op) {
@@ -579,6 +600,8 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
qp->stats.dequeue_err_count++;
break;
}
+ if (processed_jobs == nb_ops)
+ break;
job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
}
@@ -624,6 +647,9 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
int retval, processed_jobs = 0;
+ if (unlikely(nb_ops == 0))
+ return 0;
+
do {
/* Get next operation to process from ingress queue */
retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
@@ -691,21 +717,18 @@ cryptodev_aesni_mb_create(const char *name,
vector_mode = RTE_AESNI_MB_AVX2;
else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
vector_mode = RTE_AESNI_MB_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ else
vector_mode = RTE_AESNI_MB_SSE;
- else {
- MB_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct aesni_mb_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct aesni_mb_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
MB_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_mb_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -765,7 +788,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -806,3 +829,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_aesni_mb_pmd_drv, cryptodev_driver_id);
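
A condensed sketch of the session lookup pattern the hunks above switch to, assuming the same 17.08 helpers used in the patch (get_session_private_data and the driver id filled in through RTE_PMD_REGISTER_CRYPTO_DRIVER); my_session and my_driver_id are illustrative names only.

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

struct my_session { uint8_t expanded_keys[240]; };      /* placeholder */

static uint8_t my_driver_id;    /* assigned via RTE_PMD_REGISTER_CRYPTO_DRIVER */

static inline struct my_session *
my_get_session(struct rte_crypto_op *op)
{
        if (op->sess_type != RTE_CRYPTO_OP_WITH_SESSION ||
                        op->sym->session == NULL)
                return NULL;
        /* The generic session now holds one private-data slot per driver,
         * indexed by driver id; NULL means this driver never configured it. */
        return (struct my_session *)get_session_private_data(
                        op->sym->session, my_driver_id);
}
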
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d1bc28e0..692b354f 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -48,16 +48,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
.max = 12,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -69,16 +69,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
.max = 12,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -90,16 +90,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 14,
.max = 14,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -111,16 +111,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -132,16 +132,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 24,
.max = 24,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -153,16 +153,16 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -183,7 +183,7 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.max = 12,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -220,9 +220,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.increment = 8
},
.iv_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
}
}, }
}, }
@@ -321,7 +321,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
struct aesni_mb_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = aesni_mb_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
@@ -397,7 +397,7 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct aesni_mb_qp *qp = NULL;
struct aesni_mb_private *internals = dev->data->dev_private;
@@ -426,7 +426,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->ingress_queue == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->stats, 0, sizeof(qp->stats));
@@ -472,36 +472,58 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
-static void *
+static int
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
struct aesni_mb_private *internals = dev->data->dev_private;
+ int ret;
if (unlikely(sess == NULL)) {
MB_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
- sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+ sess_private_data, xform);
+ if (ret != 0) {
MB_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct aesni_mb_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
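
The configure and clear hunks above are easier to read as a pattern in isolation; the following is a sketch under the assumption of the same 17.08 helpers, with my_session again a placeholder: private session data is drawn from a caller-supplied mempool at configure time, and on clear it is wiped and returned to whichever pool it came from.

#include <string.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_mempool.h>

struct my_session { uint8_t key_material[64]; };        /* placeholder */

static void
my_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
{
        void *priv = get_session_private_data(sess, dev->driver_id);

        if (priv == NULL)
                return;
        /* Wipe key material, detach the per-driver slot, then hand the
         * object back to the mempool it was allocated from. */
        memset(priv, 0, sizeof(struct my_session));
        set_session_private_data(sess, dev->driver_id, NULL);
        rte_mempool_put(rte_mempool_from_obj(priv), priv);
}
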
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 0d82699c..6676948e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -35,6 +35,9 @@
#include "aesni_mb_ops.h"
+#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+/**< AES-NI Multi buffer PMD device name */
+
#define MB_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \
@@ -167,6 +170,11 @@ struct aesni_mb_qp {
/** AES-NI multi-buffer private session structure */
struct aesni_mb_session {
JOB_CHAIN_ORDER chain_order;
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
/** Cipher Parameters */
struct {
diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
index 1474951c..86611fa2 100644
--- a/drivers/crypto/armv8/Makefile
+++ b/drivers/crypto/armv8/Makefile
@@ -1,7 +1,7 @@
#
# BSD LICENSE
#
-# Copyright (C) Cavium networks Ltd. 2017.
+# Copyright (C) Cavium, Inc. 2017.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 3d603a5a..a5c39c9b 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -36,6 +36,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -44,6 +45,8 @@
#include "rte_armv8_pmd_private.h"
+static uint8_t cryptodev_driver_id;
+
static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev);
/**
@@ -288,27 +291,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess,
* Generate authentication key, i_key_pad and o_key_pad.
*/
/* Zero memory under key */
- memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH);
-
- if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) {
- /*
- * In case the key is longer than 160 bits
- * the algorithm will use SHA1(key) instead.
- */
- error = sha1_block(NULL, xform->auth.key.data,
- sess->auth.hmac.key, xform->auth.key.length);
- if (error != 0)
- return -1;
- } else {
- /*
- * Now copy the given authentication key to the session
- * key assuming that the session key is zeroed there is
- * no need for additional zero padding if the key is
- * shorter than SHA1_AUTH_KEY_LENGTH.
- */
- rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
- xform->auth.key.length);
- }
+ memset(sess->auth.hmac.key, 0, SHA1_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
/* Prepare HMAC padding: key|pattern */
auth_hmac_pad_prepare(sess, xform);
@@ -334,27 +324,14 @@ auth_set_prerequisites(struct armv8_crypto_session *sess,
* Generate authentication key, i_key_pad and o_key_pad.
*/
/* Zero memory under key */
- memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH);
-
- if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) {
- /*
- * In case the key is longer than 256 bits
- * the algorithm will use SHA256(key) instead.
- */
- error = sha256_block(NULL, xform->auth.key.data,
- sess->auth.hmac.key, xform->auth.key.length);
- if (error != 0)
- return -1;
- } else {
- /*
- * Now copy the given authentication key to the session
- * key assuming that the session key is zeroed there is
- * no need for additional zero padding if the key is
- * shorter than SHA256_AUTH_KEY_LENGTH.
- */
- rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
- xform->auth.key.length);
- }
+ memset(sess->auth.hmac.key, 0, SHA256_BLOCK_SIZE);
+
+ /*
+ * Now copy the given authentication key to the session
+ * key.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
/* Prepare HMAC padding: key|pattern */
auth_hmac_pad_prepare(sess, xform);
@@ -414,7 +391,7 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
order = sess->chain_order;
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
/* Select cipher direction */
sess->cipher.direction = cipher_xform->cipher.op;
@@ -431,10 +408,10 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
case RTE_CRYPTO_CIPHER_AES_CBC:
sess->cipher.algo = calg;
/* IV len is always 16 bytes (block size) for AES CBC */
- sess->cipher.iv_len = 16;
+ sess->cipher.iv.length = 16;
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
/* Select auth generate/verify */
sess->auth.operation = auth_xform->auth.op;
@@ -448,9 +425,12 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
+ /* Set the digest length */
+ sess->auth.digest_length = auth_xform->auth.digest_length;
+
/* Verify supported key lengths and extract proper algorithm */
switch (cipher_xform->cipher.key.length << 3) {
case 128:
@@ -465,7 +445,7 @@ armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
default: /* Fall through */
sess->crypto_func = NULL;
sess->cipher.key_sched = NULL;
- return -EINVAL;
+ return -ENOTSUP;
}
if (unlikely(sess->crypto_func == NULL)) {
@@ -519,20 +499,23 @@ armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
break;
default:
is_chained_op = false;
- return -EINVAL;
+ return -ENOTSUP;
}
+ /* Set IV offset */
+ sess->cipher.iv.offset = cipher_xform->cipher.iv.offset;
+
if (is_chained_op) {
ret = armv8_crypto_set_session_chained_parameters(sess,
cipher_xform, auth_xform);
if (unlikely(ret != 0)) {
ARMV8_CRYPTO_LOG_ERR(
"Invalid/unsupported chained (cipher/auth) parameters");
- return -EINVAL;
+ return ret;
}
} else {
ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
- return -EINVAL;
+ return -ENOTSUP;
}
return 0;
@@ -544,30 +527,36 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
{
struct armv8_crypto_session *sess = NULL;
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
/* get existing session */
- if (likely(op->sym->session != NULL &&
- op->sym->session->dev_type ==
- RTE_CRYPTODEV_ARMV8_PMD)) {
+ if (likely(op->sym->session != NULL)) {
sess = (struct armv8_crypto_session *)
- op->sym->session->_private;
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
}
} else {
/* provide internal session */
void *_sess = NULL;
+ void *_sess_private_data = NULL;
- if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
- sess = (struct armv8_crypto_session *)
- ((struct rte_cryptodev_sym_session *)_sess)
- ->_private;
-
- if (unlikely(armv8_crypto_set_session_parameters(
- sess, op->sym->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, _sess);
- sess = NULL;
- } else
- op->sym->session = _sess;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct armv8_crypto_session *)_sess_private_data;
+
+ if (unlikely(armv8_crypto_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
}
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -645,15 +634,11 @@ process_armv8_chained_op
}
} else {
adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
- op->sym->auth.digest.length);
- }
-
- if (unlikely(op->sym->cipher.iv.length != sess->cipher.iv_len)) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return;
+ sess->auth.digest_length);
}
- arg.cipher.iv = op->sym->cipher.iv.data;
+ arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher.iv.offset);
arg.cipher.key = sess->cipher.key.data;
/* Acquire combined mode function */
crypto_func = sess->crypto_func;
@@ -667,12 +652,12 @@ process_armv8_chained_op
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(adst, op->sym->auth.digest.data,
- op->sym->auth.digest.length) != 0) {
+ sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(m_asrc,
- op->sym->auth.digest.length);
+ sess->auth.digest_length);
}
}
@@ -699,8 +684,11 @@ process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
}
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct armv8_crypto_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -806,15 +794,16 @@ cryptodev_armv8_crypto_create(const char *name,
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
sizeof(struct armv8_crypto_private),
- init_params->socket_id);
+ init_params->socket_id,
+ vdev);
if (dev == NULL) {
ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_ARMV8_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_armv8_crypto_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -860,7 +849,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -904,3 +893,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, cryptodev_driver_id);
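
As the chained-op hunks above show, the digest length is no longer read per operation from the op but from the value captured at session setup; below is a minimal verification sketch under that assumption, with the digest_length argument standing in for the session field and my_verify_digest an illustrative name.

#include <string.h>
#include <rte_crypto.h>

/* Compare a computed digest against the tag supplied in the op, using the
 * length fixed when the session was configured. */
static void
my_verify_digest(struct rte_crypto_op *op, const uint8_t *computed,
                uint16_t digest_length)
{
        if (memcmp(computed, op->sym->auth.digest.data, digest_length) != 0)
                op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
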
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 4d9ccbfb..00297beb 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -50,16 +50,16 @@ static const struct rte_cryptodev_capabilities
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 16,
- .max = 128,
- .increment = 0
+ .min = 1,
+ .max = 64,
+ .increment = 1
},
.digest_size = {
.min = 20,
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -71,16 +71,16 @@ static const struct rte_cryptodev_capabilities
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 16,
- .max = 128,
- .increment = 0
+ .min = 1,
+ .max = 64,
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -178,7 +178,7 @@ armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
struct armv8_crypto_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = armv8_crypto_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
@@ -247,7 +247,7 @@ armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
static int
armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct armv8_crypto_qp *qp = NULL;
@@ -272,7 +272,7 @@ armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->stats, 0, sizeof(qp->stats));
@@ -316,33 +316,56 @@ armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure the session from a crypto xform chain */
-static void *
-armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+static int
+armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
ARMV8_CRYPTO_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (armv8_crypto_set_session_parameters(
- sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = armv8_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
ARMV8_CRYPTO_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
- void *sess)
+armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
/* Zero out the whole structure */
- if (sess)
- memset(sess, 0, sizeof(struct armv8_crypto_session));
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct armv8_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index b75107f2..d02992a6 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,6 +33,9 @@
#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
#define _RTE_ARMV8_PMD_PRIVATE_H_
+#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+/**< ARMv8 Crypto PMD device name */
+
#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
@@ -159,8 +162,11 @@ struct armv8_crypto_session {
/**< cipher operation direction */
enum rte_crypto_cipher_algorithm algo;
/**< cipher algorithm */
- int iv_len;
- /**< IV length */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
struct {
uint8_t data[256];
@@ -192,10 +198,12 @@ struct armv8_crypto_session {
uint8_t o_key_pad[SHA_BLOCK_MAX]
__rte_cache_aligned;
/**< outer pad (max supported block length) */
- uint8_t key[SHA_AUTH_KEY_MAX];
- /**< HMAC key (max supported length)*/
+ uint8_t key[SHA_BLOCK_MAX];
+ /**< HMAC key (max supported block length) */
} hmac;
};
+ uint16_t digest_length;
+ /**< Digest length */
} auth;
} __rte_cache_aligned;
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index 11c7c785..ae15c99f 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -1,7 +1,7 @@
# BSD LICENSE
#
# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright (c) 2016 NXP. All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -46,6 +46,12 @@ CFLAGS += $(WERROR_FLAGS)
endif
CFLAGS += -D _GNU_SOURCE
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
+CFLAGS += -Wno-implicit-fallthrough
+endif
+endif
+
CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 4e01fe81..e0f6cfca 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -69,10 +69,158 @@
#define FSL_MC_DPSECI_DEVID 3
#define NO_PREFETCH 0
-#define TDES_CBC_IV_LEN 8
-#define AES_CBC_IV_LEN 16
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS 32000
+#define FLE_POOL_BUF_SIZE 256
+#define FLE_POOL_CACHE_SIZE 512
+
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+static uint8_t cryptodev_driver_id;
+
+static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length, retval;
+ uint8_t *old_icv;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
+ * Currently we do not know which FLE has the mbuf stored.
+ * So while retrieving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sym_op->aead.digest.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off - auth_only_len);
+ sge->length = sym_op->aead.data.length + auth_only_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->iv.length + auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ sess->digest_length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->aead.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data,
+ sess->digest_length);
+ memset(sym_op->aead.digest.data, 0, sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->digest_length +
+ sess->iv.length +
+ auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+
+ return 0;
+}
+
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
@@ -84,9 +232,10 @@ build_authenc_fd(dpaa2_sec_session *sess,
struct sec_flow_context *flc;
uint32_t auth_only_len = sym_op->auth.data.length -
sym_op->cipher.data.length;
- int icv_len = sym_op->auth.digest.length;
+ int icv_len = sess->digest_length, retval;
uint8_t *old_icv;
- uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
PMD_INIT_FUNC_TRACE();
@@ -96,12 +245,14 @@ build_authenc_fd(dpaa2_sec_session *sess,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -133,10 +284,10 @@ build_authenc_fd(dpaa2_sec_session *sess,
"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
sym_op->auth.data.offset,
sym_op->auth.data.length,
- sym_op->auth.digest.length,
+ sess->digest_length,
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
+ sess->iv.length,
sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
@@ -159,9 +310,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
DPAA2_SET_FLE_ADDR(sge,
DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
- sym_op->cipher.iv.length));
+ sess->iv.length));
}
DPAA2_SET_FLE_FIN(sge);
@@ -173,13 +324,13 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FLE_SG_EXT(fle);
DPAA2_SET_FLE_FIN(fle);
fle->length = (sess->dir == DIR_ENC) ?
- (sym_op->auth.data.length + sym_op->cipher.iv.length) :
- (sym_op->auth.data.length + sym_op->cipher.iv.length +
- sym_op->auth.digest.length);
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ sess->digest_length);
/* Configure Input SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
- sge->length = sym_op->cipher.iv.length;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
sge++;
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
@@ -190,13 +341,13 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->auth.digest.data,
- sym_op->auth.digest.length);
- memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ sess->digest_length);
+ memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
- sym_op->auth.digest.length +
- sym_op->cipher.iv.length));
+ sess->digest_length +
+ sess->iv.length));
}
DPAA2_SET_FLE_FIN(sge);
if (auth_only_len) {
@@ -212,21 +363,19 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
- uint32_t mem_len = (sess->dir == DIR_ENC) ?
- (3 * sizeof(struct qbman_fle)) :
- (5 * sizeof(struct qbman_fle) +
- sym_op->auth.digest.length);
struct sec_flow_context *flc;
struct ctxt_priv *priv = sess->ctxt;
uint8_t *old_digest;
+ int retval;
PMD_INIT_FUNC_TRACE();
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
* Currently we donot know which FLE has the mbuf stored.
* So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -234,6 +383,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* We can have a better approach to use the inline Mbuf
*/
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
if (likely(bpid < MAX_BPID)) {
@@ -249,7 +399,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
- fle->length = sym_op->auth.digest.length;
+ fle->length = sess->digest_length;
DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
DPAA2_SET_FD_COMPOUND_FMT(fd);
@@ -280,17 +430,17 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
- sym_op->auth.digest.length);
+ sess->digest_length);
sge->length = sym_op->auth.data.length;
sge++;
old_digest = (uint8_t *)(sge + 1);
rte_memcpy(old_digest, sym_op->auth.digest.data,
- sym_op->auth.digest.length);
- memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ sess->digest_length);
+ memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
fle->length = sym_op->auth.data.length +
- sym_op->auth.digest.length;
+ sess->digest_length;
DPAA2_SET_FLE_FIN(sge);
}
DPAA2_SET_FLE_FIN(fle);
@@ -304,18 +454,20 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
- uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+ int retval;
struct sec_flow_context *flc;
struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
PMD_INIT_FUNC_TRACE();
- /* todo - we can use some mempool to avoid malloc here */
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
* Currently we donot know which FLE has the mbuf stored.
* So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -323,6 +475,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* We can have a better approach to use the inline Mbuf
*/
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
sge = fle + 2;
@@ -343,21 +496,21 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
flc = &priv->flc_desc[0].flc;
DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
- sym_op->cipher.iv.length);
+ sess->iv.length);
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
+ sess->iv.length,
sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
sym_op->m_src->data_off);
- fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
@@ -365,12 +518,12 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle++;
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
- fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
DPAA2_SET_FLE_SG_EXT(fle);
- DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
- sge->length = sym_op->cipher.iv.length;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
sge++;
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
@@ -406,6 +559,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
case DPAA2_SEC_AUTH:
ret = build_auth_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_CIPHER_HASH:
ret = build_authenc_fd(sess, op, fd, bpid);
break;
@@ -437,7 +593,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
if (unlikely(nb_ops == 0))
return 0;
- if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
return 0;
}
@@ -463,7 +619,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
sess = (dpaa2_sec_session *)
- (*ops)->sym->session->_private;
+ get_session_private_data(
+ (*ops)->sym->session,
+ cryptodev_driver_id);
mb_pool = (*ops)->sym->m_src->pool;
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
@@ -495,6 +653,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
{
struct qbman_fle *fle;
struct rte_crypto_op *op;
+ struct ctxt_priv *priv;
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
@@ -510,7 +669,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
if (unlikely(DPAA2_GET_FD_IVP(fd))) {
/* TODO complete it. */
- RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
+ RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
return NULL;
}
op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
@@ -530,7 +689,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
- rte_free(fle - 1);
+ priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+ rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
return op;
}
@@ -571,8 +731,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
/*Issue a volatile dequeue command. */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
- "QBMAN is busy\n");
+ RTE_LOG(WARNING, PMD,
+ "SEC VDQ command is not issued : QBMAN busy\n");
/* Portal was busy, try again */
continue;
}
@@ -656,7 +816,8 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
- __rte_unused int socket_id)
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
{
struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
struct dpaa2_sec_qp *qp;
@@ -747,19 +908,12 @@ dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
return sizeof(dpaa2_sec_session);
}
-static void
-dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
- void *sess __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo cipherdata;
int bufsize, i;
struct ctxt_priv *priv;
@@ -772,16 +926,18 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
+
flc = &priv->flc_desc[0].flc;
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
rte_free(priv);
return -1;
}
@@ -794,23 +950,27 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
+ /* Set IV parameters */
+ session->iv.offset = xform->cipher.iv.offset;
+ session->iv.length = xform->cipher.iv.length;
+
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_AES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- ctxt->iv.length = AES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_3DES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- ctxt->iv.length = TDES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
case RTE_CRYPTO_CIPHER_3DES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_XTS:
@@ -820,7 +980,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_ZUC_EEA3:
case RTE_CRYPTO_CIPHER_NULL:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
xform->cipher.algo);
goto error_out;
default:
@@ -832,8 +992,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
DIR_ENC : DIR_DEC;
bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
- &cipherdata, NULL, ctxt->iv.length,
- session->dir);
+ &cipherdata, NULL, session->iv.length,
+ session->dir);
if (bufsize < 0) {
RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
goto error_out;
@@ -868,9 +1028,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- unsigned int bufsize;
+ unsigned int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
@@ -882,16 +1042,17 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[DESC_INITFINAL].flc;
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for auth key");
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
rte_free(priv);
return -1;
}
@@ -904,6 +1065,8 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
+ session->digest_length = xform->auth.digest_length;
+
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;
@@ -936,7 +1099,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1:
@@ -945,13 +1107,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
xform->auth.algo);
goto error_out;
default:
@@ -964,7 +1125,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1, 0, &authdata, !session->dir,
- ctxt->trunc_len);
+ session->digest_length);
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
@@ -974,6 +1135,10 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
(uint64_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
return 0;
@@ -989,8 +1154,129 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
dpaa2_sec_session *session)
{
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo aeaddata;
+ unsigned int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set IV parameters */
+ session->iv.offset = aead_xform->iv.offset;
+ session->iv.length = aead_xform->iv.length;
+ session->ctxt_type = DPAA2_SEC_AEAD;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ rte_free(priv);
+ return -1;
+ }
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
+
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
+ ctxt->auth_only_len = aead_xform->aad_length;
+
+ aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.keylen = session->aead_key.length;
+ aeaddata.key_enc_flags = 0;
+ aeaddata.key_type = RTA_DATA_IMM;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata.algtype = OP_ALG_ALGSEL_AES;
+ aeaddata.algmode = OP_ALG_AAI_GCM;
+ session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
+ aead_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ goto error_out;
+ }
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ priv->flc_desc[0].desc[0] = aeaddata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[1], 1);
+
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[1] & 1) {
+ aeaddata.key_type = RTA_DATA_IMM;
+ } else {
+ aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+ aeaddata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_gcm_encap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ else
+ bufsize = cnstr_shdsc_gcm_decap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->aead_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
- unsigned int bufsize;
+ unsigned int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
@@ -1012,21 +1298,27 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
}
+
+ /* Set IV parameters */
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+
/* For SEC AEAD only one descriptor is required */
priv = (struct ctxt_priv *)rte_zmalloc(NULL,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
rte_free(priv);
return -1;
}
@@ -1034,7 +1326,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key");
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
rte_free(session->cipher_key.data);
rte_free(priv);
return -1;
@@ -1045,12 +1337,13 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- ctxt->trunc_len = auth_xform->digest_length;
authdata.key = (uint64_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
+ session->digest_length = auth_xform->digest_length;
+
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;
@@ -1083,7 +1376,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1:
@@ -1092,13 +1384,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
auth_xform->algo);
goto error_out;
default:
@@ -1116,23 +1407,23 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
cipherdata.algtype = OP_ALG_ALGSEL_AES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- ctxt->iv.length = AES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_3DES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- ctxt->iv.length = TDES_CBC_IV_LEN;
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
cipher_xform->algo);
goto error_out;
default:
@@ -1151,7 +1442,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
@@ -1173,12 +1464,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
0, &cipherdata, &authdata,
- ctxt->iv.length,
+ session->iv.length,
ctxt->auth_only_len,
- ctxt->trunc_len,
+ session->digest_length,
session->dir);
} else {
- RTE_LOG(ERR, PMD, "Hash before cipher not supported");
+ RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
goto error_out;
}
@@ -1190,6 +1481,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
(uint64_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1200,8 +1494,8 @@ error_out:
return -1;
}
-static void *
-dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+static int
+dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
dpaa2_sec_session *session = sess;
@@ -1209,9 +1503,13 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct");
- return NULL;
+ RTE_LOG(ERR, PMD, "invalid session struct\n");
+ return -1;
}
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
/* Cipher Only */
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
session->ctxt_type = DPAA2_SEC_CIPHER;
@@ -1227,33 +1525,76 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
session->ext_params.aead_ctxt.auth_cipher_text = true;
- dpaa2_sec_aead_init(dev, xform, session);
+ dpaa2_sec_aead_chain_init(dev, xform, session);
/* Authenticate then Cipher */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
session->ext_params.aead_ctxt.auth_cipher_text = false;
+ dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* AEAD operation for AES-GCM and similar algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
dpaa2_sec_aead_init(dev, xform, session);
+
} else {
- RTE_LOG(ERR, PMD, "Invalid crypto type");
- return NULL;
+ RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- return session;
+ ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+dpaa2_sec_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
PMD_INIT_FUNC_TRACE();
- dpaa2_sec_session *s = (dpaa2_sec_session *)sess;
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
- if (s) {
+ if (sess_priv) {
rte_free(s->ctxt);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(sess, 0, sizeof(dpaa2_sec_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -1263,7 +1604,7 @@ dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
{
PMD_INIT_FUNC_TRACE();
- return -ENOTSUP;
+ return 0;
}
static int
@@ -1366,7 +1707,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev)
/*Free the allocated memory for ethernet private data and dpseci*/
priv->hw = NULL;
- free(dpseci);
+ rte_free(dpseci);
return 0;
}
@@ -1383,7 +1724,7 @@ dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa2_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ info->driver_id = cryptodev_driver_id;
}
}
@@ -1475,15 +1816,17 @@ static struct rte_cryptodev_ops crypto_ops = {
.queue_pair_stop = dpaa2_sec_queue_pair_stop,
.queue_pair_count = dpaa2_sec_queue_pair_count,
.session_get_size = dpaa2_sec_session_get_size,
- .session_initialize = dpaa2_sec_session_initialize,
.session_configure = dpaa2_sec_session_configure,
.session_clear = dpaa2_sec_session_clear,
};
static int
-dpaa2_sec_uninit(const struct rte_cryptodev_driver *crypto_drv __rte_unused,
- struct rte_cryptodev *dev)
+dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->fle_pool);
+
PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
dev->data->name, rte_socket_id());
@@ -1500,6 +1843,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
uint16_t token;
struct dpseci_attr attr;
int retcode, hw_id;
+ char str[20];
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -1509,7 +1853,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
}
hw_id = dpaa2_dev->object_id;
- cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ cryptodev->driver_id = cryptodev_driver_id;
cryptodev->dev_ops = &crypto_ops;
cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
@@ -1560,6 +1904,18 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
internals->hw = dpseci;
internals->token = token;
+ sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+ internals->fle_pool = rte_mempool_create((const char *)str,
+ FLE_POOL_NUM_BUFS,
+ FLE_POOL_BUF_SIZE,
+ FLE_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->fle_pool) {
+ RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ goto init_error;
+ }
+
PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
return 0;
@@ -1599,7 +1955,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
dpaa2_dev->cryptodev = cryptodev;
cryptodev->device = &dpaa2_dev->device;
- cryptodev->driver = (struct rte_cryptodev_driver *)dpaa2_drv;
+ cryptodev->device->driver = &dpaa2_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
@@ -1627,7 +1983,7 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
if (cryptodev == NULL)
return -ENODEV;
- ret = dpaa2_sec_uninit(NULL, cryptodev);
+ ret = dpaa2_sec_uninit(cryptodev);
if (ret)
return ret;
@@ -1638,7 +1994,6 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
rte_free(cryptodev->data->dev_private);
cryptodev->device = NULL;
- cryptodev->driver = NULL;
cryptodev->data = NULL;
return 0;
@@ -1653,4 +2008,5 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
.remove = cryptodev_dpaa2_sec_remove,
};
-RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_dpaa2_sec_driver, cryptodev_driver_id);
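[Editor's note] The reworked dpaa2_sec_session_configure() above follows the two-level session model of this release: the application allocates a session header from a mempool and the PMD then fills in its private data. The sketch below is illustrative only, assuming the DPDK 17.08 cryptodev API; dev_id, sess_pool and the key buffer are placeholders supplied by the application.

#include <string.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Minimal sketch: create an AES-GCM AEAD session usable with this PMD.
 * dev_id and sess_pool are assumed to be set up already (EAL init, device
 * configuration, and a session mempool of suitable element size). */
static struct rte_cryptodev_sym_session *
create_gcm_session(uint8_t dev_id, struct rte_mempool *sess_pool,
		   uint8_t *key, uint16_t keylen)
{
	struct rte_crypto_sym_xform xform;
	struct rte_cryptodev_sym_session *sess;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform.aead.key.data = key;
	xform.aead.key.length = keylen;		/* 16, 24 or 32 bytes */
	xform.aead.iv.offset = sizeof(struct rte_crypto_op) +
			       sizeof(struct rte_crypto_sym_op);
	xform.aead.iv.length = 12;		/* 96-bit IV, per the capability table */
	xform.aead.digest_length = 16;
	xform.aead.aad_length = 0;

	/* Allocate the session header, then let the PMD populate its private
	 * data; this path ends up in dpaa2_sec_session_configure() above. */
	sess = rte_cryptodev_sym_session_create(sess_pool);
	if (sess == NULL)
		return NULL;
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
					   sess_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}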
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index 03d4c703..b5e99f80 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f5c61694..3849a052 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,12 +34,16 @@
#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
+/**< NXP DPAA2 - SEC PMD device name */
+
#define MAX_QUEUES 64
#define MAX_DESC_SIZE 64
/** private data structure for each DPAA2_SEC device */
struct dpaa2_sec_dev_private {
void *mc_portal; /**< MC Portal for configuring this device */
void *hw; /**< Hardware handle for this device. Used by NADK framework */
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
int32_t hw_id; /**< An unique ID of this device instance */
int32_t vfio_fd; /**< File descriptor received via VFIO */
uint16_t token; /**< Token required by DPxxx objects */
@@ -128,6 +132,7 @@ struct sec_flc_desc {
};
struct ctxt_priv {
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
struct sec_flc_desc flc_desc[0];
};
@@ -135,6 +140,7 @@ enum dpaa2_sec_op_type {
DPAA2_SEC_NONE, /*!< No Cipher operations*/
DPAA2_SEC_CIPHER,/*!< CIPHER operations */
DPAA2_SEC_AUTH, /*!< Authentication Operations */
+ DPAA2_SEC_AEAD, /*!< AEAD (AES-GCM/CCM) type operations */
DPAA2_SEC_CIPHER_HASH, /*!< Authenticated Encryption with
* associated data
*/
@@ -147,30 +153,9 @@ enum dpaa2_sec_op_type {
DPAA2_SEC_MAX
};
-struct dpaa2_sec_cipher_ctxt {
- struct {
- uint8_t *data;
- uint16_t length;
- } iv; /**< Initialisation vector parameters */
- uint8_t *init_counter; /*!< Set initial counter for CTR mode */
-};
-
-struct dpaa2_sec_auth_ctxt {
- uint8_t trunc_len; /*!< Length for output ICV, should
- * be 0 if no truncation required
- */
-};
-
struct dpaa2_sec_aead_ctxt {
- struct {
- uint8_t *data;
- uint16_t length;
- } iv; /**< Initialisation vector parameters */
uint16_t auth_only_len; /*!< Length of data for Auth only */
uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
- uint8_t trunc_len; /*!< Length for output ICV, should
- * be 0 if no truncation required
- */
};
typedef struct dpaa2_sec_session_entry {
@@ -179,18 +164,29 @@ typedef struct dpaa2_sec_session_entry {
uint8_t dir; /*!< Operation Direction */
enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
- } cipher_key;
- struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
- } auth_key;
+ uint16_t length; /**< IV length in bytes */
+ uint16_t offset; /**< IV offset in bytes */
+ } iv;
+ uint16_t digest_length;
uint8_t status;
union {
- struct dpaa2_sec_cipher_ctxt cipher_ctxt;
- struct dpaa2_sec_auth_ctxt auth_ctxt;
struct dpaa2_sec_aead_ctxt aead_ctxt;
} ext_params;
} dpaa2_sec_session;
@@ -204,16 +200,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -225,16 +221,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -246,16 +242,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
.max = 28,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -267,16 +263,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
- },
- .aad_size = { 0 }
+ },
+ .iv_size = { 0 }
}, }
}, }
},
@@ -288,16 +284,16 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
.max = 48,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -309,16 +305,46 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
.max = 64,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
}, }
}, }
},
@@ -342,6 +368,26 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
}, }
}, }
},
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
{ /* 3DES CBC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
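[Editor's note] As a usage note for the capability entries extended above: an application can query these size bounds at runtime before building an AEAD transform. A minimal sketch, assuming the 17.08 capability helpers; dev_id is a placeholder.

#include <rte_cryptodev.h>

/* Sketch: verify that the device advertises AES-GCM with the requested
 * key/digest/AAD/IV sizes before creating a session. */
static int
check_gcm_support(uint8_t dev_id, uint16_t key_size, uint16_t digest_size,
		  uint16_t aad_size, uint16_t iv_size)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return -1;	/* algorithm not supported at all */

	/* Checks each size against the min/max/increment triplets above. */
	return rte_cryptodev_sym_capability_check_aead(cap, key_size,
			digest_size, aad_size, iv_size);
}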
diff --git a/drivers/crypto/dpaa2_sec/hw/compat.h b/drivers/crypto/dpaa2_sec/hw/compat.h
index 11fdaa8e..b17b27ef 100644
--- a/drivers/crypto/dpaa2_sec/hw/compat.h
+++ b/drivers/crypto/dpaa2_sec/hw/compat.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -49,7 +49,9 @@
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
+
#include <rte_byteorder.h>
+#include <rte_common.h>
#ifndef __BYTE_ORDER__
#error "Undefined endianness"
@@ -60,7 +62,7 @@
#endif
#ifndef __always_inline
-#define __always_inline (inline __attribute__((always_inline)))
+#define __always_inline __rte_always_inline
#endif
#ifndef __always_unused
diff --git a/drivers/crypto/dpaa2_sec/hw/desc.h b/drivers/crypto/dpaa2_sec/hw/desc.h
index 5185be22..0be4439c 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index c71ada07..5ae3a1ac 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -159,6 +159,10 @@ cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
* @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
* @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
* @ivlen: IV length
* @dir: DIR_ENC/DIR_DEC
@@ -172,8 +176,10 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
{
struct program prg;
struct program *p = &prg;
- const bool is_aes_dec = (dir == DIR_DEC) &&
- (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+ uint32_t iv_off = 0;
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
LABEL(keyjmp);
LABEL(skipdk);
REFERENCE(pkeyjmp);
@@ -191,7 +197,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
cipherdata->keylen, INLINE_KEY(cipherdata));
- if (is_aes_dec) {
+ if (need_dk) {
ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
@@ -199,7 +205,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
}
SET_LABEL(p, keyjmp);
- if (is_aes_dec) {
+ if (need_dk) {
ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
ICV_CHECK_DISABLE, dir);
@@ -209,12 +215,15 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
}
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ iv_off = 16;
+
if (iv)
/* IV load, convert size */
- LOAD(p, (uintptr_t)iv, CONTEXT1, 0, ivlen, IMMED | COPY);
+ LOAD(p, (uintptr_t)iv, CONTEXT1, iv_off, ivlen, IMMED | COPY);
else
/* IV is present first before the actual message */
- SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+ SEQLOAD(p, CONTEXT1, iv_off, ivlen, 0);
MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
@@ -224,7 +233,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
SEQFIFOSTORE(p, MSG, 0, 0, VLF);
PATCH_JUMP(p, pkeyjmp, keyjmp);
- if (is_aes_dec)
+ if (need_dk)
PATCH_JUMP(p, pskipdk, skipdk);
return PROGRAM_FINALIZE(p);
@@ -434,6 +443,211 @@ cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
}
/**
+ * cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump2);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump2);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ MATHB(p, SEQINSZ, SUB, ivlen, VSEQOUTSZ, 4, IMMED2);
+ pzeroassocjump2 = JUMP(p, zeroassocjump2, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+ SET_LABEL(p, zeroassocjump1);
+
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* write encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | LAST1);
+
+ /* jump the zero-payload commands */
+ JUMP(p, 4, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* zero-payload commands */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | LAST1);
+
+ JUMP(p, 2, LOCAL_JUMP, ALL_TRUE, 0);
+
+ /* There is no input data */
+ SET_LABEL(p, zeroassocjump2);
+
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1 | LAST1);
+
+ /* write ICV */
+ SEQSTORE(p, CONTEXT1, 0, icvsize, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump2, zeroassocjump2);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_decap - AES-GCM decap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ * OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ uint32_t ivlen, uint32_t icvsize)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ LABEL(zeroassocjump1);
+ LABEL(zeropayloadjump);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pzeroassocjump1);
+ REFERENCE(pzeropayloadjump);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ SET_LABEL(p, keyjmp);
+
+ /* class 1 operation */
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+
+ MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+ SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+ pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* skip assoc data */
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+ /* read assoc data */
+ SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+
+ SET_LABEL(p, zeroassocjump1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+ MATH_Z);
+
+ MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQOUTSZ, 4, 0);
+
+ /* store encrypted data */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ /* read payload data */
+ SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | FLUSH1);
+
+ /* zero-payload command */
+ SET_LABEL(p, zeropayloadjump);
+
+ /* read ICV */
+ SEQFIFOLOAD(p, ICV1, icvsize, CLASS1 | LAST1);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+ PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
* cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
* @descbuf: pointer to descriptor-under-construction buffer
* @swap: must be true when core endianness doesn't match SEC endianness
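[Editor's note] To show how the new GCM helpers are consumed (mirroring dpaa2_sec_aead_init() earlier in this patch), here is a reduced sketch of building an encap shared descriptor. The include path and the immediate-key choice are illustrative assumptions; the alginfo fields are the ones the driver fills above.

/* Sketch only: construct an AES-GCM encapsulation shared descriptor the
 * same way dpaa2_sec_aead_init() does. descbuf must point to a buffer of
 * at least MAX_DESC_SIZE (64) 32-bit words. */
#include "hw/desc/algo.h"	/* illustrative include path */

static int
build_gcm_encap_desc(uint32_t *descbuf, uint8_t *key, uint32_t keylen,
		     uint32_t ivlen, uint32_t icvsize)
{
	struct alginfo aeaddata = {
		.key = (uint64_t)key,		/* key inlined in the descriptor */
		.keylen = keylen,
		.key_enc_flags = 0,
		.key_type = RTA_DATA_IMM,
		.algtype = OP_ALG_ALGSEL_AES,
		.algmode = OP_ALG_AAI_GCM,
	};

	/* ps = 1 (36/40-bit addressing), swap = 0 (core endianness matches
	 * SEC), as at the driver call sites above. Returns descriptor size
	 * in words or a negative number on error. */
	return cnstr_shdsc_gcm_encap(descbuf, 1, 0, &aeaddata,
				     ivlen, icvsize);
}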
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/common.h b/drivers/crypto/dpaa2_sec/hw/desc/common.h
index 6b254908..c2ac99be 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/common.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/common.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index c63d0dac..cc637361 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -1311,8 +1311,11 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
- * @cipherdata: ointer to block cipher transform definitions.
+ * @cipherdata: pointer to block cipher transform definitions.
* Valid algorithm values - one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * Valid modes for:
+ * AES: OP_ALG_AAI_* {CBC, CTR}
+ * DES, 3DES: OP_ALG_AAI_CBC
* @authdata: pointer to authentication transform definitions.
* Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
* SHA224, SHA256, SHA384, SHA512}
@@ -1379,8 +1382,9 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
{
struct program prg;
struct program *p = &prg;
- const bool is_aes_dec = (dir == DIR_DEC) &&
- (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+ const bool need_dk = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+ (cipherdata->algmode == OP_ALG_AAI_CBC);
LABEL(skip_patch_len);
LABEL(keyjmp);
@@ -1466,7 +1470,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
dir);
- if (is_aes_dec)
+ if (need_dk)
ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
@@ -1478,7 +1482,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
dir);
- if (is_aes_dec) {
+ if (need_dk) {
ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
ICV_CHECK_DISABLE, dir);
@@ -1503,7 +1507,10 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
SET_LABEL(p, aonly_len_offset);
/* Read IV */
- SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+ if (cipherdata->algmode == OP_ALG_AAI_CTR)
+ SEQLOAD(p, CONTEXT1, 16, ivlen, 0);
+ else
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
/*
* Read data needed only for authentication. This is overwritten above
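[Editor's note] For reference, the chained cipher+auth path touched here is driven from dpaa2_sec_aead_chain_init() earlier in this patch. A condensed sketch of that call with the updated parameters (session-level IV length and digest length instead of the removed per-context fields); names and the include path are illustrative.

/* Sketch: build a "cipher then hash" shared descriptor as the driver does.
 * For AES-CTR the descriptor itself loads the IV at offset 16 in CONTEXT1,
 * so the caller only passes the IV length. */
#include "hw/desc/ipsec.h"	/* illustrative include path */

static int
build_authenc_desc(uint32_t *descbuf,
		   struct alginfo *cipherdata, struct alginfo *authdata,
		   uint16_t iv_length, uint16_t auth_only_len,
		   uint16_t digest_length, uint8_t dir)
{
	/* ps = 1, swap = 0, matching the driver call site; dir is
	 * DIR_ENC or DIR_DEC. */
	return cnstr_shdsc_authenc(descbuf, 1, 0, cipherdata, authdata,
				   iv_length, auth_only_len,
				   digest_length, dir);
}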
diff --git a/drivers/crypto/dpaa2_sec/hw/rta.h b/drivers/crypto/dpaa2_sec/hw/rta.h
index 7cf4c8a9..283ad600 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
index 79a48da2..dc0a3342 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
index c2a27a2d..96c9f1e0 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
index 2c85beec..cba65073 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
index 9bef1155..8e097856 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
index 1db55b33..2e786a20 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
index a9b9091d..64a84b47 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
index 10d2e193..3b9ba792 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
index 30be0825..f1ead2da 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
index 5e88fb46..698ff530 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
index 2e7b2f2d..f134235a 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
index c12edb0c..11995aa2 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
index 8d421a5d..4aea5770 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
index ac4f3ff8..00597f30 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
index 8fd01801..1b75692e 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index 6b3411f0..4a109620 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 0fb47f73..6cc14a66 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index 1fb18cc3..3f9f4743 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 9da9e897..38cd8a9b 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -47,6 +48,8 @@
#define KASUMI_MAX_BURST 4
#define BYTE_LEN 8
+static uint8_t cryptodev_driver_id;
+
/** Get xform chain order. */
static enum kasumi_operation
kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -108,13 +111,20 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
case KASUMI_OP_NOT_SUPPORTED:
default:
KASUMI_LOG_ERR("Unsupported operation chain order parameter");
- return -EINVAL;
+ return -ENOTSUP;
}
if (cipher_xform) {
/* Only KASUMI F8 supported */
if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+ return -ENOTSUP;
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+ if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
+ KASUMI_LOG_ERR("Wrong IV length");
return -EINVAL;
+ }
+
/* Initialize key */
sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
&sess->pKeySched_cipher);
@@ -123,8 +133,15 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
if (auth_xform) {
/* Only KASUMI F9 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
+ KASUMI_LOG_ERR("Wrong digest length");
return -EINVAL;
+ }
+
sess->auth_op = auth_xform->auth.op;
+
/* Initialize key */
sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
&sess->pKeySched_hash);
@@ -140,27 +157,40 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
static struct kasumi_session *
kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
{
- struct kasumi_session *sess;
-
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_KASUMI_PMD))
+ struct kasumi_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct kasumi_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct kasumi_session *)op->sym->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct kasumi_session *)c_sess->_private;
+ sess = (struct kasumi_session *)_sess_private_data;
if (unlikely(kasumi_set_session_parameters(sess,
- op->sym->xform) != 0))
- return NULL;
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
return sess;
}
@@ -173,17 +203,11 @@ process_kasumi_cipher_op(struct rte_crypto_op **ops,
unsigned i;
uint8_t processed_ops = 0;
uint8_t *src[num_ops], *dst[num_ops];
- uint64_t IV[num_ops];
+ uint8_t *iv_ptr;
+ uint64_t iv[num_ops];
uint32_t num_bytes[num_ops];
for (i = 0; i < num_ops; i++) {
- /* Sanity checks. */
- if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("iv");
- break;
- }
-
src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
dst[i] = ops[i]->sym->m_dst ?
@@ -191,14 +215,16 @@ process_kasumi_cipher_op(struct rte_crypto_op **ops,
(ops[i]->sym->cipher.data.offset >> 3) :
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
- IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data));
+ iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
+ iv[i] = *((uint64_t *)(iv_ptr));
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
processed_ops++;
}
if (processed_ops != 0)
- sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV,
+ sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, iv,
src, dst, num_bytes, processed_ops);
return processed_ops;
@@ -210,16 +236,10 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
struct kasumi_session *session)
{
uint8_t *src, *dst;
- uint64_t IV;
+ uint8_t *iv_ptr;
+ uint64_t iv;
uint32_t length_in_bits, offset_in_bits;
- /* Sanity checks. */
- if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("iv");
- return 0;
- }
-
offset_in_bits = op->sym->cipher.data.offset;
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
@@ -228,10 +248,12 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- IV = *((uint64_t *)(op->sym->cipher.iv.data));
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
+ iv = *((uint64_t *)(iv_ptr));
length_in_bits = op->sym->cipher.data.length;
- sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
src, dst, length_in_bits, offset_in_bits);
return 1;
@@ -248,23 +270,8 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
uint8_t *src, *dst;
uint32_t length_in_bits;
uint32_t num_bytes;
- uint32_t shift_bits;
- uint64_t IV;
- uint8_t direction;
for (i = 0; i < num_ops; i++) {
- if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("aad");
- break;
- }
-
- if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("digest");
- break;
- }
-
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -276,34 +283,28 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
- /* IV from AAD */
- IV = *((uint64_t *)(ops[i]->sym->auth.aad.data));
/* Direction from next bit after end of message */
- num_bytes = (length_in_bits >> 3) + 1;
- shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN;
- direction = (src[num_bytes - 1] >> shift_bits) & 0x01;
+ num_bytes = length_in_bits >> 3;
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ KASUMI_DIGEST_LENGTH);
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
- sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
- IV, src,
- length_in_bits, dst, direction);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
- ops[i]->sym->auth.digest.length) != 0)
+ KASUMI_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ KASUMI_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
- sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
- IV, src,
- length_in_bits, dst, direction);
+ sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
+ num_bytes, dst);
}
processed_ops++;
}
@@ -352,7 +353,11 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct kasumi_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -404,8 +409,9 @@ process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, op->sym->session);
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct kasumi_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
op->sym->session = NULL;
}
@@ -566,21 +572,18 @@ cryptodev_kasumi_create(const char *name,
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ else
cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- else {
- KASUMI_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct kasumi_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct kasumi_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
KASUMI_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_kasumi_pmd_ops;
/* Register RX/TX burst functions for data path. */
@@ -622,7 +625,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -664,3 +667,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_kasumi_pmd_drv, cryptodev_driver_id);
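[Editor's note] The KASUMI changes above move the cipher IV from the removed per-op iv.data field to an offset inside the crypto operation, recorded at session setup. The sketch below shows the application side of that contract under the 17.08 API; the IV length macro and offset choice are placeholders that must match the iv.offset given in the cipher xform.

#include <string.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

#define KASUMI_F8_IV_LENGTH	8	/* 64-bit F8 IV, as checked above */

/* Sketch: place the F8 IV in the op's private area at the same offset the
 * session was configured with (cipher_xform.cipher.iv.offset). The PMD then
 * reads it back with rte_crypto_op_ctod_offset(), as in the diff above. */
static void
set_kasumi_iv(struct rte_crypto_op *op, const uint8_t iv[KASUMI_F8_IV_LENGTH])
{
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
			     sizeof(struct rte_crypto_sym_op);
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);

	memcpy(iv_ptr, iv, KASUMI_F8_IV_LENGTH);
}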
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index 62ebdbd2..8033114b 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,11 +56,7 @@ static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
.max = 4,
.increment = 0
},
- .aad_size = {
- .min = 8,
- .max = 8,
- .increment = 0
- }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -156,7 +152,7 @@ kasumi_pmd_info_get(struct rte_cryptodev *dev,
struct kasumi_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -223,7 +219,7 @@ kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
static int
kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct kasumi_qp *qp = NULL;
@@ -248,7 +244,7 @@ kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -291,33 +287,56 @@ kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a KASUMI session from a crypto xform chain */
-static void *
+static int
kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
KASUMI_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (kasumi_set_session_parameters(sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = kasumi_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
KASUMI_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-kasumi_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+kasumi_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct kasumi_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct kasumi_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops kasumi_pmd_ops = {
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
index fb586caa..0ce2a2e3 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include <sso_kasumi.h>
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+/**< KASUMI PMD device name */
+
#define KASUMI_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
@@ -92,6 +95,7 @@ struct kasumi_session {
sso_kasumi_key_sched_t pKeySched_hash;
enum kasumi_operation op;
enum rte_crypto_auth_operation auth_op;
+ uint16_t cipher_iv_offset;
} __rte_cache_aligned;
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index 023450a3..2c827257 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,11 +33,14 @@
#include <rte_common.h>
#include <rte_config.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include "null_crypto_pmd_private.h"
+static uint8_t cryptodev_driver_id;
+
/** verify and set session parameters */
int
null_crypto_set_session_parameters(
@@ -45,7 +48,7 @@ null_crypto_set_session_parameters(
const struct rte_crypto_sym_xform *xform)
{
if (xform == NULL) {
- return -1;
+ return -EINVAL;
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next == NULL) {
/* Authentication Only */
@@ -70,7 +73,7 @@ null_crypto_set_session_parameters(
return 0;
}
- return -1;
+ return -ENOTSUP;
}
/** Process crypto operation for mbuf */
@@ -81,6 +84,14 @@ process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
/* set status as successful by default */
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0,
+ sizeof(struct null_crypto_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
+ op->sym->session = NULL;
+ }
+
/*
* if crypto session and operation are valid just enqueue the packet
* in the processed ring
@@ -89,26 +100,37 @@ process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
}
static struct null_crypto_session *
-get_session(struct null_crypto_qp *qp, struct rte_crypto_sym_op *op)
+get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
{
- struct null_crypto_session *sess;
-
- if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->session == NULL ||
- op->session->dev_type != RTE_CRYPTODEV_NULL_PMD))
+ struct null_crypto_session *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(sym_op->session != NULL))
+ sess = (struct null_crypto_session *)
+ get_session_private_data(
+ sym_op->session, cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct null_crypto_session *)op->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct null_crypto_session *)c_sess->_private;
-
- if (null_crypto_set_session_parameters(sess, op->xform) != 0)
- return NULL;
+ sess = (struct null_crypto_session *)_sess_private_data;
+
+ if (unlikely(null_crypto_set_session_parameters(sess,
+ sym_op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(sym_op->session, cryptodev_driver_id,
+ _sess_private_data);
}
return sess;
@@ -125,7 +147,7 @@ null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
int i, retval;
for (i = 0; i < nb_ops; i++) {
- sess = get_session(qp, ops[i]->sym);
+ sess = get_session(qp, ops[i]);
if (unlikely(sess == NULL))
goto enqueue_err;
@@ -166,6 +188,7 @@ static int cryptodev_null_remove(const char *name);
/** Create crypto device */
static int
cryptodev_null_create(const char *name,
+ struct rte_vdev_device *vdev,
struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
@@ -175,15 +198,16 @@ cryptodev_null_create(const char *name,
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
sizeof(struct null_crypto_private),
- init_params->socket_id);
+ init_params->socket_id,
+ vdev);
if (dev == NULL) {
NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_NULL_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = null_crypto_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -235,7 +259,7 @@ cryptodev_null_probe(struct rte_vdev_device *dev)
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_null_create(name, &init_params);
+ return cryptodev_null_create(name, dev, &init_params);
}
/** Uninitialise null crypto device */
@@ -268,3 +292,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_null_pmd_drv, cryptodev_driver_id);
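The null PMD's get_session()/process_op() hunks above also show the new session-less path: two objects are taken from the queue pair's session mempool, one for the generic session header and one for the driver-private data, and both must go back once the op is processed. A condensed sketch under the same 17.08 headers; the names are hypothetical:

/* Illustrative only; assumes the DPDK 17.08 cryptodev headers. */
#include <rte_cryptodev_pmd.h>
#include <rte_mempool.h>

/* Take a generic session header plus a private object from the
 * queue pair's session mempool for a session-less op. */
static void *
my_pmd_sessionless_get(struct rte_mempool *sess_mp,
		struct rte_crypto_op *op, uint8_t driver_id)
{
	void *_sess, *_priv;

	if (rte_mempool_get(sess_mp, &_sess))
		return NULL;
	if (rte_mempool_get(sess_mp, &_priv)) {
		rte_mempool_put(sess_mp, _sess);
		return NULL;
	}

	op->sym->session = _sess;
	set_session_private_data(op->sym->session, driver_id, _priv);
	return _priv;		/* to be filled from op->sym->xform */
}

/* After the op is processed, release both objects. */
static void
my_pmd_sessionless_put(struct rte_mempool *sess_mp,
		struct rte_crypto_op *op, uint8_t driver_id)
{
	void *_priv = get_session_private_data(op->sym->session, driver_id);

	rte_mempool_put(sess_mp, _priv);
	rte_mempool_put(sess_mp, op->sym->session);
	op->sym->session = NULL;
}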
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index 12c946c9..76153203 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,7 +56,7 @@ static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] =
.max = 0,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, },
}, },
},
@@ -72,11 +72,7 @@ static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] =
.max = 0,
.increment = 0
},
- .iv_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- }
+ .iv_size = { 0 }
}, },
}, }
},
@@ -151,7 +147,7 @@ null_crypto_pmd_info_get(struct rte_cryptodev *dev,
struct null_crypto_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -215,7 +211,7 @@ null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
static int
null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct null_crypto_private *internals = dev->data->dev_private;
struct null_crypto_qp *qp;
@@ -258,7 +254,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto qp_setup_cleanup;
}
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -302,33 +298,56 @@ null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a null crypto session from a crypto xform chain */
-static void *
+static int
null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
{
- int retval;
+ void *sess_private_data;
+ int ret;
if (unlikely(sess == NULL)) {
NULL_CRYPTO_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- retval = null_crypto_set_session_parameters(
- (struct null_crypto_session *)sess, xform);
- if (retval != 0) {
+
+ ret = null_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
NULL_CRYPTO_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-null_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
- void *sess)
+null_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- if (sess)
- memset(sess, 0, sizeof(struct null_crypto_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct null_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops pmd_ops = {
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
index acebc973..4d1c3c9e 100644
--- a/drivers/crypto/null/null_crypto_pmd_private.h
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -35,6 +35,9 @@
#include "rte_config.h"
+#define CRYPTODEV_NAME_NULL_PMD crypto_null
+/**< Null crypto PMD device name */
+
#define NULL_CRYPTO_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index f0c5ca3c..0bd5f98e 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -44,6 +45,8 @@
#define DES_BLOCK_SIZE 8
+static uint8_t cryptodev_driver_id;
+
static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
/*----------------------------------------------------------------------------*/
@@ -88,6 +91,8 @@ openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
res = OPENSSL_CHAIN_CIPHER_AUTH;
}
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ res = OPENSSL_CHAIN_COMBINED;
}
return res;
@@ -183,21 +188,6 @@ get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
res = -EINVAL;
}
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
- switch (keylen) {
- case 16:
- *algo = EVP_aes_128_gcm();
- break;
- case 24:
- *algo = EVP_aes_192_gcm();
- break;
- case 32:
- *algo = EVP_aes_256_gcm();
- break;
- default:
- res = -EINVAL;
- }
- break;
default:
res = -EINVAL;
break;
@@ -253,6 +243,41 @@ get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
return res;
}
+/** Get adequate openssl function for input AEAD algorithm */
+static uint8_t
+get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_gcm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_gcm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_gcm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
/** Set session cipher parameters */
static int
openssl_set_session_cipher_parameters(struct openssl_session *sess,
@@ -263,12 +288,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
/* Select cipher key */
sess->cipher.key.length = xform->cipher.key.length;
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
/* Select cipher algo */
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
sess->cipher.mode = OPENSSL_CIPHER_LIB;
sess->cipher.algo = xform->cipher.algo;
sess->cipher.ctx = EVP_CIPHER_CTX_new();
@@ -308,7 +336,7 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
break;
default:
sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
- return -EINVAL;
+ return -ENOTSUP;
}
return 0;
@@ -326,11 +354,33 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
/* Select auth algo */
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_AES_GMAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
- /* Check additional condition for AES_GMAC/GCM */
- if (sess->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM)
- return -EINVAL;
sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->auth.iv.offset;
+ sess->iv.length = xform->auth.iv.length;
+
+ /*
+ * OpenSSL requires GMAC to be a GCM operation
+ * with no cipher data length
+ */
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ else
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ sess->cipher.key.length = xform->auth.key.length;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(RTE_CRYPTO_AEAD_AES_GCM,
+ sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->auth.key.data, xform->auth.key.length,
+ sess->cipher.key.data);
+
break;
case RTE_CRYPTO_AUTH_MD5:
@@ -362,9 +412,55 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
break;
default:
- return -EINVAL;
+ return -ENOTSUP;
}
+ sess->auth.digest_length = xform->auth.digest_length;
+
+ return 0;
+}
+
+/* Set session AEAD parameters */
+static int
+openssl_set_session_aead_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher direction */
+ sess->cipher.direction = xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = xform->aead.key.length;
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->aead.iv.offset;
+ sess->iv.length = xform->aead.iv.length;
+
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+ sess->auth.algo = xform->auth.algo;
+
+ /* Select auth algo */
+ switch (xform->aead.algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->aead_algo = xform->aead.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(sess->aead_algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->auth.aad_length = xform->aead.aad_length;
+ sess->auth.digest_length = xform->aead.digest_length;
+
return 0;
}
@@ -375,6 +471,8 @@ openssl_set_session_parameters(struct openssl_session *sess,
{
const struct rte_crypto_sym_xform *cipher_xform = NULL;
const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret;
sess->chain_order = openssl_get_chain_order(xform);
switch (sess->chain_order) {
@@ -392,25 +490,42 @@ openssl_set_session_parameters(struct openssl_session *sess,
auth_xform = xform;
cipher_xform = xform->next;
break;
+ case OPENSSL_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
default:
return -EINVAL;
}
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
	/* cipher_xform must be checked before auth_xform */
if (cipher_xform) {
- if (openssl_set_session_cipher_parameters(
- sess, cipher_xform)) {
+ ret = openssl_set_session_cipher_parameters(
+ sess, cipher_xform);
+ if (ret != 0) {
OPENSSL_LOG_ERR(
"Invalid/unsupported cipher parameters");
- return -EINVAL;
+ return ret;
}
}
if (auth_xform) {
- if (openssl_set_session_auth_parameters(sess, auth_xform)) {
+ ret = openssl_set_session_auth_parameters(sess, auth_xform);
+ if (ret != 0) {
OPENSSL_LOG_ERR(
"Invalid/unsupported auth parameters");
- return -EINVAL;
+ return ret;
+ }
+ }
+
+ if (aead_xform) {
+ ret = openssl_set_session_aead_parameters(sess, aead_xform);
+ if (ret != 0) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported AEAD parameters");
+ return ret;
}
}
@@ -445,29 +560,35 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
{
struct openssl_session *sess = NULL;
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
/* get existing session */
- if (likely(op->sym->session != NULL &&
- op->sym->session->dev_type ==
- RTE_CRYPTODEV_OPENSSL_PMD))
+ if (likely(op->sym->session != NULL))
sess = (struct openssl_session *)
- op->sym->session->_private;
- } else {
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
/* provide internal session */
void *_sess = NULL;
+ void *_sess_private_data = NULL;
- if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
- sess = (struct openssl_session *)
- ((struct rte_cryptodev_sym_session *)_sess)
- ->_private;
-
- if (unlikely(openssl_set_session_parameters(
- sess, op->sym->xform) != 0)) {
- rte_mempool_put(qp->sess_mp, _sess);
- sess = NULL;
- } else
- op->sym->session = _sess;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct openssl_session *)_sess_private_data;
+
+ if (unlikely(openssl_set_session_parameters(sess,
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
}
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
if (sess == NULL)
@@ -912,6 +1033,7 @@ process_openssl_combined_op
/* cipher */
uint8_t *dst = NULL, *iv, *tag, *aad;
int srclen, ivlen, aadlen, status = -1;
+ uint32_t offset;
/*
* Segmented destination buffer is not supported for
@@ -922,34 +1044,41 @@ process_openssl_combined_op
return;
}
- iv = op->sym->cipher.iv.data;
- ivlen = op->sym->cipher.iv.length;
- aad = op->sym->auth.aad.data;
- aadlen = op->sym->auth.aad.length;
-
- tag = op->sym->auth.digest.data;
- if (tag == NULL)
- tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
- op->sym->cipher.data.offset +
- op->sym->cipher.data.length);
-
- if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ ivlen = sess->iv.length;
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
srclen = 0;
- else {
- srclen = op->sym->cipher.data.length;
+ offset = op->sym->auth.data.offset;
+ aadlen = op->sym->auth.data.length;
+ aad = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+ tag = op->sym->auth.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + aadlen);
+ } else {
+ srclen = op->sym->aead.data.length;
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
- op->sym->cipher.data.offset);
+ op->sym->aead.data.offset);
+ offset = op->sym->aead.data.offset;
+ aad = op->sym->aead.aad.data;
+ aadlen = sess->auth.aad_length;
+ tag = op->sym->aead.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ offset + srclen);
}
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
status = process_openssl_auth_encryption_gcm(
- mbuf_src, op->sym->cipher.data.offset, srclen,
+ mbuf_src, offset, srclen,
aad, aadlen, iv, ivlen, sess->cipher.key.data,
dst, tag, sess->cipher.ctx,
sess->cipher.evp_algo);
else
status = process_openssl_auth_decryption_gcm(
- mbuf_src, op->sym->cipher.data.offset, srclen,
+ mbuf_src, offset, srclen,
aad, aadlen, iv, ivlen, sess->cipher.key.data,
dst, tag, sess->cipher.ctx,
sess->cipher.evp_algo);
@@ -986,7 +1115,8 @@ process_openssl_cipher_op
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
- iv = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
@@ -1027,7 +1157,8 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
- iv = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
block_size = DES_BLOCK_SIZE;
@@ -1085,7 +1216,8 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
dst, iv,
last_block_len, sess->cipher.bpi_ctx);
/* Prepare parameters for CBC mode op */
- iv = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
dst += last_block_len - srclen;
srclen -= last_block_len;
}
@@ -1116,7 +1248,7 @@ process_openssl_auth_op
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
- op->sym->auth.digest.length);
+ sess->auth.digest_length);
else {
dst = op->sym->auth.digest.data;
if (dst == NULL)
@@ -1144,11 +1276,11 @@ process_openssl_auth_op
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(dst, op->sym->auth.digest.data,
- op->sym->auth.digest.length) != 0) {
+ sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(mbuf_src, op->sym->auth.digest.length);
+ rte_pktmbuf_trim(mbuf_src, sess->auth.digest_length);
}
if (status != 0)
@@ -1195,9 +1327,12 @@ process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
}
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
openssl_reset_session(sess);
memset(sess, 0, sizeof(struct openssl_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
@@ -1275,15 +1410,16 @@ cryptodev_openssl_create(const char *name,
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
sizeof(struct openssl_private),
- init_params->socket_id);
+ init_params->socket_id,
+ vdev);
if (dev == NULL) {
OPENSSL_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_OPENSSL_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_openssl_pmd_ops;
/* register rx/tx burst functions for data path */
@@ -1329,7 +1465,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -1372,3 +1508,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_openssl_pmd_drv, cryptodev_driver_id);
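Throughout this patch the cipher IV moves out of rte_crypto_sym_op and into the op's private area, at an offset the session records at configure time; the PMDs above read it back with rte_crypto_op_ctod_offset(). A small sketch of both halves of that contract; IV_OFFSET and IV_LEN are illustrative values chosen by the application, not anything mandated by the patch:

/* Illustrative only; assumes the DPDK 17.08 headers. */
#include <string.h>
#include <rte_crypto.h>

#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))
#define IV_LEN		16

/* Application side: place the per-op IV in the op's private area.
 * The session must have been created with cipher.iv.offset = IV_OFFSET. */
static void
app_set_iv(struct rte_crypto_op *op, const uint8_t *iv)
{
	uint8_t *dst = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

	memcpy(dst, iv, IV_LEN);
}

/* PMD side: resolve the IV from the offset stored in the session,
 * as process_openssl_cipher_op() does above. */
static uint8_t *
pmd_get_iv(struct rte_crypto_op *op, uint16_t iv_offset)
{
	return rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);
}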
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 22a68730..8cdd0b2e 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -48,16 +48,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -78,7 +78,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 16,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -90,16 +90,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -120,7 +120,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 20,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -132,16 +132,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
.max = 28,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -162,7 +162,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 28,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -174,40 +174,40 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
{ /* SHA256 */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SHA256,
- .block_size = 64,
- .key_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- },
- .digest_size = {
- .min = 32,
- .max = 32,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .iv_size = { 0 }
}, }
- },
+ }, }
+ },
{ /* SHA384 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -216,16 +216,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
.max = 48,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -246,7 +246,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 48,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -258,16 +258,16 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
.max = 64,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -288,7 +288,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 64,
.increment = 0
},
- .aad_size = { 0 }
+ .iv_size = { 0 }
}, }
}, }
},
@@ -332,12 +332,12 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
- { /* AES GCM (AUTH) */
+ { /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
@@ -353,27 +353,12 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.min = 0,
.max = 65535,
.increment = 1
- }
- }, }
- }, }
- },
- { /* AES GCM (CIPHER) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 8
},
.iv_size = {
.min = 12,
.max = 16,
.increment = 4
- }
+ },
}, }
}, }
},
@@ -394,9 +379,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.max = 16,
.increment = 0
},
- .aad_size = {
- .min = 8,
- .max = 65532,
+ .iv_size = {
+ .min = 12,
+ .max = 16,
.increment = 4
}
}, }
@@ -536,7 +521,7 @@ openssl_pmd_info_get(struct rte_cryptodev *dev,
struct openssl_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = openssl_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
@@ -602,7 +587,7 @@ openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
static int
openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct openssl_qp *qp = NULL;
@@ -627,7 +612,7 @@ openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->stats, 0, sizeof(qp->stats));
@@ -671,36 +656,57 @@ openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure the session from a crypto xform chain */
-static void *
+static int
openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
OPENSSL_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- if (openssl_set_session_parameters(
- sess, xform) != 0) {
+ ret = openssl_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
OPENSSL_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-openssl_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+openssl_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess) {
- openssl_reset_session(sess);
- memset(sess, 0, sizeof(struct openssl_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
}
}
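The capability table above also folds the old AES GCM (AUTH) and (CIPHER) entries into a single AEAD entry, matching the new RTE_CRYPTO_SYM_XFORM_AEAD transform that the openssl and qat session-configure code consumes later in this patch. A sketch of how an application might fill such a transform for AES-GCM; the key length, AAD length and IV offset shown are illustrative:

/* Illustrative only; assumes the DPDK 17.08 rte_crypto_sym.h. */
#include <rte_crypto_sym.h>

static struct rte_crypto_sym_xform
make_gcm_xform(uint8_t *key, uint16_t key_len, uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.next = NULL,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = key_len },
			.iv = { .offset = iv_offset, .length = 12 },
			.aad_length = 16,	/* illustrative */
			.digest_length = 16,
		},
	};

	return xform;
}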
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index 4d820c51..b7f74752 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,6 +36,8 @@
#include <openssl/evp.h>
#include <openssl/des.h>
+#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
+/**< OpenSSL Crypto PMD device name */
#define OPENSSL_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
@@ -108,6 +110,15 @@ struct openssl_session {
enum openssl_chain_order chain_order;
/**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD algorithm */
+
/** Cipher Parameters */
struct {
enum rte_crypto_cipher_operation direction;
@@ -157,6 +168,11 @@ struct openssl_session {
/**< pointer to EVP context structure */
} hmac;
};
+
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< digest length */
} auth;
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 5c63406b..2c8e03c0 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -17,7 +17,7 @@
* qat-linux@intel.com
*
* BSD LICENSE
- * Copyright(c) 2015-2016 Intel Corporation.
+ * Copyright(c) 2015-2017 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -51,6 +51,7 @@
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
+#include "../qat_crypto.h"
/*
* Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
@@ -127,15 +128,17 @@ struct qat_session {
struct icp_qat_fw_la_bulk_req fw_req;
uint8_t aad_len;
struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
-};
-
-struct qat_alg_ablkcipher_cd {
- struct icp_qat_hw_cipher_algo_blk *cd;
- phys_addr_t cd_paddr;
- struct icp_qat_fw_la_bulk_req fw_req;
- struct qat_crypto_instance *inst;
- rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
};
int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
@@ -147,21 +150,13 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
- uint32_t add_auth_data_length,
+ uint32_t aad_length,
uint32_t digestsize,
unsigned int operation);
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
enum qat_crypto_proto_flag proto_flags);
-void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
- int alg, const uint8_t *key,
- unsigned int keylen);
-
-void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
- int alg, const uint8_t *key,
- unsigned int keylen);
-
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_aes_docsisbpi_key(int key_len,
enum icp_qat_hw_cipher_algo *alg);
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 154e1ddd..2d16c9e2 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -17,7 +17,7 @@
* qat-linux@intel.com
*
* BSD LICENSE
- * Copyright(c) 2015-2016 Intel Corporation.
+ * Copyright(c) 2015-2017 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -121,6 +121,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -603,6 +606,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
} else {
total_key_size = cipherkeylen;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -661,7 +665,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
- uint32_t add_auth_data_length,
+ uint32_t aad_length,
uint32_t digestsize,
unsigned int operation)
{
@@ -810,13 +814,14 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
* in big-endian format. This field is 8 bytes
*/
auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16);
+ RTE_ALIGN_CEIL(aad_length, 16);
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
ICP_QAT_HW_GALOIS_128_STATE1_SZ +
ICP_QAT_HW_GALOIS_H_SZ);
- *aad_len = rte_bswap32(add_auth_data_length);
+ *aad_len = rte_bswap32(aad_length);
+ cdesc->aad_len = aad_length;
break;
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
@@ -837,8 +842,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
- auth_param->hash_state_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
break;
case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
hash->auth_config.config =
@@ -854,8 +858,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
cdesc->cd_cur_ptr += state1_size + state2_size
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
- auth_param->hash_state_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -868,6 +872,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_NULL:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_NULL);
+ state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 386aa453..1f52cabf 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,6 +60,7 @@
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
+#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>
#include "qat_logs.h"
@@ -170,16 +171,19 @@ cipher_decrypt_err:
/** Creates a context in either AES or DES in ECB mode
* Depends on openssl libcrypto
*/
-static void *
+static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
enum rte_crypto_cipher_operation direction __rte_unused,
- uint8_t *key)
+ uint8_t *key, void **ctx)
{
const EVP_CIPHER *algo = NULL;
- EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
- if (ctx == NULL)
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
goto ctx_init_err;
+ }
if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
algo = EVP_des_ecb();
@@ -187,15 +191,17 @@ bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
algo = EVP_aes_128_ecb();
/* IV will be ECB encrypted whether direction is encrypt or decrypt*/
- if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
goto ctx_init_err;
+ }
- return ctx;
+ return 0;
ctx_init_err:
- if (ctx != NULL)
- EVP_CIPHER_CTX_free(ctx);
- return NULL;
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
}
/** Frees a context previously created
@@ -213,25 +219,25 @@ adf_modulo(uint32_t data, uint32_t shift);
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie);
+ struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
-void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- void *session)
+void
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- struct qat_session *sess = session;
- phys_addr_t cd_paddr;
-
PMD_INIT_FUNC_TRACE();
- if (sess) {
- if (sess->bpi_ctx) {
- bpi_cipher_ctx_free(sess->bpi_ctx);
- sess->bpi_ctx = NULL;
- }
- cd_paddr = sess->cd_paddr;
- memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
- sess->cd_paddr = cd_paddr;
- } else
- PMD_DRV_LOG(ERR, "NULL session");
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ struct qat_session *s = (struct qat_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
static int
@@ -245,6 +251,14 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return ICP_QAT_FW_LA_CMD_AUTH;
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ }
+
if (xform->next == NULL)
return -1;
@@ -286,38 +300,37 @@ qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
return NULL;
}
-void *
+
+int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session)
{
- struct qat_session *session = session_private;
struct qat_pmd_private *internals = dev->data->dev_private;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
/* Get cipher xform from crypto xform chain */
cipher_xform = qat_get_cipher_xform(xform);
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
switch (cipher_xform->algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
if (qat_alg_validate_aes_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
case RTE_CRYPTO_CIPHER_AES_CTR:
if (qat_alg_validate_aes_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
@@ -326,6 +339,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
@@ -337,6 +351,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
@@ -345,6 +360,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_3des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -353,6 +369,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -361,38 +378,43 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_3des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
break;
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
- session->bpi_ctx = bpi_cipher_ctx_init(
+ ret = bpi_cipher_ctx_init(
cipher_xform->algo,
cipher_xform->op,
- cipher_xform->key.data);
- if (session->bpi_ctx == NULL) {
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
goto error_out;
}
if (qat_alg_validate_des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
- session->bpi_ctx = bpi_cipher_ctx_init(
+ ret = bpi_cipher_ctx_init(
cipher_xform->algo,
cipher_xform->op,
- cipher_xform->key.data);
- if (session->bpi_ctx == NULL) {
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
goto error_out;
}
if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -403,27 +425,30 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
PMD_DRV_LOG(ERR, "%s not supported on this device",
rte_crypto_cipher_algorithm_strings
[cipher_xform->algo]);
+ ret = -ENOTSUP;
goto error_out;
}
if (qat_alg_validate_zuc_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
break;
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_F8:
case RTE_CRYPTO_CIPHER_AES_XTS:
case RTE_CRYPTO_CIPHER_ARC4:
PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
cipher_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
cipher_xform->algo);
+ ret = -EINVAL;
goto error_out;
}
@@ -434,50 +459,119 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_aead_session_create_content_desc_cipher(session,
cipher_xform->key.data,
- cipher_xform->key.length))
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
goto error_out;
+ }
- return session;
+ return 0;
error_out:
if (session->bpi_ctx) {
bpi_cipher_ctx_free(session->bpi_ctx);
session->bpi_ctx = NULL;
}
- return NULL;
+ return ret;
}
-
-void *
+int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+int
+qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_session *session = session_private;
+ int ret;
int qat_cmd_id;
PMD_INIT_FUNC_TRACE();
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
+ offsetof(struct qat_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
/* Get requested QAT command id */
qat_cmd_id = qat_get_cmd_id(xform);
if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
- goto error_out;
+ return -ENOTSUP;
}
session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
switch (session->qat_cmd) {
case ICP_QAT_FW_LA_CMD_CIPHER:
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
break;
case ICP_QAT_FW_LA_CMD_AUTH:
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
break;
case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_crypto_sym_configure_session_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_crypto_sym_configure_session_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_crypto_sym_configure_session_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_crypto_sym_configure_session_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_crypto_sym_configure_session_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_crypto_sym_configure_session_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
break;
case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
case ICP_QAT_FW_LA_CMD_TRNG_TEST:
@@ -490,30 +584,26 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
case ICP_QAT_FW_LA_CMD_DELIMITER:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
- goto error_out;
+ return -ENOTSUP;
default:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
- goto error_out;
+ return -ENOTSUP;
}
- return session;
-
-error_out:
- return NULL;
+ return 0;
}
-struct qat_session *
+int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session_private)
+ struct qat_session *session)
{
-
- struct qat_session *session = session_private;
struct rte_crypto_auth_xform *auth_xform = NULL;
- struct rte_crypto_cipher_xform *cipher_xform = NULL;
struct qat_pmd_private *internals = dev->data->dev_private;
auth_xform = qat_get_auth_xform(xform);
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -534,11 +624,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
break;
- case RTE_CRYPTO_AUTH_AES_GCM:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- break;
case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_alg_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
@@ -557,7 +651,7 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
PMD_DRV_LOG(ERR, "%s not supported on this device",
rte_crypto_auth_algorithm_strings
[auth_xform->algo]);
- goto error_out;
+ return -ENOTSUP;
}
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
break;
@@ -567,42 +661,149 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
auth_xform->algo);
- goto error_out;
+ return -ENOTSUP;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
auth_xform->algo);
- goto error_out;
+ return -EINVAL;
}
- cipher_xform = qat_get_cipher_xform(xform);
- if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
- (session->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			/*
+			 * The cipher descriptor content must be created
+			 * first, then the authentication descriptor
+			 */
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			/*
+			 * The authentication descriptor content must be
+			 * created first, then the cipher descriptor
+			 */
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+		/* Restore to authentication only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
if (qat_alg_aead_session_create_content_desc_auth(session,
- cipher_xform->key.data,
- cipher_xform->key.length,
- auth_xform->add_auth_data_length,
+ key_data,
+ key_length,
+ 0,
auth_xform->digest_length,
auth_xform->op))
- goto error_out;
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
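
For AES-GMAC the authentication transform now carries both the key and the IV, and the session is temporarily switched to a cipher+hash command while the two content descriptors are built, then restored to authentication only. For reference, a minimal sketch (not part of this patch) of the transform an application would hand to this path under the 17.08 symmetric API; the key pointer and IV offset are placeholder assumptions:

#include <rte_crypto_sym.h>

/* Illustrative sketch only: an AES-GMAC auth transform as consumed by
 * qat_crypto_sym_configure_session_auth(). "key" and "iv_offset" are
 * placeholders chosen by the application.
 */
static struct rte_crypto_sym_xform
make_gmac_xform(uint8_t *key, uint16_t iv_offset)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_AES_GMAC,
			.key = { .data = key, .length = 16 },
			.iv = { .offset = iv_offset, .length = 12 },
			.digest_length = 16,
		},
	};

	return xform;
}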
+
+int
+qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_alg_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
+ aead_xform->algo);
+ return -ENOTSUP;
+ default:
+		PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD algo %u specified",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		/*
+		 * The cipher descriptor content must be created first,
+		 * then the authentication descriptor
+		 */
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ RTE_CRYPTO_AUTH_OP_GENERATE))
+ return -EINVAL;
} else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+		/*
+		 * The authentication descriptor content must be created
+		 * first, then the cipher descriptor
+		 */
if (qat_alg_aead_session_create_content_desc_auth(session,
- auth_xform->key.data,
- auth_xform->key.length,
- auth_xform->add_auth_data_length,
- auth_xform->digest_length,
- auth_xform->op))
- goto error_out;
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ RTE_CRYPTO_AUTH_OP_VERIFY))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
}
- return session;
-error_out:
- return NULL;
+ session->digest_length = aead_xform->digest_length;
+ return 0;
}
unsigned qat_crypto_sym_get_session_private_size(
@@ -617,7 +818,8 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
{
uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
if (last_block_len &&
ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
@@ -641,7 +843,8 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
iv = last_block - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
@@ -670,7 +873,8 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
{
uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
if (last_block_len > 0 &&
ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
@@ -696,7 +900,8 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
iv = dst - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "BPI: src before post-process:", last_block,
@@ -752,12 +957,12 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
while (nb_ops_sent != nb_ops_possible) {
ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size]);
+ tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
/*
* This message cannot be enqueued,
- * decrease number of ops that wasnt sent
+				 * decrease number of ops that weren't sent
*/
rte_atomic16_sub(&tmp_qp->inflights16,
nb_ops_possible - nb_ops_sent);
@@ -808,7 +1013,10 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
struct qat_session *sess = (struct qat_session *)
- (rx_op->sym->session->_private);
+ get_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
if (sess->bpi_ctx)
qat_bpicipher_postprocess(sess, rx_op);
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
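
With the 17.08 session rework a single rte_cryptodev_sym_session keeps one private-data slot per driver, so the dequeue path above resolves the QAT data through the driver id instead of the removed _private pointer. A minimal sketch of that lookup, assuming the PMD-internal helpers and headers referenced in this hunk:

#include <rte_cryptodev_pmd.h>
#include "qat_crypto.h"	/* PMD-internal: struct qat_session, driver id */

/* Illustrative sketch only: fetch this driver's private session data,
 * as the enqueue and dequeue paths above do.
 */
static inline struct qat_session *
qat_get_session(struct rte_crypto_op *op)
{
	return (struct qat_session *)get_session_private_data(
			op->sym->session, cryptodev_qat_driver_id);
}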
@@ -882,23 +1090,44 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
return 0;
}
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
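
From 17.08 the IV is no longer passed as a pointer inside the op: it lives at a fixed offset within the rte_crypto_op mempool element, and set_cipher_iv() either copies it into the request descriptor when it fits or hands over its physical address. A small sketch of how an application typically reserves and fills that space; the offset convention is an application-side assumption, not something mandated by the PMD:

#include <rte_crypto.h>
#include <rte_memcpy.h>

/* Illustrative sketch only: keep the IV in the private area that
 * follows the symmetric op and record the same offset in the
 * transform's iv.offset field.
 */
#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			sizeof(struct rte_crypto_sym_op))

static void
attach_iv(struct rte_crypto_op *op, const uint8_t *iv, uint16_t iv_len)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			IV_OFFSET);

	rte_memcpy(iv_ptr, iv, iv_len);
}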
+
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie)
+ struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
{
int ret = 0;
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
uint32_t cipher_len = 0, cipher_ofs = 0;
uint32_t auth_len = 0, auth_ofs = 0;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
uint8_t do_sgl = 0;
-
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
@@ -907,18 +1136,26 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
return -EINVAL;
}
#endif
- if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ ctx = (struct qat_session *)get_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)op->sym->session->_private;
+ if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
+ PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -926,9 +1163,15 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- do_auth = 1;
- do_cipher = 1;
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
do_auth = 1;
do_cipher = 0;
@@ -970,26 +1213,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
cipher_ofs = op->sym->cipher.data.offset;
}
- /* copy IV into request if it fits */
- /*
- * If IV length is zero do not copy anything but still
- * use request descriptor embedded IV
- *
- */
- if (op->sym->cipher.iv.length) {
- if (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- op->sym->cipher.iv.phys_addr;
- }
- }
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
min_ofs = cipher_ofs;
}
@@ -1009,34 +1234,70 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_ofs = op->sym->auth.data.offset >> 3;
auth_len = op->sym->auth.data.length >> 3;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
- if (do_cipher) {
- auth_len = auth_len + auth_ofs + 1 -
- ICP_QAT_HW_KASUMI_BLK_SZ;
- auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
- } else {
- auth_len = auth_len + auth_ofs + 1;
- auth_ofs = 0;
- }
- }
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- auth_ofs = op->sym->cipher.data.offset;
- auth_len = op->sym->cipher.data.length;
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
+
}
min_ofs = auth_ofs;
auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
- auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+ }
+ if (do_aead) {
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = op->sym->aead.data.offset;
}
if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
@@ -1080,7 +1341,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
dst_buf_start = src_buf_start;
}
- if (do_cipher) {
+ if (do_cipher || do_aead) {
cipher_param->cipher_offset =
(uint32_t)rte_pktmbuf_mtophys_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
@@ -1089,7 +1350,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
cipher_param->cipher_offset = 0;
cipher_param->cipher_length = 0;
}
- if (do_auth) {
+
+ if (do_auth || do_aead) {
auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
@@ -1097,6 +1359,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_param->auth_off = 0;
auth_param->auth_len = 0;
}
+
qat_req->comn_mid.dst_length =
qat_req->comn_mid.src_length =
(cipher_param->cipher_offset + cipher_param->cipher_length)
@@ -1142,48 +1405,38 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- if (op->sym->cipher.iv.length == 12) {
- /*
- * For GCM a 12 bit IV is allowed,
- * but we need to inform the f/w
- */
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
- if (op->sym->cipher.data.length == 0) {
- /*
- * GMAC
- */
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- op->sym->auth.aad.phys_addr;
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- rte_pktmbuf_data_len(op->sym->m_src);
- cipher_param->cipher_length = 0;
- cipher_param->cipher_offset = 0;
- auth_param->u1.aad_adr = 0;
- auth_param->auth_len = op->sym->auth.aad.length;
- auth_param->auth_off = op->sym->auth.data.offset;
- auth_param->u2.aad_sz = 0;
- }
- }
-
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
rte_hexdump(stdout, "src_data:",
rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
rte_pktmbuf_data_len(op->sym->m_src));
- rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- op->sym->auth.digest.length);
- rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
- op->sym->auth.aad.length);
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
#endif
return 0;
}
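
In the new do_aead branch the request takes its data region, AAD and digest from op->sym->aead rather than from the old cipher/auth unions. For reference, a sketch (not part of this patch) of how a GCM op is populated under the 17.08 layout; the mbuf, session, AAD and digest buffers are placeholder assumptions:

#include <rte_crypto.h>
#include <rte_mbuf.h>

/* Illustrative sketch only: fill a symmetric op for AES-GCM using the
 * aead op fields read by qat_write_hw_desc_entry(). All parameters
 * are placeholders supplied by the application.
 */
static void
fill_gcm_op(struct rte_crypto_op *op, struct rte_mbuf *m,
		struct rte_cryptodev_sym_session *sess,
		uint8_t *aad, phys_addr_t aad_phys,
		uint8_t *digest, phys_addr_t digest_phys)
{
	struct rte_crypto_sym_op *sym = op->sym;

	rte_crypto_op_attach_sym_session(op, sess);
	sym->m_src = m;
	sym->aead.data.offset = 0;
	sym->aead.data.length = rte_pktmbuf_data_len(m);
	sym->aead.aad.data = aad;
	sym->aead.aad.phys_addr = aad_phys;
	sym->aead.digest.data = digest;
	sym->aead.digest.phys_addr = digest_phys;
}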
@@ -1196,17 +1449,6 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
return data - mult;
}
-void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
-{
- struct rte_cryptodev_sym_session *sess = sym_sess;
- struct qat_session *s = (void *)sess->_private;
-
- PMD_INIT_FUNC_TRACE();
- s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
- offsetof(struct qat_session, cd) +
- offsetof(struct rte_cryptodev_sym_session, _private);
-}
-
int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
__rte_unused struct rte_cryptodev_config *config)
{
@@ -1240,8 +1482,8 @@ int qat_dev_close(struct rte_cryptodev *dev)
return 0;
}
-void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info)
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -1253,7 +1495,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = internals->qat_dev_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ info->driver_id = cryptodev_qat_driver_id;
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index b740d6b0..3f35a00e 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -39,6 +39,9 @@
#include "qat_crypto_capabilities.h"
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+/**< Intel QAT Symmetric Crypto PMD device name */
+
/*
* This macro rounds up a number to a be a multiple of
* the alignment when the alignment is a power of 2
@@ -47,6 +50,13 @@
(((num) + (align) - 1) & ~((align) - 1))
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+struct qat_session;
+
+enum qat_device_gen {
+ QAT_GEN1 = 1,
+ QAT_GEN2,
+};
+
/**
* Structure associated with each queue.
*/
@@ -74,6 +84,7 @@ struct qat_qp {
struct rte_mempool *op_cookie_pool;
void **op_cookies;
uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
} __rte_cache_aligned;
/** private data structure for each QAT device */
@@ -82,9 +93,13 @@ struct qat_pmd_private {
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
/**< Max number of sessions supported by device */
+ enum qat_device_gen qat_dev_gen;
+ /**< QAT device generation */
const struct rte_cryptodev_capabilities *qat_dev_capabilities;
};
+extern uint8_t cryptodev_qat_driver_id;
+
int qat_dev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config);
int qat_dev_start(struct rte_cryptodev *dev);
@@ -98,7 +113,8 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *rx_conf, int socket_id);
+ const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
+ struct rte_mempool *session_pool);
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
uint16_t queue_pair_id);
@@ -109,26 +125,35 @@ qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
extern unsigned
qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
-extern void
-qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
-
-extern void *
+extern int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+
+int
+qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private);
-struct qat_session *
+int
+qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_session *session);
+
+int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session_private);
+ struct qat_session *session);
-void *
+int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session);
extern void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
extern uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 1294f247..70120072 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -34,7 +34,7 @@
#ifndef _QAT_CRYPTO_CAPABILITIES_H_
#define _QAT_CRYPTO_CAPABILITIES_H_
-#define QAT_BASE_CPM16_SYM_CAPABILITIES \
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
{ /* SHA1 HMAC */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -43,16 +43,16 @@
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 20, \
.max = 20, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -64,16 +64,16 @@
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 28, \
.max = 28, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -85,16 +85,16 @@
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 32, \
.max = 32, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -104,18 +104,18 @@
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
{.auth = { \
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
- .block_size = 64, \
+ .block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 48, \
.max = 48, \
.increment = 0 \
- }, \
- .aad_size = { 0 } \
+ }, \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -127,16 +127,16 @@
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
.block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 64, \
.max = 64, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -148,16 +148,16 @@
.algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 8, \
+ .min = 1, \
.max = 64, \
- .increment = 8 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -178,16 +178,17 @@
.max = 16, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .aad_size = { 0 }, \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
- { /* AES GCM (AUTH) */ \
+ { /* AES GCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
- {.auth = { \
- .algo = RTE_CRYPTO_AUTH_AES_GCM, \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
.block_size = 16, \
.key_size = { \
.min = 16, \
@@ -203,7 +204,12 @@
.min = 0, \
.max = 240, \
.increment = 1 \
- } \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ }, \
}, } \
}, } \
}, \
@@ -224,10 +230,10 @@
.max = 16, \
.increment = 4 \
}, \
- .aad_size = { \
- .min = 1, \
- .max = 65535, \
- .increment = 1 \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
} \
}, } \
}, } \
@@ -249,7 +255,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
+ .iv_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
@@ -257,26 +263,6 @@
}, } \
}, } \
}, \
- { /* AES GCM (CIPHER) */ \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
- {.cipher = { \
- .algo = RTE_CRYPTO_CIPHER_AES_GCM, \
- .block_size = 16, \
- .key_size = { \
- .min = 16, \
- .max = 32, \
- .increment = 8 \
- }, \
- .iv_size = { \
- .min = 12, \
- .max = 12, \
- .increment = 0 \
- } \
- }, } \
- }, } \
- }, \
{ /* AES CBC */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -374,7 +360,7 @@
.max = 0, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, }, \
}, }, \
}, \
@@ -435,11 +421,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
- .min = 8, \
- .max = 8, \
- .increment = 0 \
- } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -524,7 +506,7 @@
}, } \
}
-#define QAT_EXTRA_CPM17_SYM_CAPABILITIES \
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
{ /* ZUC (EEA3) */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -562,7 +544,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
+ .iv_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
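
With GCM folded into a single AEAD capability entry and the aad_size fields of plain auth algorithms replaced by iv_size, applications should look the algorithm up through the AEAD capability index. A sketch of such a check against the 17.08 capability structures shown above; the device id is a placeholder and the aead member of the index union is assumed to be present in this release:

#include <rte_cryptodev.h>

/* Illustrative sketch only: verify that a device advertises AES-GCM as
 * an AEAD capability with the key and IV sizes we intend to use.
 */
static int
check_gcm_support(uint8_t dev_id)
{
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
	};
	const struct rte_cryptodev_symmetric_capability *cap =
		rte_cryptodev_sym_capability_get(dev_id, &idx);

	if (cap == NULL)
		return -1;

	/* e.g. require support for 256-bit keys and 96-bit IVs */
	if (cap->aead.key_size.max < 32 || cap->aead.iv_size.min > 12)
		return -1;

	return 0;
}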
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index a358ccd7..5048d214 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -36,6 +36,7 @@
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
@@ -133,7 +134,7 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
{
struct qat_qp *qp;
struct rte_pci_device *pci_dev;
@@ -205,7 +206,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
adf_configure_queues(qp);
adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
- dev->driver->pci_drv.driver.name, dev->data->dev_id,
+ pci_dev->driver->driver.name, dev->data->dev_id,
queue_pair_id);
qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
@@ -242,6 +243,11 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
offsetof(struct qat_crypto_op_cookie,
qat_sgl_list_dst);
}
+
+ struct qat_pmd_private *internals
+ = dev->data->dev_private;
+ qp->qat_dev_gen = internals->qat_dev_gen;
+
dev->data->queue_pairs[queue_pair_id] = qp;
return 0;
@@ -355,11 +361,13 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
return -EINVAL;
}
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
/*
* Allocate a memzone for the queue - create a unique name.
*/
snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
+ pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
socket_id);
@@ -408,7 +416,6 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
queue->queue_size);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
io_addr = pci_dev->mem_resource[0].addr;
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 1bdd30da..7d56fca4 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -35,18 +35,21 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_pci.h>
#include "qat_crypto.h"
#include "qat_logs.h"
-static const struct rte_cryptodev_capabilities qat_cpm16_capabilities[] = {
- QAT_BASE_CPM16_SYM_CAPABILITIES,
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-static const struct rte_cryptodev_capabilities qat_cpm17_capabilities[] = {
- QAT_BASE_CPM16_SYM_CAPABILITIES,
- QAT_EXTRA_CPM17_SYM_CAPABILITIES,
+static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -70,7 +73,6 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.session_get_size = qat_crypto_sym_get_session_private_size,
.session_configure = qat_crypto_sym_configure_session,
- .session_initialize = qat_crypto_sym_session_init,
.session_clear = qat_crypto_sym_clear_session
};
@@ -95,8 +97,7 @@ static const struct rte_pci_id pci_id_qat_map[] = {
};
static int
-crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_drv,
- struct rte_cryptodev *cryptodev)
+crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
{
struct qat_pmd_private *internals;
@@ -106,7 +107,7 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
@@ -121,12 +122,14 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
case 0x0443:
- internals->qat_dev_capabilities = qat_cpm16_capabilities;
+ internals->qat_dev_gen = QAT_GEN1;
+ internals->qat_dev_capabilities = qat_gen1_capabilities;
break;
case 0x37c9:
case 0x19e3:
case 0x6f55:
- internals->qat_dev_capabilities = qat_cpm17_capabilities;
+ internals->qat_dev_gen = QAT_GEN2;
+ internals->qat_dev_capabilities = qat_gen2_capabilities;
break;
default:
PMD_DRV_LOG(ERR,
@@ -147,17 +150,25 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
return 0;
}
-static struct rte_cryptodev_driver rte_qat_pmd = {
- .pci_drv = {
- .id_table = pci_id_qat_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_cryptodev_pci_probe,
- .remove = rte_cryptodev_pci_remove,
- },
- .cryptodev_init = crypto_qat_dev_init,
- .dev_private_size = sizeof(struct qat_pmd_private),
+static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_cryptodev_pci_generic_probe(pci_dev,
+ sizeof(struct qat_pmd_private), crypto_qat_dev_init);
+}
+
+static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_cryptodev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = crypto_qat_pci_probe,
+ .remove = crypto_qat_pci_remove
};
-RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-
+RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_qat_pmd, cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
index c273e784..b045410c 100644
--- a/drivers/crypto/scheduler/Makefile
+++ b/drivers/crypto/scheduler/Makefile
@@ -56,5 +56,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_multicore.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 319dcf0a..df8634ad 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -198,7 +198,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -226,12 +226,12 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
rte_cryptodev_info_get(slave_id, &dev_info);
slave->dev_id = slave_id;
- slave->dev_type = dev_info.dev_type;
+ slave->driver_id = dev_info.driver_id;
sched_ctx->nb_slaves++;
if (update_scheduler_capability(sched_ctx) < 0) {
slave->dev_id = 0;
- slave->dev_type = 0;
+ slave->driver_id = 0;
sched_ctx->nb_slaves--;
CS_LOG_ERR("capabilities update failed");
@@ -257,7 +257,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -314,7 +314,7 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -351,6 +351,13 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -1;
}
break;
+ case CDEV_SCHED_MODE_MULTICORE:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ multicore_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
default:
CS_LOG_ERR("Not yet supported");
return -ENOTSUP;
@@ -359,13 +366,6 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return 0;
}
-int
-rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
- enum rte_cryptodev_scheduler_mode mode)
-{
- return rte_cryptodev_scheduler_mode_set(scheduler_id, mode);
-}
-
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
@@ -377,7 +377,7 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -387,12 +387,6 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
return sched_ctx->mode;
}
-enum rte_cryptodev_scheduler_mode
-rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id)
-{
- return rte_cryptodev_scheduler_mode_get(scheduler_id);
-}
-
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
uint32_t enable_reorder)
@@ -405,7 +399,7 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -433,7 +427,7 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -455,7 +449,7 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -467,8 +461,22 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
sched_ctx = dev->data->dev_private;
+ if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", scheduler->name,
+ RTE_CRYPTODEV_NAME_MAX_LEN);
+ return -EINVAL;
+ }
strncpy(sched_ctx->name, scheduler->name,
RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ if (strlen(scheduler->description) >
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid description %s, should be less than "
+ "%u bytes.\n", scheduler->description,
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
+ return -EINVAL;
+ }
strncpy(sched_ctx->description, scheduler->description,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
@@ -512,7 +520,7 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
return -ENOTSUP;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
@@ -580,7 +588,7 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
return -EINVAL;
}
- if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ if (dev->driver_id != cryptodev_driver_id) {
CS_LOG_ERR("Operation not supported");
return -ENOTSUP;
}
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 2ba6e470..df22f2a9 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -58,12 +58,17 @@ extern "C" {
#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
#endif
+/** Maximum number of multi-core worker cores */
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
+
/** Round-robin scheduling mode string */
#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
/** Packet-size based distribution scheduling mode string */
#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
/** Fail-over scheduling mode string */
#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
+/** Multi-core scheduling mode string */
+#define SCHEDULER_MODE_NAME_MULTI_CORE multi-core
/**
* Crypto scheduler PMD operation modes
@@ -78,6 +83,8 @@ enum rte_cryptodev_scheduler_mode {
CDEV_SCHED_MODE_PKT_SIZE_DISTR,
/** Fail-over mode */
CDEV_SCHED_MODE_FAILOVER,
+	/** Multi-core mode */
+ CDEV_SCHED_MODE_MULTICORE,
CDEV_SCHED_MODE_COUNT /**< number of modes */
};
@@ -116,6 +123,7 @@ struct rte_cryptodev_scheduler;
* - 0 if the scheduler is successfully loaded
* - -ENOTSUP if the operation is not supported.
* - -EBUSY if device is started.
+ * - -EINVAL if input values are invalid.
*/
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
@@ -186,38 +194,6 @@ enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
/**
- * @deprecated
- * Set the scheduling mode
- *
- * @param scheduler_id
- * The target scheduler device ID
- * @param mode
- * The scheduling mode
- *
- * @return
- * 0 if attaching successful, negative integer if otherwise.
- */
-__rte_deprecated
-int
-rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
- enum rte_cryptodev_scheduler_mode mode);
-
-/**
- * @deprecated
- * Get the current scheduling mode
- *
- * @param scheduler_id
- * The target scheduler device ID
- *
- * @return
- * If successful, returns the scheduling mode, negative integer
- * otherwise
- */
-__rte_deprecated
-enum rte_cryptodev_scheduler_mode
-rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id);
-
-/**
* Set the crypto ops reordering feature on/off
*
* @param scheduler_id
@@ -327,6 +303,8 @@ extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
/** Fail-over mode scheduler */
extern struct rte_cryptodev_scheduler *failover_scheduler;
+/** Multi-core mode scheduler */
+extern struct rte_cryptodev_scheduler *multicore_scheduler;
#ifdef __cplusplus
}
diff --git a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
index 0a8b4716..5c43127c 100644
--- a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
+++ b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -4,8 +4,6 @@ DPDK_17.02 {
rte_cryptodev_scheduler_load_user_scheduler;
rte_cryptodev_scheduler_slave_attach;
rte_cryptodev_scheduler_slave_detach;
- rte_crpytodev_scheduler_mode_set;
- rte_crpytodev_scheduler_mode_get;
rte_cryptodev_scheduler_ordering_set;
rte_cryptodev_scheduler_ordering_get;
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 2471a5f1..2aa13f8e 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -48,58 +48,19 @@ struct fo_scheduler_qp_ctx {
uint8_t deq_idx;
};
-static inline uint16_t __attribute__((always_inline))
-failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
+static __rte_always_inline uint16_t
+failover_slave_enqueue(struct scheduler_slave *slave,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
uint16_t i, processed_ops;
- struct rte_cryptodev_sym_session *sessions[nb_ops];
- struct scheduler_session *sess0, *sess1, *sess2, *sess3;
for (i = 0; i < nb_ops && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
- for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
- rte_prefetch0(ops[i + 4]->sym->session);
- rte_prefetch0(ops[i + 5]->sym->session);
- rte_prefetch0(ops[i + 6]->sym->session);
- rte_prefetch0(ops[i + 7]->sym->session);
-
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sess1 = (struct scheduler_session *)
- ops[i+1]->sym->session->_private;
- sess2 = (struct scheduler_session *)
- ops[i+2]->sym->session->_private;
- sess3 = (struct scheduler_session *)
- ops[i+3]->sym->session->_private;
-
- sessions[i] = ops[i]->sym->session;
- sessions[i + 1] = ops[i + 1]->sym->session;
- sessions[i + 2] = ops[i + 2]->sym->session;
- sessions[i + 3] = ops[i + 3]->sym->session;
-
- ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i + 1]->sym->session = sess1->sessions[slave_idx];
- ops[i + 2]->sym->session = sess2->sessions[slave_idx];
- ops[i + 3]->sym->session = sess3->sessions[slave_idx];
- }
-
- for (; i < nb_ops; i++) {
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
- ops[i]->sym->session = sess0->sessions[slave_idx];
- }
-
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id, ops, nb_ops);
slave->nb_inflight_cops += processed_ops;
- if (unlikely(processed_ops < nb_ops))
- for (i = processed_ops; i < nb_ops; i++)
- ops[i]->sym->session = sessions[i];
-
return processed_ops;
}
@@ -114,11 +75,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return 0;
enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
- PRIMARY_SLAVE_IDX, ops, nb_ops);
+ ops, nb_ops);
if (enqueued_ops < nb_ops)
enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
- SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
+ &ops[enqueued_ops],
nb_ops - enqueued_ops);
return enqueued_ops;
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
new file mode 100644
index 00000000..0cd5bce5
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -0,0 +1,415 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define MC_SCHED_ENQ_RING_NAME_PREFIX "MCS_ENQR_"
+#define MC_SCHED_DEQ_RING_NAME_PREFIX "MCS_DEQR_"
+
+#define MC_SCHED_BUFFER_SIZE 32
+
+#define CRYPTO_OP_STATUS_BIT_COMPLETE 0x80
+
+/** multi-core scheduler context */
+struct mc_scheduler_ctx {
+ uint32_t num_workers; /**< Number of workers polling */
+ uint32_t stop_signal;
+
+ struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+};
+
+struct mc_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_worker_idx;
+ uint32_t last_deq_worker_idx;
+
+ struct mc_scheduler_ctx *mc_private_ctx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_queue_ops;
+ processed_ops += nb_queue_ops;
+
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+ mc_qp_ctx->last_enq_worker_idx = worker_idx;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+ uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_deq_ops;
+ processed_ops += nb_deq_ops;
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+
+ mc_qp_ctx->last_deq_worker_idx = worker_idx;
+
+ return processed_ops;
+
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+
+ if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
+ break;
+
+ op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq) {
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+ }
+
+ return nb_ops_deqd;
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+mc_scheduler_worker(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ struct rte_ring *enq_ring;
+ struct rte_ring *deq_ring;
+ uint32_t core_id = rte_lcore_id();
+ int i, worker_idx = -1;
+ struct scheduler_slave *slave;
+ struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
+ struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
+ uint16_t processed_ops;
+ uint16_t pending_enq_ops = 0;
+ uint16_t pending_enq_ops_idx = 0;
+ uint16_t pending_deq_ops = 0;
+ uint16_t pending_deq_ops_idx = 0;
+ uint16_t inflight_ops = 0;
+ const uint8_t reordering_enabled = sched_ctx->reordering_enabled;
+
+ for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
+ if (sched_ctx->wc_pool[i] == core_id) {
+ worker_idx = i;
+ break;
+ }
+ }
+ if (worker_idx == -1) {
+		CS_LOG_ERR("worker on core %u: cannot find worker index!\n", core_id);
+ return -1;
+ }
+
+ slave = &sched_ctx->slaves[worker_idx];
+ enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+
+ while (!mc_ctx->stop_signal) {
+ if (pending_enq_ops) {
+ processed_ops =
+ rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, &enq_ops[pending_enq_ops_idx],
+ pending_enq_ops);
+ pending_enq_ops -= processed_ops;
+ pending_enq_ops_idx += processed_ops;
+ inflight_ops += processed_ops;
+ } else {
+ processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
+ MC_SCHED_BUFFER_SIZE, NULL);
+ if (processed_ops) {
+ pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
+ slave->dev_id, slave->qp_id,
+ enq_ops, processed_ops);
+ pending_enq_ops = processed_ops - pending_enq_ops_idx;
+ inflight_ops += pending_enq_ops_idx;
+ }
+ }
+
+ if (pending_deq_ops) {
+ processed_ops = rte_ring_enqueue_burst(
+ deq_ring, (void *)&deq_ops[pending_deq_ops_idx],
+ pending_deq_ops, NULL);
+ pending_deq_ops -= processed_ops;
+ pending_deq_ops_idx += processed_ops;
+ } else if (inflight_ops) {
+ processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+ if (processed_ops) {
+ inflight_ops -= processed_ops;
+ if (reordering_enabled) {
+ uint16_t j;
+
+ for (j = 0; j < processed_ops; j++) {
+ deq_ops[j]->status |=
+ CRYPTO_OP_STATUS_BIT_COMPLETE;
+ }
+ } else {
+ pending_deq_ops_idx = rte_ring_enqueue_burst(
+ deq_ring, (void *)deq_ops, processed_ops,
+ NULL);
+ pending_deq_ops = processed_ops -
+ pending_deq_ops_idx;
+ }
+ }
+ }
+
+ rte_pause();
+ }
+
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 0;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_remote_launch(
+ (lcore_function_t *)mc_scheduler_worker, dev,
+ sched_ctx->wc_pool[i]);
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(mc_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ mc_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ mc_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ mc_qp_ctx->last_enq_worker_idx = 0;
+ mc_qp_ctx->last_deq_worker_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 1;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx;
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+
+ mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
+ rte_socket_id());
+ if (!mc_qp_ctx) {
+		CS_LOG_ERR("failed to allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ mc_qp_ctx->mc_private_ctx = mc_ctx;
+ qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;
+
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx;
+ uint16_t i;
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!mc_ctx) {
+		CS_LOG_ERR("failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ mc_ctx->num_workers = sched_ctx->nb_wc;
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ char r_name[16];
+
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u", i);
+ return -1;
+ }
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u", i);
+ return -1;
+ }
+ }
+
+ sched_ctx->private_ctx = (void *)mc_ctx;
+
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler mc_scheduler = {
+ .name = "multicore-scheduler",
+ .description = "scheduler which will run burst across multiple cpu cores",
+ .mode = CDEV_SCHED_MODE_MULTICORE,
+ .ops = &scheduler_mc_ops
+};
+
+struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
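
The new multi-core mode hands each burst to per-worker software rings; dedicated lcores (taken from the coremask/corelist vdev arguments added later in this patch) drain those rings into their own slave queue pair and push results back for the application to dequeue. A minimal sketch (not part of this patch) of selecting the mode through the scheduler API touched here; the device ids are placeholders, and the worker coremask is assumed to have been supplied on the vdev command line:

#include <rte_cryptodev_scheduler.h>

/* Illustrative sketch only: attach two slave devices to a scheduler
 * vdev and switch it to the new multi-core mode.
 */
static int
enable_multicore_mode(uint8_t sched_id, uint8_t slave0, uint8_t slave1)
{
	if (rte_cryptodev_scheduler_slave_attach(sched_id, slave0) < 0 ||
	    rte_cryptodev_scheduler_slave_attach(sched_id, slave1) < 0)
		return -1;

	return rte_cryptodev_scheduler_mode_set(sched_id,
			CDEV_SCHED_MODE_MULTICORE);
}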
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 6b628dfa..1dd1bc32 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -67,7 +67,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
struct scheduler_qp_ctx *qp_ctx = qp;
struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
- struct scheduler_session *sess;
uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
psd_qp_ctx->primary_slave.nb_inflight_cops,
psd_qp_ctx->secondary_slave.nb_inflight_cops
@@ -97,8 +96,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
rte_prefetch0(ops[i + 7]->sym);
rte_prefetch0(ops[i + 7]->sym->session);
- sess = (struct scheduler_session *)
- ops[i]->sym->session->_private;
/* job_len is initialized as cipher data length, once
* it is 0, equals to auth data length
*/
@@ -118,11 +115,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
- ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+1]->sym->session->_private;
job_len = ops[i+1]->sym->cipher.data.length;
job_len += (ops[i+1]->sym->cipher.data.length == 0) *
ops[i+1]->sym->auth.data.length;
@@ -135,11 +129,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
- ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+2]->sym->session->_private;
job_len = ops[i+2]->sym->cipher.data.length;
job_len += (ops[i+2]->sym->cipher.data.length == 0) *
ops[i+2]->sym->auth.data.length;
@@ -152,12 +143,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
- ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
- sess = (struct scheduler_session *)
- ops[i+3]->sym->session->_private;
-
job_len = ops[i+3]->sym->cipher.data.length;
job_len += (ops[i+3]->sym->cipher.data.length == 0) *
ops[i+3]->sym->auth.data.length;
@@ -170,14 +157,10 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
- ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
}
for (; i < nb_ops; i++) {
- sess = (struct scheduler_session *)
- ops[i]->sym->session->_private;
-
job_len = ops[i]->sym->cipher.data.length;
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
@@ -190,7 +173,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
- ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
p_enq_op->pos++;
}
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 0b63c20b..400fc4f1 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -33,6 +33,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -41,11 +42,14 @@
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
+uint8_t cryptodev_driver_id;
+
struct scheduler_init_params {
struct rte_crypto_vdev_init_params def_p;
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
+ uint64_t wcmask;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -57,6 +61,8 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
+#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
@@ -65,7 +71,9 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_ORDERING,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- RTE_CRYPTODEV_VDEV_SOCKET_ID
+ RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ RTE_CRYPTODEV_VDEV_COREMASK,
+ RTE_CRYPTODEV_VDEV_CORELIST
};
struct scheduler_parse_map {
@@ -79,7 +87,9 @@ const struct scheduler_parse_map scheduler_mode_map[] = {
{RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
CDEV_SCHED_MODE_PKT_SIZE_DISTR},
{RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
- CDEV_SCHED_MODE_FAILOVER}
+ CDEV_SCHED_MODE_FAILOVER},
+ {RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
+ CDEV_SCHED_MODE_MULTICORE}
};
const struct scheduler_parse_map scheduler_ordering_map[] = {
@@ -89,7 +99,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = {
static int
cryptodev_scheduler_create(const char *name,
- struct scheduler_init_params *init_params)
+ struct rte_vdev_device *vdev,
+ struct scheduler_init_params *init_params)
{
struct rte_cryptodev *dev;
struct scheduler_ctx *sched_ctx;
@@ -101,22 +112,38 @@ cryptodev_scheduler_create(const char *name,
sizeof(init_params->def_p.name),
"%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->def_p.name,
+ dev = rte_cryptodev_vdev_pmd_init(init_params->def_p.name,
sizeof(struct scheduler_ctx),
- init_params->def_p.socket_id);
+ init_params->def_p.socket_id,
+ vdev);
if (dev == NULL) {
CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
name);
return -EFAULT;
}
- dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
sched_ctx = dev->data->dev_private;
sched_ctx->max_nb_queue_pairs =
init_params->def_p.max_nb_queue_pairs;
+ if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
+ uint16_t i;
+
+ sched_ctx->nb_wc = 0;
+
+ for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
+ if (init_params->wcmask & (1ULL << i)) {
+ sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
+ RTE_LOG(INFO, PMD,
+ " Worker core[%u]=%u added\n",
+ sched_ctx->nb_wc-1, i);
+ }
+ }
+ }
+
if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
init_params->mode < CDEV_SCHED_MODE_COUNT) {
ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
@@ -219,22 +246,6 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
return 0;
}
-static uint8_t
-number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
-
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
-
/** Parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
@@ -251,6 +262,43 @@ parse_integer_arg(const char *key __rte_unused,
return 0;
}
+/** Parse integer from hexadecimal integer argument */
+static int
+parse_coremask_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->wcmask = strtoull(value, NULL, 16);
+
+ return 0;
+}
+
+/** Parse integer from list of integers argument */
+static int
+parse_corelist_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->wcmask = 0ULL;
+
+ const char *token = value;
+
+ while (isdigit(token[0])) {
+ char *rval;
+ unsigned int core = strtoul(token, &rval, 10);
+
+ params->wcmask |= 1ULL << core;
+ token = (const char *)rval;
+ if (token[0] == '\0')
+ break;
+ token++;
+ }
+
+ return 0;
+}
+
/** Parse name */
static int
parse_name_arg(const char *key __rte_unused,
@@ -277,7 +325,7 @@ parse_slave_arg(const char *key __rte_unused,
{
struct scheduler_init_params *param = extra_args;
- if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
CS_LOG_ERR("Too many slaves.\n");
return -ENOMEM;
}
@@ -370,6 +418,18 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
+ &parse_coremask_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
+ &parse_corelist_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
&parse_name_arg,
&params->def_p);
@@ -390,12 +450,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
&parse_ordering_arg, params);
if (ret < 0)
goto free_kvlist;
-
- if (params->def_p.socket_id >= number_of_sockets()) {
- CDEV_LOG_ERR("Invalid socket id specified to create "
- "the virtual crypto device on");
- goto free_kvlist;
- }
}
free_kvlist:
@@ -437,8 +491,12 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
if (init_params.def_p.name[0] != '\0')
RTE_LOG(INFO, PMD, " User defined name = %s\n",
init_params.def_p.name);
+ if (init_params.wcmask != 0)
+ RTE_LOG(INFO, PMD, " worker core mask = %"PRIx64"\n",
+ init_params.wcmask);
return cryptodev_scheduler_create(name,
+ vdev,
&init_params);
}
@@ -454,3 +512,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_sessions=<int> "
"socket_id=<int> "
"slave=<name>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_scheduler_pmd_drv,
+ cryptodev_driver_id);
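
For reference, the coremask and corelist arguments parsed above are passed at vdev creation time. The following is a hedged sketch that assumes rte_vdev_init() from <rte_vdev.h> (already included by this file); the mode and slave argument keys and the slave device name are not shown in this hunk and are assumptions used only for illustration.

#include <rte_vdev.h>

/* Hypothetical creation of a multi-core scheduler. "coremask"/"corelist"
 * are the keys registered above; "mode", "slave" and the slave name are
 * assumed here for illustration only.
 */
static int
create_mc_scheduler(void)
{
	return rte_vdev_init("crypto_scheduler",
		"slave=crypto_aesni_mb_1,mode=multi-core,coremask=0xf0");
}
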
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 2b5858df..d3795346 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -37,6 +37,7 @@
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_reorder.h>
#include "scheduler_pmd_private.h"
@@ -368,7 +369,7 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
max_nb_sessions;
}
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = sched_ctx->capabilities;
dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
@@ -398,7 +399,8 @@ scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
/** Setup a queue pair */
static int
scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct scheduler_qp_ctx *qp_ctx;
@@ -420,8 +422,13 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+ /*
+ * All slaves will share the same session mempool
+ * for session-less operations, so the objects
+ * must be big enough for all the drivers used.
+ */
ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
- qp_conf, socket_id);
+ qp_conf, socket_id, session_pool);
if (ret < 0)
return ret;
}
@@ -483,37 +490,41 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev)
static uint32_t
scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
- return sizeof(struct scheduler_session);
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint8_t i = 0;
+ uint32_t max_priv_sess_size = 0;
+
+ /* Check what is the maximum private session size for all slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
+ uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+
+ if (max_priv_sess_size < priv_sess_size)
+ max_priv_sess_size = priv_sess_size;
+ }
+
+ return max_priv_sess_size;
}
static int
-config_slave_sess(struct scheduler_ctx *sched_ctx,
- struct rte_crypto_sym_xform *xform,
- struct scheduler_session *sess,
- uint32_t create)
+scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
+ int ret;
for (i = 0; i < sched_ctx->nb_slaves; i++) {
struct scheduler_slave *slave = &sched_ctx->slaves[i];
- if (sess->sessions[i]) {
- if (create)
- continue;
- /* !create */
- sess->sessions[i] = rte_cryptodev_sym_session_free(
- slave->dev_id, sess->sessions[i]);
- } else {
- if (!create)
- continue;
- /* create */
- sess->sessions[i] =
- rte_cryptodev_sym_session_create(
- slave->dev_id, xform);
- if (!sess->sessions[i]) {
- config_slave_sess(sched_ctx, NULL, sess, 0);
- return -1;
- }
+ ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
+ xform, mempool);
+ if (ret < 0) {
+ CS_LOG_ERR("unabled to config sym session");
+ return ret;
}
}
@@ -523,27 +534,17 @@ config_slave_sess(struct scheduler_ctx *sched_ctx,
/** Clear the memory of session so it doesn't leave key material behind */
static void
scheduler_pmd_session_clear(struct rte_cryptodev *dev,
- void *sess)
+ struct rte_cryptodev_sym_session *sess)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
- config_slave_sess(sched_ctx, NULL, sess, 0);
-
- memset(sess, 0, sizeof(struct scheduler_session));
-}
-
-static void *
-scheduler_pmd_session_configure(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *sess)
-{
- struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ /* Clear private data of slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
- if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
- CS_LOG_ERR("unabled to config sym session");
- return NULL;
+ rte_cryptodev_sym_session_clear(slave->dev_id, sess);
}
-
- return sess;
}
struct rte_cryptodev_ops scheduler_pmd_ops = {
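
With the ops above, the scheduler reports the largest private session size among its slaves and initialises one shared rte_cryptodev_sym_session on each of them. The sketch below shows the application-side view, assuming the 17.08 mempool-based rte_cryptodev_sym_session_create()/_init()/_free() signatures; sess_mp is the application's session mempool and its objects must cover the session header plus the largest size returned by scheduler_pmd_session_get_size() above.

#include <rte_cryptodev.h>
#include <rte_mempool.h>

/* Sketch: create one session container and let the scheduler initialise
 * it on every attached slave via scheduler_pmd_session_configure().
 * The mempool-based create/init/free signatures are assumed from the
 * 17.08 API.
 */
static struct rte_cryptodev_sym_session *
setup_scheduler_session(uint8_t scheduler_id, struct rte_mempool *sess_mp,
		struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;

	if (rte_cryptodev_sym_session_init(scheduler_id, sess,
			xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}
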
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 421dae37..e606716a 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -36,6 +36,9 @@
#include "rte_cryptodev_scheduler.h"
+#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+/**< Scheduler Crypto PMD device name */
+
#define PER_SLAVE_BUFF_SIZE (256)
#define CS_LOG_ERR(fmt, args...) \
@@ -63,7 +66,7 @@ struct scheduler_slave {
uint16_t qp_id;
uint32_t nb_inflight_cops;
- enum rte_cryptodev_type dev_type;
+ uint8_t driver_id;
};
struct scheduler_ctx {
@@ -86,6 +89,8 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+ uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
int nb_init_slaves;
@@ -100,12 +105,10 @@ struct scheduler_qp_ctx {
uint32_t seqn;
} __rte_cache_aligned;
-struct scheduler_session {
- struct rte_cryptodev_sym_session *sessions[
- RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-};
-static inline uint16_t __attribute__((always_inline))
+extern uint8_t cryptodev_driver_id;
+
+static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
uint32_t count = rte_ring_free_count(order_ring);
@@ -113,7 +116,7 @@ get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
return count > nb_ops ? nb_ops : count;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
@@ -125,7 +128,7 @@ scheduler_order_insert(struct rte_ring *order_ring,
op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
} while (0)
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 01162764..4a847281 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -52,8 +52,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
- struct rte_cryptodev_sym_session *sessions[nb_ops];
- struct scheduler_session *sess0, *sess1, *sess2, *sess3;
if (unlikely(nb_ops == 0))
return 0;
@@ -61,39 +59,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
for (i = 0; i < nb_ops && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
- for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sess1 = (struct scheduler_session *)
- ops[i+1]->sym->session->_private;
- sess2 = (struct scheduler_session *)
- ops[i+2]->sym->session->_private;
- sess3 = (struct scheduler_session *)
- ops[i+3]->sym->session->_private;
-
- sessions[i] = ops[i]->sym->session;
- sessions[i + 1] = ops[i + 1]->sym->session;
- sessions[i + 2] = ops[i + 2]->sym->session;
- sessions[i + 3] = ops[i + 3]->sym->session;
-
- ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i + 1]->sym->session = sess1->sessions[slave_idx];
- ops[i + 2]->sym->session = sess2->sessions[slave_idx];
- ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-
- rte_prefetch0(ops[i + 4]->sym->session);
- rte_prefetch0(ops[i + 5]->sym->session);
- rte_prefetch0(ops[i + 6]->sym->session);
- rte_prefetch0(ops[i + 7]->sym->session);
- }
-
- for (; i < nb_ops; i++) {
- sess0 = (struct scheduler_session *)
- ops[i]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
- ops[i]->sym->session = sess0->sessions[slave_idx];
- }
-
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id, ops, nb_ops);
@@ -102,12 +67,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
rr_qp_ctx->last_enq_slave_idx += 1;
rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
- /* recover session if enqueue is failed */
- if (unlikely(processed_ops < nb_ops)) {
- for (i = processed_ops; i < nb_ops; i++)
- ops[i]->sym->session = sessions[i];
- }
-
return processed_ops;
}
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 960956ca..dad45068 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -46,6 +47,8 @@
#define SNOW3G_MAX_BURST 8
#define BYTE_LEN 8
+static uint8_t cryptodev_driver_id;
+
/** Get xform chain order. */
static enum snow3g_operation
snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -108,13 +111,20 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
case SNOW3G_OP_NOT_SUPPORTED:
default:
SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
- return -EINVAL;
+ return -ENOTSUP;
}
if (cipher_xform) {
/* Only SNOW 3G UEA2 supported */
if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG_ERR("Wrong IV length");
return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
/* Initialize key */
sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
&sess->pKeySched_cipher);
@@ -123,8 +133,21 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
if (auth_xform) {
/* Only SNOW 3G UIA2 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
+ SNOW3G_LOG_ERR("Wrong digest length");
return -EINVAL;
+ }
+
sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
+ SNOW3G_LOG_ERR("Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
/* Initialize key */
sso_snow3g_init_key_sched(auth_xform->auth.key.data,
&sess->pKeySched_hash);
@@ -140,27 +163,41 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
static struct snow3g_session *
snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
{
- struct snow3g_session *sess;
-
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_SNOW3G_PMD))
+ struct snow3g_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct snow3g_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct snow3g_session *)op->sym->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct snow3g_session *)c_sess->_private;
+ sess = (struct snow3g_session *)_sess_private_data;
if (unlikely(snow3g_set_session_parameters(sess,
- op->sym->xform) != 0))
- return NULL;
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
return sess;
}
@@ -173,17 +210,10 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
unsigned i;
uint8_t processed_ops = 0;
uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
- uint8_t *IV[SNOW3G_MAX_BURST];
+ uint8_t *iv[SNOW3G_MAX_BURST];
uint32_t num_bytes[SNOW3G_MAX_BURST];
for (i = 0; i < num_ops; i++) {
- /* Sanity checks. */
- if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("iv");
- break;
- }
-
src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
dst[i] = ops[i]->sym->m_dst ?
@@ -191,13 +221,14 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
(ops[i]->sym->cipher.data.offset >> 3) :
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
- IV[i] = ops[i]->sym->cipher.iv.data;
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
processed_ops++;
}
- sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, IV, src, dst,
+ sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, iv, src, dst,
num_bytes, processed_ops);
return processed_ops;
@@ -209,16 +240,9 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
struct snow3g_session *session)
{
uint8_t *src, *dst;
- uint8_t *IV;
+ uint8_t *iv;
uint32_t length_in_bits, offset_in_bits;
- /* Sanity checks. */
- if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("iv");
- return 0;
- }
-
offset_in_bits = op->sym->cipher.data.offset;
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
@@ -227,10 +251,11 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- IV = op->sym->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->cipher_iv_offset);
length_in_bits = op->sym->cipher.data.length;
- sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, iv,
src, dst, length_in_bits, offset_in_bits);
return 1;
@@ -246,20 +271,9 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
uint8_t processed_ops = 0;
uint8_t *src, *dst;
uint32_t length_in_bits;
+ uint8_t *iv;
for (i = 0; i < num_ops; i++) {
- if (unlikely(ops[i]->sym->auth.aad.length != SNOW3G_IV_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("aad");
- break;
- }
-
- if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("digest");
- break;
- }
-
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -271,27 +285,29 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ SNOW3G_DIGEST_LENGTH);
sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
- ops[i]->sym->auth.digest.length) != 0)
+ SNOW3G_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ SNOW3G_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
}
processed_ops++;
@@ -356,7 +372,11 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct snow3g_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -408,8 +428,9 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, op->sym->session);
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(op->sym->session, 0, sizeof(struct snow3g_session));
+ rte_cryptodev_sym_session_free(op->sym->session);
op->sym->session = NULL;
}
@@ -548,28 +569,21 @@ cryptodev_snow3g_create(const char *name,
{
struct rte_cryptodev *dev;
struct snow3g_private *internals;
- uint64_t cpu_flags = 0;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
if (init_params->name[0] == '\0')
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- /* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- else {
- SNOW3G_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
-
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct snow3g_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct snow3g_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
SNOW3G_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_SNOW3G_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_snow3g_pmd_ops;
/* Register RX/TX burst functions for data path. */
@@ -611,7 +625,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -653,3 +667,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_snow3g_pmd_drv, cryptodev_driver_id);
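
Because the cipher and auth IVs are now read from the crypto op at the offsets recorded in the session, the application has to place the IV inside the op and advertise the same offset in the xform. A hedged sketch follows; IV_OFFSET is an application choice (the layout below only mirrors what several DPDK test applications use), and the 16-byte length matches the iv_size advertised in the SNOW 3G capability table.

#include <string.h>
#include <rte_crypto.h>

/* Assumed layout: IV stored in the op's private area directly after the
 * symmetric op. IV_OFFSET only has to match the cipher.iv.offset or
 * auth.iv.offset programmed into the session's xform.
 */
#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))
#define IV_LENGTH	16	/* SNOW 3G UEA2/UIA2 IV size */

static void
set_op_iv(struct rte_crypto_op *op, const uint8_t *iv)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

	memcpy(iv_ptr, iv, IV_LENGTH);
}
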
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index 7ce96be9..ae9569c8 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,7 +56,7 @@ static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
.max = 4,
.increment = 0
},
- .aad_size = {
+ .iv_size = {
.min = 16,
.max = 16,
.increment = 0
@@ -156,7 +156,7 @@ snow3g_pmd_info_get(struct rte_cryptodev *dev,
struct snow3g_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -220,7 +220,7 @@ snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
static int
snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct snow3g_qp *qp = NULL;
@@ -245,7 +245,7 @@ snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -289,33 +289,56 @@ snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a SNOW 3G session from a crypto xform chain */
-static void *
+static int
snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
SNOW3G_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- if (snow3g_set_session_parameters(sess, xform) != 0) {
+ ret = snow3g_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
SNOW3G_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-snow3g_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+snow3g_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct snow3g_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct snow3g_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops snow3g_pmd_ops = {
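
The configure/clear pair above follows the per-driver private-data pattern introduced in this release: the private session object comes from a mempool, is attached to the shared session under the driver id, and on clear is zeroed and returned to the mempool it was taken from. A condensed sketch of the same pattern with a hypothetical my_session and my_driver_id, using only the helpers already used above:

#include <errno.h>
#include <string.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_mempool.h>

/* Hypothetical PMD following the same lifecycle as
 * snow3g_pmd_session_configure() and snow3g_pmd_session_clear() above.
 */
struct my_session {
	uint8_t key[16];
};

static int
my_session_configure(uint8_t my_driver_id,
		struct rte_cryptodev_sym_session *sess, struct rte_mempool *mp)
{
	void *priv;

	if (rte_mempool_get(mp, &priv))
		return -ENOMEM;

	/* ... derive the struct my_session contents from the xform here ... */

	set_session_private_data(sess, my_driver_id, priv);
	return 0;
}

static void
my_session_clear(uint8_t my_driver_id, struct rte_cryptodev_sym_session *sess)
{
	void *priv = get_session_private_data(sess, my_driver_id);

	if (priv) {
		memset(priv, 0, sizeof(struct my_session));
		set_session_private_data(sess, my_driver_id, NULL);
		rte_mempool_put(rte_mempool_from_obj(priv), priv);
	}
}
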
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
index 03973b97..fba3cb86 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include <sso_snow3g.h>
+#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
+/**< SNOW 3G PMD device name */
+
#define SNOW3G_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
@@ -91,6 +94,8 @@ struct snow3g_session {
enum rte_crypto_auth_operation auth_op;
sso_snow3g_key_schedule_t pKeySched_cipher;
sso_snow3g_key_schedule_t pKeySched_hash;
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
} __rte_cache_aligned;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 1020544b..b301711e 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -45,6 +46,8 @@
#define ZUC_MAX_BURST 8
#define BYTE_LEN 8
+static uint8_t cryptodev_driver_id;
+
/** Get xform chain order. */
static enum zuc_operation
zuc_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -107,13 +110,20 @@ zuc_set_session_parameters(struct zuc_session *sess,
case ZUC_OP_NOT_SUPPORTED:
default:
ZUC_LOG_ERR("Unsupported operation chain order parameter");
- return -EINVAL;
+ return -ENOTSUP;
}
if (cipher_xform) {
/* Only ZUC EEA3 supported */
if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ return -ENOTSUP;
+
+ if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG_ERR("Wrong IV length");
return -EINVAL;
+ }
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
/* Copy the key */
memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
ZUC_IV_KEY_LENGTH);
@@ -122,8 +132,21 @@ zuc_set_session_parameters(struct zuc_session *sess,
if (auth_xform) {
/* Only ZUC EIA3 supported */
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
+ return -ENOTSUP;
+
+ if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
+ ZUC_LOG_ERR("Wrong digest length");
return -EINVAL;
+ }
+
sess->auth_op = auth_xform->auth.op;
+
+ if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
+ ZUC_LOG_ERR("Wrong IV length");
+ return -EINVAL;
+ }
+ sess->auth_iv_offset = auth_xform->auth.iv.offset;
+
/* Copy the key */
memcpy(sess->pKey_hash, auth_xform->auth.key.data,
ZUC_IV_KEY_LENGTH);
@@ -139,27 +162,40 @@ zuc_set_session_parameters(struct zuc_session *sess,
static struct zuc_session *
zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
{
- struct zuc_session *sess;
-
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_ZUC_PMD))
+ struct zuc_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct zuc_session *)get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ void *_sess = NULL;
+ void *_sess_private_data = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct zuc_session *)op->sym->session->_private;
- } else {
- struct rte_cryptodev_session *c_sess = NULL;
-
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
return NULL;
- sess = (struct zuc_session *)c_sess->_private;
+ sess = (struct zuc_session *)_sess_private_data;
if (unlikely(zuc_set_session_parameters(sess,
- op->sym->xform) != 0))
- return NULL;
+ op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+
return sess;
}
@@ -172,18 +208,11 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
unsigned i;
uint8_t processed_ops = 0;
uint8_t *src[ZUC_MAX_BURST], *dst[ZUC_MAX_BURST];
- uint8_t *IV[ZUC_MAX_BURST];
+ uint8_t *iv[ZUC_MAX_BURST];
uint32_t num_bytes[ZUC_MAX_BURST];
uint8_t *cipher_keys[ZUC_MAX_BURST];
for (i = 0; i < num_ops; i++) {
- /* Sanity checks. */
- if (unlikely(ops[i]->sym->cipher.iv.length != ZUC_IV_KEY_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("iv");
- break;
- }
-
if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
|| ((ops[i]->sym->cipher.data.offset
% BYTE_LEN) != 0)) {
@@ -212,7 +241,8 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
(ops[i]->sym->cipher.data.offset >> 3) :
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
- IV[i] = ops[i]->sym->cipher.iv.data;
+ iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->cipher_iv_offset);
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
cipher_keys[i] = session->pKey_cipher;
@@ -220,7 +250,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
processed_ops++;
}
- sso_zuc_eea3_n_buffer(cipher_keys, IV, src, dst,
+ sso_zuc_eea3_n_buffer(cipher_keys, iv, src, dst,
num_bytes, processed_ops);
return processed_ops;
@@ -237,20 +267,9 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
uint8_t *src;
uint32_t *dst;
uint32_t length_in_bits;
+ uint8_t *iv;
for (i = 0; i < num_ops; i++) {
- if (unlikely(ops[i]->sym->auth.aad.length != ZUC_IV_KEY_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("aad");
- break;
- }
-
- if (unlikely(ops[i]->sym->auth.digest.length != ZUC_DIGEST_LENGTH)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("digest");
- break;
- }
-
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -262,27 +281,29 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
+ iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+ session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ ZUC_DIGEST_LENGTH);
sso_zuc_eia3_1_buffer(session->pKey_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
/* Verify digest. */
if (memcmp(dst, ops[i]->sym->auth.digest.data,
- ops[i]->sym->auth.digest.length) != 0)
+ ZUC_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* Trim area used for digest from mbuf. */
rte_pktmbuf_trim(ops[i]->sym->m_src,
- ops[i]->sym->auth.digest.length);
+ ZUC_DIGEST_LENGTH);
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;
sso_zuc_eia3_1_buffer(session->pKey_hash,
- ops[i]->sym->auth.aad.data, src,
+ iv, src,
length_in_bits, dst);
}
processed_ops++;
@@ -332,7 +353,11 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
- if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ memset(session, 0, sizeof(struct zuc_session));
+ memset(ops[i]->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -448,28 +473,21 @@ cryptodev_zuc_create(const char *name,
{
struct rte_cryptodev *dev;
struct zuc_private *internals;
- uint64_t cpu_flags = 0;
+ uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
if (init_params->name[0] == '\0')
snprintf(init_params->name, sizeof(init_params->name),
"%s", name);
- /* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- else {
- ZUC_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
-
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct zuc_private), init_params->socket_id);
+ dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+ sizeof(struct zuc_private), init_params->socket_id,
+ vdev);
if (dev == NULL) {
ZUC_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
}
- dev->dev_type = RTE_CRYPTODEV_ZUC_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_zuc_pmd_ops;
/* Register RX/TX burst functions for data path. */
@@ -511,7 +529,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
@@ -552,3 +570,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_zuc_pmd_drv, cryptodev_driver_id);
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index e793459c..52c6aed8 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,7 +56,7 @@ static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
.max = 4,
.increment = 0
},
- .aad_size = {
+ .iv_size = {
.min = 16,
.max = 16,
.increment = 0
@@ -80,7 +80,7 @@ static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
.min = 16,
.max = 16,
.increment = 0
- }
+ },
}, }
}, }
},
@@ -156,7 +156,7 @@ zuc_pmd_info_get(struct rte_cryptodev *dev,
struct zuc_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
- dev_info->dev_type = dev->dev_type;
+ dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
dev_info->feature_flags = dev->feature_flags;
@@ -220,7 +220,7 @@ zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
static int
zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool)
{
struct zuc_qp *qp = NULL;
@@ -245,7 +245,7 @@ zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (qp->processed_ops == NULL)
goto qp_setup_cleanup;
- qp->sess_mp = dev->data->session_pool;
+ qp->sess_mp = session_pool;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
@@ -289,33 +289,56 @@ zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
/** Configure a ZUC session from a crypto xform chain */
-static void *
+static int
zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
- struct rte_crypto_sym_xform *xform, void *sess)
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
{
+ void *sess_private_data;
+ int ret;
+
if (unlikely(sess == NULL)) {
ZUC_LOG_ERR("invalid session struct");
- return NULL;
+ return -EINVAL;
}
- if (zuc_set_session_parameters(sess, xform) != 0) {
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = zuc_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
ZUC_LOG_ERR("failed configure session parameters");
- return NULL;
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
}
- return sess;
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-zuc_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+zuc_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- /*
- * Current just resetting the whole data structure, need to investigate
- * whether a more selective reset of key would be more performant
- */
- if (sess)
- memset(sess, 0, sizeof(struct zuc_session));
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ memset(sess_priv, 0, sizeof(struct zuc_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
struct rte_cryptodev_ops zuc_pmd_ops = {
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
index 030f120b..b706e0aa 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_private.h
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
#include <sso_zuc.h>
+#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
+/**< ZUC PMD device name */
+
#define ZUC_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
@@ -92,6 +95,8 @@ struct zuc_session {
enum rte_crypto_auth_operation auth_op;
uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
} __rte_cache_aligned;
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index 1cf389e8..3f6b8988 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -39,5 +39,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
DEPDIRS-sw = $(core-libs) librte_kvargs librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
DEPDIRS-octeontx = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+DEPDIRS-dpaa2 = $(core-libs) librte_bus_fslmc
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
new file mode 100644
index 00000000..3497d09d
--- /dev/null
+++ b/drivers/event/dpaa2/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
new file mode 100644
index 00000000..cf2d2741
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -0,0 +1,692 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_vdev.h>
+
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include "dpaa2_eventdev.h"
+#include <portal/dpaa2_hw_pvt.h>
+#include <mc/fsl_dpci.h>
+
+/* Clarifications
+ * Eventdev = SoC Instance
+ * Eventport = DPIO Instance
+ * Eventqueue = DPCON Instance
+ * 1 Eventdev can have N Eventqueues
+ * Soft Event Flow is DPCI Instance
+ */
+
+static uint16_t
+dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_eventdev *ev_dev =
+ ((struct dpaa2_io_portal_t *)port)->eventdev;
+ struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+ uint32_t queue_id = ev[0].queue_id;
+ struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+ uint32_t fqid;
+ struct qbman_swp *swp;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t loop, frames_to_send;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ uint16_t num_tx = 0;
+ int ret;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (nb_events) {
+ frames_to_send = (nb_events >> 3) ?
+ MAX_TX_RING_SLOTS : nb_events;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ const struct rte_event *event = &ev[num_tx + loop];
+
+ if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
+ fqid = evq_info->dpci->queue[
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
+ else
+ fqid = evq_info->dpci->queue[
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
+ qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
+
+ if (event->impl_opaque) {
+ uint8_t dqrr_index = event->impl_opaque - 1;
+
+ qbman_eq_desc_set_dca(&eqdesc[loop], 1,
+ dqrr_index, 0);
+ DPAA2_PER_LCORE_DPIO->dqrr_size--;
+ DPAA2_PER_LCORE_DPIO->dqrr_held &=
+ ~(1 << dqrr_index);
+ }
+
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+
+ /*
+ * todo - need to align with hw context data
+ * to avoid copy
+ */
+ struct rte_event *ev_temp = rte_malloc(NULL,
+ sizeof(struct rte_event), 0);
+ rte_memcpy(ev_temp, event, sizeof(struct rte_event));
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
+ DPAA2_SET_FD_LEN((&fd_arr[loop]),
+ sizeof(struct rte_event));
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_enqueue_multiple_eqdesc(swp,
+ &eqdesc[loop], &fd_arr[loop],
+ frames_to_send - loop);
+ }
+ num_tx += frames_to_send;
+ nb_events -= frames_to_send;
+ }
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ return dpaa2_eventdev_enqueue_burst(port, ev, 1);
+}
+
+static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
+{
+ struct epoll_event epoll_ev;
+ int ret, i = 0;
+
+ qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
+ QBMAN_SWP_INTERRUPT_DQRI);
+
+RETRY:
+ ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ &epoll_ev, 1, timeout_ticks);
+ if (ret < 1) {
+ /* A spurious interrupt can make epoll_wait() fail with errno EINTR,
+ * so retry the wait in that case.
+ */
+ if (errno == EINTR) {
+ PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
+ if (i++ > 10)
+ PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
+ goto RETRY;
+ }
+ }
+}
+
+static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
+static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct rte_event *ev)
+{
+ struct rte_event *ev_temp =
+ (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+
+ RTE_SET_USED(swp);
+
+ rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
+ rte_free(ev_temp);
+ ev->impl_opaque = dqrr_index + 1;
+ DPAA2_PER_LCORE_DPIO->dqrr_size++;
+ DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ const struct qbman_result *dq;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct dpaa2_queue *rxq;
+ int num_pkts = 0, ret, i = 0;
+
+ RTE_SET_USED(port);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ /* Check if there are atomic contexts to be released */
+ while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
+ if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
+ dq = qbman_get_dqrr_from_idx(swp, i);
+ qbman_swp_dqrr_consume(swp, dq);
+ DPAA2_PER_LCORE_DPIO->dqrr_size--;
+ }
+ i++;
+ }
+ DPAA2_PER_LCORE_DPIO->dqrr_held = 0;
+
+ do {
+ dq = qbman_swp_dqrr_next(swp);
+ if (!dq) {
+ if (!num_pkts && timeout_ticks) {
+ dpaa2_eventdev_dequeue_wait(timeout_ticks);
+ timeout_ticks = 0;
+ continue;
+ }
+ return num_pkts;
+ }
+
+ fd = qbman_result_DQ_fd(dq);
+
+ rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
+ if (rxq) {
+ rxq->cb(swp, fd, dq, &ev[num_pkts]);
+ } else {
+ qbman_swp_dqrr_consume(swp, dq);
+ PMD_DRV_LOG(ERR, "Null Return VQ received\n");
+ return 0;
+ }
+
+ num_pkts++;
+ } while (num_pkts < nb_events);
+
+ return num_pkts;
+}
+
+static uint16_t
+dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa2_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+ dev_info->min_dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_event_queues = priv->max_event_queues;
+ dev_info->max_event_queue_flows =
+ DPAA2_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports = RTE_MAX_LCORE;
+ dev_info->max_event_port_dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE;
+}
+
+static int
+dpaa2_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+ PMD_DRV_FUNC_TRACE();
+
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+ return 0;
+}
+
+static int
+dpaa2_eventdev_start(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_stop(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa2_eventdev_close(struct rte_eventdev *dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
+ RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct evq_info_t *evq_info =
+ &priv->evq_info[queue_id];
+
+ PMD_DRV_FUNC_TRACE();
+
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold =
+ DPAA2_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+}
+
+static void
+dpaa2_eventdev_port_release(void *port)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port);
+}
+
+static int
+dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+
+ if (!dpaa2_io_portal[port_id].dpio_dev) {
+ dpaa2_io_portal[port_id].dpio_dev =
+ dpaa2_get_qbman_swp(port_id);
+ if (!dpaa2_io_portal[port_id].dpio_dev)
+ return -1;
+ rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
+ }
+
+ dpaa2_io_portal[port_id].eventdev = dev;
+ dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+ return 0;
+}
+
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ int i;
+
+ PMD_DRV_FUNC_TRACE();
+
+ for (i = 0; i < nb_unlinks; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ evq_info->link = 0;
+ }
+
+ return (int)nb_unlinks;
+}
+
+static int
+dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_io_portal_t *dpaa2_portal = port;
+ struct evq_info_t *evq_info;
+ uint8_t channel_index;
+ int ret, i, n;
+
+ PMD_DRV_FUNC_TRACE();
+
+ for (i = 0; i < nb_links; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ if (evq_info->link)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(
+ dpaa2_portal->dpio_dev->dpio,
+ CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id, &channel_index);
+ if (ret < 0) {
+ PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
+ ret);
+ goto err;
+ }
+
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ evq_info->link = 1;
+ }
+
+ RTE_SET_USED(priorities);
+
+ return (int)nb_links;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &priv->evq_info[queues[n]];
+ qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
+ 0, dpaa2_portal->dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ evq_info->link = 0;
+ }
+ return ret;
+}
+
+static int
+dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ uint32_t scale = 1;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ *timeout_ticks = ns * scale;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+}
+
+static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
+ .dev_infos_get = dpaa2_eventdev_info_get,
+ .dev_configure = dpaa2_eventdev_configure,
+ .dev_start = dpaa2_eventdev_start,
+ .dev_stop = dpaa2_eventdev_stop,
+ .dev_close = dpaa2_eventdev_close,
+ .queue_def_conf = dpaa2_eventdev_queue_def_conf,
+ .queue_setup = dpaa2_eventdev_queue_setup,
+ .queue_release = dpaa2_eventdev_queue_release,
+ .port_def_conf = dpaa2_eventdev_port_def_conf,
+ .port_setup = dpaa2_eventdev_port_setup,
+ .port_release = dpaa2_eventdev_port_release,
+ .port_link = dpaa2_eventdev_port_link,
+ .port_unlink = dpaa2_eventdev_port_unlink,
+ .timeout_ticks = dpaa2_eventdev_timeout_ticks,
+ .dump = dpaa2_eventdev_dump
+};
+
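+/* Attach the DPCI Rx queues to the given DPCON and register the per-queue
+ * event-processing callbacks (parallel and atomic).
+ */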
+static int
+dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
+ struct dpaa2_dpcon_dev *dpcon_dev)
+{
+ struct dpci_rx_queue_cfg rx_queue_cfg;
+ int ret, i;
+
+ /* Destination settings so the frame is delivered to the DPCON object */
+ rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
+ DPCI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
+ rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
+ rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
+
+ dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpaa2_eventdev_process_parallel;
+ dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpaa2_eventdev_process_atomic;
+
+ for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
+ rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
+ ret = dpci_set_rx_queue(&dpci_dev->dpci,
+ CMD_PRI_LOW,
+ dpci_dev->token, i,
+ &rx_queue_cfg);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "set_rx_q failed with err code: %d", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
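+/* Create the eventdev vdev, set up the fast-path function pointers and pair
+ * one DPCON with one DPCI object for each available event queue.
+ */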
+static int
+dpaa2_eventdev_create(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa2_eventdev *priv;
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+ struct dpaa2_dpci_dev *dpci_dev = NULL;
+ int ret;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct dpaa2_eventdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &dpaa2_eventdev_ops;
+ eventdev->schedule = NULL;
+ eventdev->enqueue = dpaa2_eventdev_enqueue;
+ eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
+ eventdev->dequeue = dpaa2_eventdev_dequeue;
+ eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ priv->max_event_queues = 0;
+
+ do {
+ dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
+ if (!dpcon_dev)
+ break;
+ priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;
+
+ dpci_dev = rte_dpaa2_alloc_dpci_dev();
+ if (!dpci_dev) {
+ rte_dpaa2_free_dpcon_dev(dpcon_dev);
+ break;
+ }
+ priv->evq_info[priv->max_event_queues].dpci = dpci_dev;
+
+ ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "dpci setup failed with err code: %d", ret);
+ return ret;
+ }
+ priv->max_event_queues++;
+ } while (dpcon_dev && dpci_dev);
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Initializing %s", name);
+ return dpaa2_eventdev_create(name);
+}
+
+static int
+dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Closing %s", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
+ .probe = dpaa2_eventdev_probe,
+ .remove = dpaa2_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
new file mode 100644
index 00000000..f79f78aa
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -0,0 +1,114 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA2_EVENTDEV_H__
+#define __DPAA2_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <mc/fsl_dpcon.h>
+#include <mc/fsl_mc_sys.h>
+
+#define EVENTDEV_NAME_DPAA2_PMD event_dpaa2
+
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
+
+#define PMD_DRV_ERR(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+#define DPAA2_EVENT_DEFAULT_DPCI_PRIO 0
+
+#define DPAA2_EVENT_MAX_QUEUES 16
+#define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT 1
+#define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA2_EVENT_MAX_QUEUE_FLOWS 2048
+#define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
+#define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
+#define DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH 8
+#define DPAA2_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
+
+#define DPAA2_EVENT_QUEUE_ATOMIC_FLOWS 2048
+#define DPAA2_EVENT_QUEUE_ORDER_SEQUENCES 2048
+
+enum {
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE,
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE,
+ DPAA2_EVENT_DPCI_MAX_QUEUES
+};
+
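+/* State of a DPCON object handled by this PMD */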
+struct dpaa2_dpcon_dev {
+ TAILQ_ENTRY(dpaa2_dpcon_dev) next;
+ struct fsl_mc_io dpcon;
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpcon_id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t channel_index;
+};
+
+struct evq_info_t {
+ /* DPcon device */
+ struct dpaa2_dpcon_dev *dpcon;
+ /* Attached DPCI device */
+ struct dpaa2_dpci_dev *dpci;
+ /* Configuration provided by the user */
+ uint32_t event_queue_cfg;
+ uint8_t link;
+};
+
+struct dpaa2_eventdev {
+ struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+ uint32_t dequeue_timeout_ns;
+ uint8_t max_event_queues;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t resvd_1;
+ uint32_t nb_event_queue_flows;
+ uint32_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_enqueue_depth;
+ uint32_t event_dev_cfg;
+};
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
+
+#endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
new file mode 100644
index 00000000..d3e73f90
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -0,0 +1,139 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_vfio.h>
+#include <mc/fsl_dpcon.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include "dpaa2_eventdev.h"
+
+TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
+static struct dpcon_dev_list dpcon_dev_list
+ = TAILQ_HEAD_INITIALIZER(dpcon_dev_list); /*!< DPCON device list */
+
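+/* Registered with the fslmc bus (see rte_dpaa2_dpcon_obj below): opens the
+ * DPCON object, reads its attributes and appends it to the device list.
+ */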
+static int
+rte_dpaa2_create_dpcon_device(struct fslmc_vfio_device *vdev __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dpcon_id)
+{
+ struct dpaa2_dpcon_dev *dpcon_node;
+ struct dpcon_attr attr;
+ int ret;
+
+ /* Allocate DPAA2 dpcon handle */
+ dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
+ if (!dpcon_node) {
+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPCON Device");
+ return -1;
+ }
+
+ /* Open the dpcon object */
+ dpcon_node->dpcon.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpcon_open(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Resource alloc failure with err code: %d",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Get the device attributes */
+ ret = dpcon_get_attributes(&dpcon_node->dpcon,
+ CMD_PRI_LOW, dpcon_node->token, &attr);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Reading device failed with err code: %d",
+ ret);
+ rte_free(dpcon_node);
+ return -1;
+ }
+
+ /* Update the device-specific private information */
+ dpcon_node->qbman_ch_id = attr.qbman_ch_id;
+ dpcon_node->num_priorities = attr.num_priorities;
+ dpcon_node->dpcon_id = dpcon_id;
+ rte_atomic16_init(&dpcon_node->in_use);
+
+ TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
+
+ PMD_DRV_LOG(DEBUG, "DPAA2: Added [dpcon.%d]", dpcon_id);
+
+ return 0;
+}
+
+struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Return the first unused DPCON device from the list, marking it in use */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev && rte_atomic16_test_and_set(&dpcon_dev->in_use))
+ break;
+ }
+
+ return dpcon_dev;
+}
+
+void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon)
+{
+ struct dpaa2_dpcon_dev *dpcon_dev = NULL;
+
+ /* Match DPCON handle and mark it free */
+ TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) {
+ if (dpcon_dev == dpcon) {
+ rte_atomic16_dec(&dpcon_dev->in_use);
+ return;
+ }
+ }
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dpcon_obj = {
+ .object_id = DPAA2_MC_DPCON_DEVID,
+ .create = rte_dpaa2_create_dpcon_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dpcon, rte_dpaa2_dpcon_obj);
diff --git a/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map b/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
new file mode 100644
index 00000000..1c0b7559
--- /dev/null
+++ b/drivers/event/dpaa2/rte_pmd_dpaa2_event_version.map
@@ -0,0 +1,3 @@
+DPDK_17.08 {
+ local: *;
+};
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index aca3d095..e5661ca8 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 Cavium Networks. All rights reserved.
+# Copyright(c) 2017 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
index 3da7cfdd..ba6d5142 100644
--- a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index c80a4437..d829b491 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,6 +30,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <inttypes.h>
+
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
@@ -156,6 +158,8 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
dev->schedule = NULL;
dev->enqueue = ssows_enq;
dev->enqueue_burst = ssows_enq_burst;
+ dev->enqueue_new_burst = ssows_enq_new_burst;
+ dev->enqueue_forward_burst = ssows_enq_fwd_burst;
dev->dequeue = ssows_deq;
dev->dequeue_burst = ssows_deq_burst;
@@ -170,6 +174,7 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
dev_info->max_event_queues = edev->max_event_queues;
@@ -194,6 +199,8 @@ ssovf_configure(const struct rte_eventdev *dev)
ssovf_func_trace();
deq_tmo_ns = conf->dequeue_timeout_ns;
+ if (deq_tmo_ns == 0)
+ deq_tmo_ns = edev->min_deq_timeout_ns;
if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
edev->is_timeout_deq = 1;
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 6e0a3521..1cdc8104 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -34,7 +34,7 @@
#define __SSOVF_EVDEV_H__
#include <rte_config.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
#include <rte_io.h>
#include "rte_pmd_octeontx_ssovf.h"
@@ -190,6 +190,10 @@ ssovf_pmd_priv(const struct rte_eventdev *eventdev)
uint16_t ssows_enq(void *port, const struct rte_event *ev);
uint16_t ssows_enq_burst(void *port,
const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_new_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_fwd_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks);
diff --git a/drivers/event/octeontx/ssovf_mbox.c b/drivers/event/octeontx/ssovf_mbox.c
index 7394a3a9..764414b5 100644
--- a/drivers/event/octeontx/ssovf_mbox.c
+++ b/drivers/event/octeontx/ssovf_mbox.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -172,7 +172,7 @@ mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
error:
ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
- m->tag_own, rx_hdr.tag, hdr->msg, hdr->coproc, res,
+ m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
hdr->res_code);
return res;
}
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
index b644ebde..e1c0c6d5 100644
--- a/drivers/event/octeontx/ssovf_probe.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index ad3fe684..5e17c7b8 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,7 +32,7 @@
#include "ssovf_worker.h"
-static force_inline void
+static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
const uint64_t event_ptr = ev->u64;
@@ -43,7 +43,7 @@ ssows_new_event(struct ssows *ws, const struct rte_event *ev)
ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
const uint8_t cur_tt = ws->cur_tt;
@@ -72,7 +72,7 @@ ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
-static force_inline void
+static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
const uint64_t event_ptr = ev->u64;
@@ -95,7 +95,7 @@ ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
@@ -112,39 +112,39 @@ ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
ssows_fwd_group(ws, ev, grp);
}
-static force_inline void
+static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
ssows_swtag_untag(ws);
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
RTE_SET_USED(timeout_ticks);
- ssows_swtag_wait(ws);
if (ws->swtag_req) {
ws->swtag_req = 0;
+ ssows_swtag_wait(ws);
return 1;
} else {
return ssows_get_work(ws, ev);
}
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
uint64_t iter;
uint16_t ret = 1;
- ssows_swtag_wait(ws);
if (ws->swtag_req) {
ws->swtag_req = 0;
+ ssows_swtag_wait(ws);
} else {
ret = ssows_get_work(ws, ev);
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
@@ -171,7 +171,7 @@ ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
return ssows_deq_timeout(port, ev, timeout_ticks);
}
-force_inline uint16_t __hot
+__rte_always_inline uint16_t __hot
ssows_enq(void *port, const struct rte_event *ev)
{
struct ssows *ws = port;
@@ -179,6 +179,7 @@ ssows_enq(void *port, const struct rte_event *ev)
switch (ev->op) {
case RTE_EVENT_OP_NEW:
+ rte_smp_wmb();
ssows_new_event(ws, ev);
break;
case RTE_EVENT_OP_FORWARD:
@@ -200,6 +201,30 @@ ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
return ssows_enq(port, ev);
}
+uint16_t __hot
+ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ uint16_t i;
+ struct ssows *ws = port;
+
+ rte_smp_wmb();
+ for (i = 0; i < nb_events; i++)
+ ssows_new_event(ws, &ev[i]);
+
+ return nb_events;
+}
+
+uint16_t __hot
+ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ struct ssows *ws = port;
+ RTE_SET_USED(nb_events);
+
+ ssows_forward_event(ws, ev);
+
+ return 1;
+}
+
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id)
{
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 300dfae8..55f72555 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,6 +32,7 @@
#include <rte_common.h>
+#include <rte_branch_prediction.h>
#include "ssovf_evdev.h"
@@ -42,17 +43,13 @@ enum {
SSO_SYNC_EMPTY
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
#ifndef __hot
#define __hot __attribute__((hot))
#endif
/* SSO Operations */
-static force_inline uint16_t
+static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev)
{
uint64_t get_work0, get_work1;
@@ -70,7 +67,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
return !!get_work1;
}
-static force_inline void
+static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
const uint8_t new_tt, const uint8_t grp)
{
@@ -80,7 +77,7 @@ ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
const uint8_t new_tt, const uint8_t grp)
{
@@ -92,7 +89,7 @@ ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
SSOW_VHWS_OP_SWTAG_FULL0));
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
uint64_t val;
@@ -101,7 +98,7 @@ ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
uint64_t val;
@@ -110,27 +107,27 @@ ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
ws->cur_tt = SSO_SYNC_UNTAGGED;
}
-static force_inline void
+static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
SSOW_VHWS_OP_UPD_WQP_GRP0));
}
-static force_inline void
+static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}
-static force_inline void
+static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
/* Wait for the SWTAG/SWTAG_FULL operation */
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
index c2b2456b..e6d58711 100644
--- a/drivers/event/skeleton/Makefile
+++ b/drivers/event/skeleton/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 800bd76e..bcd20556 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -43,10 +43,9 @@
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
+#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
#include <rte_lcore.h>
#include <rte_vdev.h>
@@ -130,6 +129,7 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
dev_info->max_event_port_enqueue_depth = 16;
dev_info->max_num_events = (1ULL << 20);
dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS;
}
@@ -427,18 +427,28 @@ static const struct rte_pci_id pci_id_skeleton_map[] = {
},
};
-static struct rte_eventdev_driver pci_eventdev_skeleton_pmd = {
- .pci_drv = {
- .id_table = pci_id_skeleton_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_event_pmd_pci_probe,
- .remove = rte_event_pmd_pci_remove,
- },
- .eventdev_init = skeleton_eventdev_init,
- .dev_private_size = sizeof(struct skeleton_eventdev),
+static int
+event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_probe(pci_drv, pci_dev,
+ sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
+}
+
+static int
+event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_event_pmd_pci_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
+ .id_table = pci_id_skeleton_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = event_skeleton_pci_probe,
+ .remove = event_skeleton_pci_remove,
};
-RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
/* VDEV based event device */
diff --git a/drivers/event/skeleton/skeleton_eventdev.h b/drivers/event/skeleton/skeleton_eventdev.h
index 1ce62da7..32064721 100644
--- a/drivers/event/skeleton/skeleton_eventdev.h
+++ b/drivers/event/skeleton/skeleton_eventdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,7 +33,8 @@
#ifndef __SKELETON_EVENTDEV_H__
#define __SKELETON_EVENTDEV_H__
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_eventdev_pmd_vdev.h>
#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
#define PMD_DRV_LOG(level, fmt, args...) \
diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h
index cdaee95d..734a3b4b 100644
--- a/drivers/event/sw/event_ring.h
+++ b/drivers/event/sw/event_ring.h
@@ -61,10 +61,6 @@ struct qe_ring {
struct rte_event ring[0] __rte_cache_aligned;
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
static inline struct qe_ring *
qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
{
@@ -91,19 +87,19 @@ qe_ring_destroy(struct qe_ring *r)
rte_free(r);
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_count(const struct qe_ring *r)
{
return r->write_idx - r->read_idx;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_free_count(const struct qe_ring *r)
{
return r->size - qe_ring_count(r);
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
unsigned int nb_qes, uint16_t *free_count)
{
@@ -130,7 +126,7 @@ qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
return nb_qes;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
unsigned int nb_qes, uint8_t *ops)
{
@@ -157,7 +153,7 @@ qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
return nb_qes;
}
-static force_inline unsigned int
+static __rte_always_inline unsigned int
qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
unsigned int nb_qes)
{
diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h
index d480d156..64cf6784 100644
--- a/drivers/event/sw/iq_ring.h
+++ b/drivers/event/sw/iq_ring.h
@@ -56,10 +56,6 @@ struct iq_ring {
struct rte_event ring[QID_IQ_DEPTH];
};
-#ifndef force_inline
-#define force_inline inline __attribute__((always_inline))
-#endif
-
static inline struct iq_ring *
iq_ring_create(const char *name, unsigned int socket_id)
{
@@ -81,19 +77,19 @@ iq_ring_destroy(struct iq_ring *r)
rte_free(r);
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_count(const struct iq_ring *r)
{
return r->write_idx - r->read_idx;
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_free_count(const struct iq_ring *r)
{
return QID_IQ_MASK - iq_ring_count(r);
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
const uint16_t read = r->read_idx;
@@ -112,7 +108,7 @@ iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
return nb_qes;
}
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
uint16_t read = r->read_idx;
@@ -132,7 +128,7 @@ iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
}
/* assumes there is space, from a previous dequeue_burst */
-static force_inline uint16_t
+static __rte_always_inline uint16_t
iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
uint16_t i, read = r->read_idx;
@@ -144,19 +140,19 @@ iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
return nb_qes;
}
-static force_inline const struct rte_event *
+static __rte_always_inline const struct rte_event *
iq_ring_peek(const struct iq_ring *r)
{
return &r->ring[r->read_idx & QID_IQ_MASK];
}
-static force_inline void
+static __rte_always_inline void
iq_ring_pop(struct iq_ring *r)
{
r->read_idx++;
}
-static force_inline int
+static __rte_always_inline int
iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
{
const uint16_t read = r->read_idx;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a31aaa66..9c534b7f 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -30,6 +30,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <inttypes.h>
#include <string.h>
#include <rte_vdev.h>
@@ -37,10 +38,11 @@
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>
+#include <rte_event_ring.h>
+#include <rte_service_component.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
@@ -90,7 +92,8 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
p->num_ordered_qids++;
p->num_qids_mapped++;
- } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
+ q->type == RTE_SCHED_TYPE_PARALLEL) {
p->num_qids_mapped++;
}
@@ -138,7 +141,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
{
struct sw_evdev *sw = sw_pmd_priv(dev);
struct sw_port *p = &sw->ports[port_id];
- char buf[QE_RING_NAMESIZE];
+ char buf[RTE_RING_NAMESIZE];
unsigned int i;
struct rte_event_dev_info info;
@@ -159,10 +162,19 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
p->id = port_id;
p->sw = sw;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "rx_worker_ring");
- p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
- dev->data->socket_id);
+ /* check to see if the ring exists - port_setup() can be called
+ * multiple times legally (assuming the device is stopped). If the
+ * ring exists, free it so it gets re-created with the correct size
+ */
+ snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+ port_id, "rx_worker_ring");
+ struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->rx_worker_ring == NULL) {
SW_LOG_ERR("Error creating RX worker ring for port %d\n",
port_id);
@@ -171,12 +183,18 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
p->inflight_max = conf->new_event_threshold;
- snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
- "cq_worker_ring");
- p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
- dev->data->socket_id);
+ /* check if ring exists, same as rx_worker above */
+ snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
+ port_id, "cq_worker_ring");
+ existing_ring = rte_event_ring_lookup(buf);
+ if (existing_ring)
+ rte_event_ring_free(existing_ring);
+
+ p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+ dev->data->socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
if (p->cq_worker_ring == NULL) {
- qe_ring_destroy(p->rx_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
port_id);
return -1;
@@ -202,8 +220,8 @@ sw_port_release(void *port)
if (p == NULL)
return;
- qe_ring_destroy(p->rx_worker_ring);
- qe_ring_destroy(p->cq_worker_ring);
+ rte_event_ring_free(p->rx_worker_ring);
+ rte_event_ring_free(p->cq_worker_ring);
memset(p, 0, sizeof(*p));
}
@@ -435,6 +453,7 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_EVENT_QOS),
};
@@ -509,8 +528,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
fprintf(f, "\n");
if (p->rx_worker_ring) {
- uint64_t used = qe_ring_count(p->rx_worker_ring);
- uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+ uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->rx_worker_ring);
const char *col = (space == 0) ? COL_RED : COL_RESET;
fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
PRIu64 COL_RESET"\n", col, used, space);
@@ -518,8 +538,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
fprintf(f, "\trx ring not initialized.\n");
if (p->cq_worker_ring) {
- uint64_t used = qe_ring_count(p->cq_worker_ring);
- uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+ uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+ uint64_t space = rte_event_ring_free_count(
+ p->cq_worker_ring);
const char *col = (space == 0) ? COL_RED : COL_RESET;
fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
PRIu64 COL_RESET"\n", col, used, space);
@@ -559,12 +580,13 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
inflights += qid->fids[flow].pcount;
}
- uint32_t cq;
- fprintf(f, "\tInflights: %u\tFlows pinned per port: ",
- inflights);
- for (cq = 0; cq < sw->port_count; cq++)
- fprintf(f, "%d ", affinities_per_port[cq]);
- fprintf(f, "\n");
+ uint32_t port;
+ fprintf(f, "\tPer Port Stats:\n");
+ for (port = 0; port < sw->port_count; port++) {
+ fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
+ qid->to_port[port]);
+ fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
+ }
uint32_t iq;
uint32_t iq_printed = 0;
@@ -593,6 +615,13 @@ sw_start(struct rte_eventdev *dev)
{
unsigned int i, j;
struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ /* check that a service core is mapped to this service */
+ struct rte_service_spec *s = rte_service_get_by_name(sw->service_name);
+ if (!rte_service_is_running(s))
+ SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
+ s->name);
+
/* check all ports are set up */
for (i = 0; i < sw->port_count; i++)
if (sw->ports[i].rx_worker_ring == NULL) {
@@ -695,6 +724,14 @@ set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
return 0;
}
+
+static int32_t sw_sched_service_func(void *args)
+{
+ struct rte_eventdev *dev = args;
+ sw_event_schedule(dev);
+ return 0;
+}
+
static int
sw_probe(struct rte_vdev_device *vdev)
{
@@ -792,6 +829,8 @@ sw_probe(struct rte_vdev_device *vdev)
dev->dev_ops = &evdev_sw_ops;
dev->enqueue = sw_event_enqueue;
dev->enqueue_burst = sw_event_enqueue_burst;
+ dev->enqueue_new_burst = sw_event_enqueue_burst;
+ dev->enqueue_forward_burst = sw_event_enqueue_burst;
dev->dequeue = sw_event_dequeue;
dev->dequeue_burst = sw_event_dequeue_burst;
dev->schedule = sw_event_schedule;
@@ -806,6 +845,22 @@ sw_probe(struct rte_vdev_device *vdev)
sw->credit_update_quanta = credit_quanta;
sw->sched_quanta = sched_quanta;
+ /* register service with EAL */
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "%s_service", name);
+ snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
+ name);
+ service.socket_id = socket_id;
+ service.callback = sw_sched_service_func;
+ service.callback_userdata = (void *)dev;
+
+ int32_t ret = rte_service_register(&service);
+ if (ret) {
+ SW_LOG_ERR("service register() failed");
+ return -ENOEXEC;
+ }
+
return 0;
}
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 61c671d6..71de3c14 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -34,7 +34,7 @@
#define _SW_EVDEV_H_
#include <rte_eventdev.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>
#define SW_DEFAULT_CREDIT_QUANTA 32
@@ -59,6 +59,7 @@
#define EVENTDEV_NAME_SW_PMD event_sw
#define SW_PMD_NAME RTE_STR(event_sw)
+#define SW_PMD_NAME_MAX 64
#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
@@ -149,6 +150,7 @@ struct sw_qid {
uint32_t cq_num_mapped_cqs;
uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
uint32_t cq_map[SW_PORTS_MAX];
+ uint64_t to_port[SW_PORTS_MAX];
/* Track flow ids for atomic load balancing */
struct sw_fid_t fids[SW_QID_NUM_FIDS];
@@ -189,9 +191,9 @@ struct sw_port {
int16_t num_ordered_qids;
/** Ring and buffer for pulling events from workers for scheduling */
- struct qe_ring *rx_worker_ring __rte_cache_aligned;
+ struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
/** Ring and buffer for pushing packets to workers after scheduling */
- struct qe_ring *cq_worker_ring;
+ struct rte_event_ring *cq_worker_ring;
/* hole */
@@ -275,6 +277,8 @@ struct sw_evdev {
/* store num stats and offset of the stats for each queue */
uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ char service_name[SW_PMD_NAME_MAX];
};
static inline struct sw_evdev *
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index a333a6f0..8a2c9d4f 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -32,9 +32,9 @@
#include <rte_ring.h>
#include <rte_hash_crc.h>
+#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
#define SW_IQS_MASK (SW_IQS_MAX-1)
@@ -119,11 +119,12 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
p->stats.tx_pkts++;
qid->stats.tx_pkts++;
+ qid->to_port[cq]++;
/* if we just filled in the last slot, flush the buffer */
if (sw->cq_ring_space[cq] == 0) {
- struct qe_ring *worker = p->cq_worker_ring;
- qe_ring_enqueue_burst(worker, p->cq_buf,
+ struct rte_event_ring *worker = p->cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, p->cq_buf,
p->cq_buf_count,
&sw->cq_ring_space[cq]);
p->cq_buf_count = 0;
@@ -170,7 +171,8 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
cq = qid->cq_map[cq_idx];
if (++cq_idx == qid->cq_num_mapped_cqs)
cq_idx = 0;
- } while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+ } while (rte_event_ring_free_count(
+ sw->ports[cq].cq_worker_ring) == 0 ||
sw->ports[cq].inflights == SW_PORT_HIST_LIST);
struct sw_port *p = &sw->ports[cq];
@@ -362,17 +364,17 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
return pkts_iter;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
{
RTE_SET_USED(sw);
- struct qe_ring *worker = port->rx_worker_ring;
+ struct rte_event_ring *worker = port->rx_worker_ring;
port->pp_buf_start = 0;
- port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
- RTE_DIM(port->pp_buf));
+ port->pp_buf_count = rte_event_ring_dequeue_burst(worker, port->pp_buf,
+ RTE_DIM(port->pp_buf), NULL);
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
__pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
{
static struct reorder_buffer_entry dummy_rob;
@@ -585,8 +587,8 @@ sw_event_schedule(struct rte_eventdev *dev)
* worker cores: aka, do the ring transfers batched.
*/
for (i = 0; i < sw->port_count; i++) {
- struct qe_ring *worker = sw->ports[i].cq_worker_ring;
- qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+ struct rte_event_ring *worker = sw->ports[i].cq_worker_ring;
+ rte_event_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
sw->ports[i].cq_buf_count,
&sw->cq_ring_space[i]);
sw->ports[i].cq_buf_count = 0;
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index 9cb6bef5..d76d3d5c 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -32,9 +32,9 @@
#include <rte_atomic.h>
#include <rte_cycles.h>
+#include <rte_event_ring.h>
#include "sw_evdev.h"
-#include "event_ring.h"
#define PORT_ENQUEUE_MAX_BURST_SIZE 64
@@ -52,13 +52,31 @@ sw_event_release(struct sw_port *p, uint8_t index)
ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
uint16_t free_count;
- qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+ rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
/* each release returns one credit */
p->outstanding_releases--;
p->inflight_credits++;
}
+/*
+ * special-case of rte_event_ring enqueue, with overriding the ops member on
+ * the events that get written to the ring.
+ */
+static inline unsigned int
+enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
+ unsigned int n, uint8_t *ops)
+{
+ struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
+ unsigned int i;
+
+ memcpy(tmp_evs, events, n * sizeof(events[0]));
+ for (i = 0; i < n; i++)
+ tmp_evs[i].op = ops[i];
+
+ return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
+}
+
uint16_t
sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
{
@@ -87,6 +105,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
return 0;
}
+ uint32_t forwards = 0;
for (i = 0; i < num; i++) {
int op = ev[i].op;
int outstanding = p->outstanding_releases > 0;
@@ -95,6 +114,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
outstanding;
+ forwards += (op == RTE_EVENT_OP_FORWARD);
new_ops[i] = sw_qe_flag_map[op];
new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
@@ -113,8 +133,11 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
}
}
+ /* handle directed port forward credits */
+ p->inflight_credits -= forwards * p->is_directed;
+
/* returns number of events actually enqueued */
- uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+ uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
new_ops);
if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
uint64_t burst_ticks = rte_get_timer_cycles() -
@@ -141,7 +164,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
RTE_SET_USED(wait);
struct sw_port *p = (void *)port;
struct sw_evdev *sw = (void *)p->sw;
- struct qe_ring *ring = p->cq_worker_ring;
+ struct rte_event_ring *ring = p->cq_worker_ring;
uint32_t credit_update_quanta = sw->credit_update_quanta;
/* check that all previous dequeues have been released */
@@ -153,7 +176,7 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
}
/* returns number of events actually dequeued */
- uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
+ uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
if (unlikely(ndeq == 0)) {
p->outstanding_releases = 0;
p->zero_polls++;
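On the worker path above, the dequeue side now uses rte_event_ring_dequeue_burst, whose last argument may be NULL when the caller does not need the ring occupancy. A minimal sketch, assuming an existing CQ ring; the wrapper itself is illustrative:

#include <rte_eventdev.h>
#include <rte_event_ring.h>

/* Pull up to num events from a consumer-queue ring; a zero return is the
 * point where the port above records a zero poll. */
static inline uint16_t
pull_cq_events(struct rte_event_ring *cq, struct rte_event *ev, uint16_t num)
{
        return rte_event_ring_dequeue_burst(cq, ev, num, NULL);
}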
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index c7b1abe8..8cb6d88d 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -30,9 +30,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_ring.h"
-#include "event_ring.h"
enum xstats_type {
/* common stats */
@@ -57,6 +57,7 @@ enum xstats_type {
iq_used,
/* qid port mapping specific */
pinned,
+ pkts, /* note: qid-to-port pkts */
};
typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
@@ -104,10 +105,10 @@ get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
case calls: return p->total_polls;
case credits: return p->inflight_credits;
case poll_return: return p->zero_polls;
- case rx_used: return qe_ring_count(p->rx_worker_ring);
- case rx_free: return qe_ring_free_count(p->rx_worker_ring);
- case tx_used: return qe_ring_count(p->cq_worker_ring);
- case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+ case rx_used: return rte_event_ring_count(p->rx_worker_ring);
+ case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
+ case tx_used: return rte_event_ring_count(p->cq_worker_ring);
+ case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
default: return -1;
}
}
@@ -179,6 +180,8 @@ get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
return pin;
} while (0);
break;
+ case pkts:
+ return qid->to_port[port];
default: return -1;
}
}
@@ -246,8 +249,11 @@ sw_xstats_init(struct sw_evdev *sw)
static const enum xstats_type qid_iq_types[] = { iq_used };
/* reset allowed */
- static const char * const qid_port_stats[] = { "pinned_flows" };
- static const enum xstats_type qid_port_types[] = { pinned };
+ static const char * const qid_port_stats[] = { "pinned_flows",
+ "packets"
+ };
+ static const enum xstats_type qid_port_types[] = { pinned, pkts };
+ static const uint8_t qid_port_reset_allowed[] = {0, 1};
/* reset allowed */
/* ---- end of stat definitions ---- */
@@ -312,8 +318,9 @@ sw_xstats_init(struct sw_evdev *sw)
port, port_stats[i]);
}
- for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
- SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+ for (bkt = 0; bkt < (rte_event_ring_get_capacity(
+ sw->ports[port].cq_worker_ring) >>
+ SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
sw->xstats[stat] = (struct sw_xstats_entry){
.fn = get_port_bucket_stat,
@@ -376,7 +383,8 @@ sw_xstats_init(struct sw_evdev *sw)
.stat = qid_port_types[i],
.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
.extra_arg = port,
- .reset_allowed = 0,
+ .reset_allowed =
+ qid_port_reset_allowed[i],
};
snprintf(sname, sizeof(sname),
"qid_%u_port_%u_%s",
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index 8fd40e1b..efd55f23 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 NXP. All rights reserved.
-# All rights reserved.
+# Copyright 2017 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index 26ccebde..1a174968 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 NXP. All rights reserved.
-# All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 5a5d6aaa..6df203fc 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,7 +47,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
@@ -63,6 +62,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
struct dpaa2_bp_list *bp_list;
struct dpaa2_dpbp_dev *avail_dpbp;
+ struct dpaa2_bp_info *bp_info;
struct dpbp_attr dpbp_attr;
uint32_t bpid;
int ret, p_ret;
@@ -127,7 +127,12 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
rte_dpaa2_bpid_info[bpid].bpid = bpid;
- mp->pool_data = (void *)&rte_dpaa2_bpid_info[bpid];
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
+ sizeof(struct dpaa2_bp_info));
+ mp->pool_data = (void *)bp_info;
PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
@@ -161,7 +166,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
while (temp) {
if (temp == bp) {
prev->next = temp->next;
- free(bp);
+ rte_free(bp);
break;
}
prev = temp;
@@ -169,6 +174,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
}
}
+ rte_free(mp->pool_data);
dpaa2_free_dpbp_dev(dpbp_node);
}
@@ -188,7 +194,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+ RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
return;
}
}
@@ -267,7 +273,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+ RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
return ret;
}
}
@@ -294,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
bp_info->meta_data_size, n);
- return ret;
+ return -ENOBUFS;
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i]; i++) {
@@ -323,7 +329,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured");
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
return -ENOENT;
}
rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
@@ -341,7 +347,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp || !mp->pool_data) {
- RTE_LOG(ERR, PMD, "Invalid mempool provided");
+ RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
return 0;
}
@@ -351,12 +357,12 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
dpbp_node->token, &num_of_bufs);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)",
+ RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
ret);
return 0;
}
- RTE_LOG(DEBUG, PMD, "Free bufs = %u", num_of_bufs);
+ RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
return num_of_bufs;
}
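The mempool hunks above stop pointing pool_data at the shared rte_dpaa2_bpid_info table and instead give each pool its own rte_malloc'd copy, released with rte_free() when the pool is destroyed, and they make the bulk-alloc failure path return -ENOBUFS. A minimal sketch of that copy-and-attach pattern; the helper is hypothetical and dpaa2_bp_info is the driver's internal type:

#include <errno.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include "dpaa2_hw_mempool.h"

/* Attach a private, cache-aligned copy of the bpid info to the mempool so
 * it can later be freed independently of the global table. */
static int
attach_bp_info(struct rte_mempool *mp, const struct dpaa2_bp_info *src)
{
        struct dpaa2_bp_info *copy;

        copy = rte_malloc(NULL, sizeof(*copy), RTE_CACHE_LINE_SIZE);
        if (copy == NULL)
                return -ENOMEM; /* the hunk above assumes the allocation succeeds */
        rte_memcpy(copy, src, sizeof(*copy));
        mp->pool_data = copy;
        return 0;
}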
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
index c4d7fc02..56b71bed 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/ring/Makefile b/drivers/mempool/ring/Makefile
index 54630d99..b339d907 100644
--- a/drivers/mempool/ring/Makefile
+++ b/drivers/mempool/ring/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 NXP.
-# All rights reserved.
+# Copyright 2017 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/mempool/stack/Makefile b/drivers/mempool/stack/Makefile
index 8f3125c7..7577b23c 100644
--- a/drivers/mempool/stack/Makefile
+++ b/drivers/mempool/stack/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2017 NXP.
-# All rights reserved.
+# Copyright 2017 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 35ed8135..d33c9590 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -59,6 +59,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
DEPDIRS-ena = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
DEPDIRS-enic = $(core-libs) librte_hash
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe
+DEPDIRS-failsafe = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
DEPDIRS-fm10k = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 9ccb7af9..9a47852c 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -413,7 +413,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
if (data_size > buf_size) {
RTE_LOG(ERR, PMD,
"%s: %d bytes will not fit in mbuf (%d bytes)\n",
- dev->data->name, data_size, buf_size);
+ dev->device->name, data_size, buf_size);
return -ENOMEM;
}
@@ -568,7 +568,12 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
int rc, tpver, discard;
int qsockfd = -1;
unsigned int i, q, rdsize;
- int fanout_arg __rte_unused, bypass __rte_unused;
+#if defined(PACKET_FANOUT)
+ int fanout_arg;
+#endif
+#if defined(PACKET_QDISC_BYPASS)
+ int bypass;
+#endif
for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
pair = &kvlist->pairs[k_idx];
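The af_packet hunk replaces the __rte_unused locals with declarations guarded by the same feature tests that guard their use, so builds against kernels lacking PACKET_FANOUT or PACKET_QDISC_BYPASS do not carry dead variables. A sketch of that guard pattern for one of the options; the helper name is illustrative:

#include <sys/socket.h>
#include <linux/if_packet.h>

/* Enable qdisc bypass on an AF_PACKET socket only when the kernel headers
 * define the option; otherwise compile to a no-op. */
static int
enable_qdisc_bypass(int sockfd)
{
#if defined(PACKET_QDISC_BYPASS)
        int bypass = 1;

        return setsockopt(sockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
                          &bypass, sizeof(bypass));
#else
        (void)sockfd;
        return 0;
#endif
}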
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 017817e2..6db362b0 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -77,6 +77,7 @@ static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
uint32_t pool);
static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
uint32_t index);
+static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size);
/*
* The packet generator is a functional block used to generate packet
@@ -179,6 +180,8 @@ static const struct eth_dev_ops ark_eth_dev_ops = {
.mac_addr_add = eth_ark_macaddr_add,
.mac_addr_remove = eth_ark_macaddr_remove,
.mac_addr_set = eth_ark_set_default_mac_addr,
+
+ .mtu_set = eth_ark_set_mtu,
};
static int
@@ -256,6 +259,10 @@ check_for_ext(struct ark_adapter *ark)
(void (*)(struct rte_eth_dev *, struct ether_addr *,
void *))
dlsym(ark->d_handle, "mac_addr_set");
+ ark->user_ext.set_mtu =
+ (int (*)(struct rte_eth_dev *, uint16_t,
+ void *))
+ dlsym(ark->d_handle, "set_mtu");
return found;
}
@@ -278,7 +285,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ret = check_for_ext(ark);
if (ret)
return ret;
- pci_dev = ARK_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
rte_eth_copy_pci_info(dev, pci_dev);
/* Use dummy function until setup */
@@ -346,8 +353,9 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
}
if (ark->user_ext.dev_init) {
- ark->user_data = ark->user_ext.dev_init(dev, ark->a_bar, 0);
- if (!ark->user_data) {
+ ark->user_data[dev->data->port_id] =
+ ark->user_ext.dev_init(dev, ark->a_bar, 0);
+ if (!ark->user_data[dev->data->port_id]) {
PMD_DRV_LOG(INFO,
"Failed to initialize PMD extension!"
" continuing without it\n");
@@ -369,7 +377,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
*/
if (ark->user_ext.dev_get_port_count)
port_count =
- ark->user_ext.dev_get_port_count(dev, ark->user_data);
+ ark->user_ext.dev_get_port_count(dev,
+ ark->user_data[dev->data->port_id]);
ark->num_ports = port_count;
for (p = 0; p < port_count; p++) {
@@ -410,9 +419,10 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
goto error;
}
- if (ark->user_ext.dev_init)
- ark->user_data =
+ if (ark->user_ext.dev_init) {
+ ark->user_data[eth_dev->data->port_id] =
ark->user_ext.dev_init(dev, ark->a_bar, p);
+ }
}
return ret;
@@ -508,7 +518,8 @@ eth_ark_dev_uninit(struct rte_eth_dev *dev)
return 0;
if (ark->user_ext.dev_uninit)
- ark->user_ext.dev_uninit(dev, ark->user_data);
+ ark->user_ext.dev_uninit(dev,
+ ark->user_data[dev->data->port_id]);
ark_pktgen_uninit(ark->pg);
ark_pktchkr_uninit(ark->pc);
@@ -529,7 +540,8 @@ eth_ark_dev_configure(struct rte_eth_dev *dev)
eth_ark_dev_set_link_up(dev);
if (ark->user_ext.dev_configure)
- return ark->user_ext.dev_configure(dev, ark->user_data);
+ return ark->user_ext.dev_configure(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -592,7 +604,8 @@ eth_ark_dev_start(struct rte_eth_dev *dev)
}
if (ark->user_ext.dev_start)
- ark->user_ext.dev_start(dev, ark->user_data);
+ ark->user_ext.dev_start(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -614,7 +627,8 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
/* Stop the extension first */
if (ark->user_ext.dev_stop)
- ark->user_ext.dev_stop(dev, ark->user_data);
+ ark->user_ext.dev_stop(dev,
+ ark->user_data[dev->data->port_id]);
/* Stop the packet generator */
if (ark->start_pg)
@@ -636,7 +650,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
}
/* Stop DDM */
- /* Wait up to 0.1 second. each stop is upto 1000 * 10 useconds */
+ /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */
for (i = 0; i < 10; i++) {
status = ark_ddm_stop(ark->ddm.v, 1);
if (status == 0)
@@ -697,7 +711,8 @@ eth_ark_dev_close(struct rte_eth_dev *dev)
uint16_t i;
if (ark->user_ext.dev_close)
- ark->user_ext.dev_close(dev, ark->user_data);
+ ark->user_ext.dev_close(dev,
+ ark->user_data[dev->data->port_id]);
eth_ark_dev_stop(dev);
eth_ark_udm_force_close(dev);
@@ -751,7 +766,7 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G |
ETH_LINK_SPEED_100G);
- dev_info->pci_dev = ARK_DEV_TO_PCI(dev);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
}
static int
@@ -765,7 +780,7 @@ eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (ark->user_ext.link_update) {
return ark->user_ext.link_update
(dev, wait_to_complete,
- ark->user_data);
+ ark->user_data[dev->data->port_id]);
}
return 0;
}
@@ -778,7 +793,8 @@ eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.dev_set_link_up)
- return ark->user_ext.dev_set_link_up(dev, ark->user_data);
+ return ark->user_ext.dev_set_link_up(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -790,7 +806,8 @@ eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.dev_set_link_down)
- return ark->user_ext.dev_set_link_down(dev, ark->user_data);
+ return ark->user_ext.dev_set_link_down(dev,
+ ark->user_data[dev->data->port_id]);
return 0;
}
@@ -813,7 +830,8 @@ eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
if (ark->user_ext.stats_get)
- ark->user_ext.stats_get(dev, stats, ark->user_data);
+ ark->user_ext.stats_get(dev, stats,
+ ark->user_data[dev->data->port_id]);
}
static void
@@ -824,11 +842,12 @@ eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
(struct ark_adapter *)dev->data->dev_private;
for (i = 0; i < dev->data->nb_tx_queues; i++)
- eth_tx_queue_stats_reset(dev->data->rx_queues[i]);
+ eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
if (ark->user_ext.stats_reset)
- ark->user_ext.stats_reset(dev, ark->user_data);
+ ark->user_ext.stats_reset(dev,
+ ark->user_data[dev->data->port_id]);
}
static int
@@ -845,7 +864,7 @@ eth_ark_macaddr_add(struct rte_eth_dev *dev,
mac_addr,
index,
pool,
- ark->user_data);
+ ark->user_data[dev->data->port_id]);
return 0;
}
return -ENOTSUP;
@@ -858,7 +877,8 @@ eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.mac_addr_remove)
- ark->user_ext.mac_addr_remove(dev, index, ark->user_data);
+ ark->user_ext.mac_addr_remove(dev, index,
+ ark->user_data[dev->data->port_id]);
}
static void
@@ -869,7 +889,21 @@ eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
(struct ark_adapter *)dev->data->dev_private;
if (ark->user_ext.mac_addr_set)
- ark->user_ext.mac_addr_set(dev, mac_addr, ark->user_data);
+ ark->user_ext.mac_addr_set(dev, mac_addr,
+ ark->user_data[dev->data->port_id]);
+}
+
+static int
+eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.set_mtu)
+ return ark->user_ext.set_mtu(dev, size,
+ ark->user_data[dev->data->port_id]);
+
+ return -ENOTSUP;
}
static inline int
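The ark changes route every extension callback through a per-port user_data slot and add an mtu_set op that forwards to an optional "set_mtu" symbol resolved with dlsym(). A minimal sketch of resolving such an optional hook; the typedef is illustrative and mirrors the prototype added to ark_ext.h below:

#include <dlfcn.h>
#include <stdint.h>
#include <rte_ethdev.h>

typedef int (*ark_set_mtu_fn)(struct rte_eth_dev *dev, uint16_t size,
                              void *user_data);

/* A NULL result simply means the extension library does not implement the
 * hook, in which case the PMD falls back to -ENOTSUP. */
static ark_set_mtu_fn
lookup_set_mtu(void *dl_handle)
{
        return (ark_set_mtu_fn)dlsym(dl_handle, "set_mtu");
}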
diff --git a/drivers/net/ark/ark_ethdev.h b/drivers/net/ark/ark_ethdev.h
index 9f8d32fc..df5547bf 100644
--- a/drivers/net/ark/ark_ethdev.h
+++ b/drivers/net/ark/ark_ethdev.h
@@ -34,8 +34,4 @@
#ifndef _ARK_ETHDEV_H_
#define _ARK_ETHDEV_H_
-#define ARK_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
-
-
#endif
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
index f805f64f..63b7a261 100644
--- a/drivers/net/ark/ark_ext.h
+++ b/drivers/net/ark/ark_ext.h
@@ -112,4 +112,8 @@ void mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
void *user_data);
+int set_mtu(struct rte_eth_dev *dev,
+ uint16_t size,
+ void *user_data);
+
#endif
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
index a2e9e8ff..2a6375fe 100644
--- a/drivers/net/ark/ark_global.h
+++ b/drivers/net/ark/ark_global.h
@@ -64,7 +64,7 @@
#define ARK_RCPACING_BASE 0xb0000
#define ARK_EXTERNAL_BASE 0x100000
#define ARK_MPU_QOFFSET 0x00100
-#define ARK_MAX_PORTS 8
+#define ARK_MAX_PORTS RTE_MAX_ETHPORTS
#define offset8(n) n
#define offset16(n) ((n) / 2)
@@ -106,11 +106,12 @@ struct ark_user_ext {
void *);
void (*mac_addr_remove)(struct rte_eth_dev *, uint32_t, void *);
void (*mac_addr_set)(struct rte_eth_dev *, struct ether_addr *, void *);
+ int (*set_mtu)(struct rte_eth_dev *, uint16_t, void *);
};
struct ark_adapter {
/* User extension private data */
- void *user_data;
+ void *user_data[ARK_MAX_PORTS];
/* Pointers to packet generator and checker */
int start_pg;
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index fe6849f5..c746a0e2 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -71,8 +71,7 @@ static void avp_dev_close(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static int avp_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -113,9 +112,6 @@ static void avp_dev_stats_get(struct rte_eth_dev *dev,
static void avp_dev_stats_reset(struct rte_eth_dev *dev);
-#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
-
-
#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
@@ -393,7 +389,7 @@ static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
phys_addr_t host_phys_addr)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_mem_resource *resource;
struct rte_avp_memmap_info *info;
struct rte_avp_memmap *map;
@@ -446,7 +442,7 @@ avp_dev_version_check(uint32_t version)
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_avp_memmap_info *memmap;
struct rte_avp_device_info *info;
struct rte_mem_resource *resource;
@@ -582,7 +578,7 @@ _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_avp_device_info *host_info;
void *addr;
@@ -642,7 +638,7 @@ avp_dev_attach(struct rte_eth_dev *eth_dev)
* re-run the device create utility which will parse the new host info
* and setup the AVP device queue pointers.
*/
- ret = avp_dev_create(AVP_DEV_TO_PCI(eth_dev), eth_dev);
+ ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
ret);
@@ -696,7 +692,7 @@ static void
avp_dev_interrupt_handler(void *data)
{
struct rte_eth_dev *eth_dev = data;
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
uint32_t status, value;
int ret;
@@ -755,7 +751,7 @@ avp_dev_interrupt_handler(void *data)
static int
avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
int ret;
@@ -780,7 +776,7 @@ avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
static int
avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
int ret;
@@ -805,7 +801,7 @@ avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
static int
avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
int ret;
/* register a callback handler with UIO for interrupt notifications */
@@ -825,7 +821,7 @@ avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
static int
avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
uint32_t value;
@@ -986,7 +982,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
int ret;
- pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
eth_dev->dev_ops = &avp_eth_dev_ops;
eth_dev->rx_pkt_burst = &avp_recv_pkts;
eth_dev->tx_pkt_burst = &avp_xmit_pkts;
@@ -2011,7 +2007,7 @@ avp_dev_tx_queue_release(void *tx_queue)
static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_avp_device_info *host_info;
struct rte_avp_device_config config;
@@ -2206,8 +2202,7 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- dev_info->driver_name = "rte_avp_pmd";
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->max_rx_queues = avp->max_rx_queues;
dev_info->max_tx_queues = avp->max_tx_queues;
dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
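Both the ark and avp hunks drop their private *_DEV_TO_PCI macros in favour of the common RTE_ETH_DEV_TO_PCI() helper from rte_ethdev_pci.h. A short sketch of the resulting idiom; bar_addr is illustrative:

#include <rte_ethdev_pci.h>

/* Map an ethdev back to its PCI device and return the address of one of
 * its memory BARs. */
static void *
bar_addr(struct rte_eth_dev *eth_dev, unsigned int bar)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        return pci_dev->mem_resource[bar].addr;
}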
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 1a7e1c8e..06733d15 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -120,7 +120,7 @@ static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
static void bnx2x_free_mem(struct bnx2x_softc *sc);
static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc);
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc);
-static __attribute__ ((noinline))
+static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
@@ -887,7 +887,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
/*
* Post a slowpath command.
*
- * A slowpath command is used to propogate a configuration change through
+ * A slowpath command is used to propagate a configuration change through
* the controller in a controlled manner, allowing each STORM processor and
* other H/W blocks to phase in the change. The commands sent on the
* slowpath are referred to as ramrods. Depending on the ramrod used the
@@ -1962,7 +1962,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
}
/* stop the controller */
-__attribute__ ((noinline))
+__rte_noinline
int
bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link)
{
@@ -2002,7 +2002,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
/*
* Nothing to do during unload if previous bnx2x_nic_load()
- * did not completed succesfully - all resourses are released.
+ * did not complete successfully - all resources are released.
*/
if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
return 0;
@@ -3931,7 +3931,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
/*
- * If the olny PXP2_EOP_ERROR_BIT is set in
+ * If only the PXP2_EOP_ERROR_BIT is set in
* STS0 and STS1 - clear it
*
* probably we lose additional attentions between
@@ -5910,7 +5910,7 @@ static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close)
(val | HC_CONFIG_0_REG_BLOCK_DISABLE_0));
} else {
-/* Prevent incomming interrupts in IGU */
+/* Prevent incoming interrupts in IGU */
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
if (close)
@@ -7124,7 +7124,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
}
/* start the controller */
-static __attribute__ ((noinline))
+static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc)
{
uint32_t val;
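The bnx2x hunks replace the GCC-specific __attribute__ ((noinline)) spelling with the __rte_noinline macro from rte_common.h. A one-function sketch of the portable form:

#include <rte_common.h>

/* Kept out of line on purpose, e.g. to keep a large slow-path frame off
 * the fast path. */
static __rte_noinline int
slow_path_setup(void)
{
        return 0;
}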
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 6d7c002f..6f62a37f 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -119,7 +119,7 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
bnx2x_link_update(dev);
}
-static __rte_unused void
+static void
bnx2x_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -439,10 +439,10 @@ bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
static void
-bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info)
+bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
@@ -531,7 +531,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index c489cbee..6223cfef 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1371,9 +1371,9 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
cur_query_entry->kind = STATS_TYPE_PORT;
- /* For port query index is a DONT CARE */
+ /* For port query index is a DON'T CARE */
cur_query_entry->index = SC_PORT(sc);
- /* For port query funcID is a DONT CARE */
+ /* For port query funcID is a DON'T CARE */
cur_query_entry->funcID = htole16(SC_FUNC(sc));
cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
@@ -1385,7 +1385,7 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
cur_query_entry->kind = STATS_TYPE_PF;
- /* For PF query index is a DONT CARE */
+ /* For PF query index is a DON'T CARE */
cur_query_entry->index = SC_PORT(sc);
cur_query_entry->funcID = htole16(SC_FUNC(sc));
cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
@@ -1450,7 +1450,7 @@ void bnx2x_memset_stats(struct bnx2x_softc *sc)
if (sc->port.pmf && sc->port.port_stx)
bnx2x_port_stats_base_init(sc);
- /* mark the end of statistics initializiation */
+ /* mark the end of statistics initialization */
sc->stats_init = false;
}
@@ -1536,7 +1536,7 @@ bnx2x_stats_init(struct bnx2x_softc *sc)
bnx2x_port_stats_base_init(sc);
}
- /* mark the end of statistics initializiation */
+ /* mark the end of statistics initialization */
sc->stats_init = FALSE;
}
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h
index 955ea982..d7cc11be 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -282,7 +282,7 @@ struct bnx2x_vf_bulletin {
uint16_t version;
uint16_t length;
- uint64_t valid_bitmap; /* bitmap indicating wich fields
+ uint64_t valid_bitmap; /* bitmap indicating which fields
* hold valid values
*/
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 5808e1ae..5cce6647 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -2499,7 +2499,7 @@ struct shmem2_region {
* SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
* bit 28 when 1'b1 EEE was requested.
* bit 29 when 1'b1 tx lpi was requested.
- * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if
* 30:29 are 2'b11.
* bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
* value. When 1'b1 those bits contains a value times 16 microseconds.
@@ -3911,7 +3911,7 @@ struct client_init_rx_data
uint16_t bd_pause_thr_high /* number of remaining bds above which, we send un-pause message */;
uint16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
uint16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
- uint16_t rx_cos_mask /* the bits that will be set on pfc/ safc paket whith will be genratet when this ring is full. for regular flow control set this to 1 */;
+ uint16_t rx_cos_mask /* the bits that will be set on pfc/safc packet which will be generated when this ring is full. for regular flow control set this to 1 */;
uint16_t silent_vlan_value /* The vlan to compare, in case, silent vlan is set */;
uint16_t silent_vlan_mask /* The vlan mask, in case, silent vlan is set */;
uint32_t reserved6[2];
@@ -4903,7 +4903,7 @@ enum eth_tx_vlan_type
*/
enum eth_vlan_filter_mode
{
- ETH_VLAN_FILTER_ANY_VLAN /* Dont filter by vlan */,
+ ETH_VLAN_FILTER_ANY_VLAN /* Don't filter by vlan */,
ETH_VLAN_FILTER_SPECIFIC_VLAN /* Only the vlan_id is allowed */,
ETH_VLAN_FILTER_CLASSIFY /* Vlan will be added to CAM for classification */,
MAX_ETH_VLAN_FILTER_MODE};
@@ -4937,7 +4937,7 @@ struct mac_configuration_entry
#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) /* BitField flags (use enum eth_vlan_filter_mode) */
#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
-#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) /* BitField flags BitField flags 0 - cant remove vlan 1 - can remove vlan. relevant only to everest1 */
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) /* BitField flags BitField flags 0 - can't remove vlan 1 - can remove vlan. relevant only to everest1 */
#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) /* BitField flags BitField flags 0 - not broadcast 1 - broadcast. relevant only to everest1 */
#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
@@ -5024,7 +5024,7 @@ struct tstorm_eth_function_common_config
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) /* BitField config_flagsGeneral configuration flags RSS mode of operation (use enum eth_rss_mode) */
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) /* BitField config_flagsGeneral configuration flags 0 - Dont filter by vlan, 1 - Filter according to the vlans specificied in mac_filter_config */
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) /* BitField config_flagsGeneral configuration flags 0 - Don't filter by vlan, 1 - Filter according to the vlans specified in mac_filter_config */
#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) /* BitField config_flagsGeneral configuration flags */
#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
@@ -5634,7 +5634,7 @@ struct flow_control_configuration
struct function_start_data
{
uint8_t function_mode /* the function mode */;
- uint8_t allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independant function mode. (E2/E3 Only) */;
+ uint8_t allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode. (E2/E3 Only) */;
uint16_t sd_vlan_tag /* value of Vlan in case of switch depended multi-function mode */;
uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */;
uint8_t path_id;
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index d25e2803..4576c565 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -304,7 +304,7 @@ static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mod
/*
- * congestion managment port init api description
+ * congestion management port init api description
* the api works as follows:
* the driver should pass the cmng_init_input struct, the port_init function
* will prepare the required internal ram structure which will be passed back
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 22f2dc95..ef7f9fea 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -2676,7 +2676,7 @@ static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
* @cmd:
* @start_cnt: first line in the ramrod data that may be used
*
- * This function is called iff there is enough place for the current command in
+ * This function is called if there is enough room for the current command in
* the ramrod data.
* Returns number of lines filled in the ramrod data in total.
*/
@@ -2834,7 +2834,7 @@ static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
o->clear_sched(o);
- /* The below may be TRUE iff there was enough room in ramrod
+ /* The below may be TRUE if there was enough room in ramrod
* data for all pending commands and for the current
* command. Otherwise the current command would have been added
* to the pending commands and p->mcast_list_len would have been
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index 9c1f55df..e7ec96e9 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -1116,10 +1116,10 @@ struct ecore_config_rss_params {
/* RSS hash values */
uint32_t rss_key[10];
- /* valid only iff ECORE_RSS_UPDATE_TOE is set */
+ /* valid only if ECORE_RSS_UPDATE_TOE is set */
uint16_t toe_rss_bitmap;
- /* valid iff ECORE_RSS_TUNNELING is set */
+ /* valid if ECORE_RSS_TUNNELING is set */
uint16_t tunnel_value;
uint16_t tunnel_mask;
};
@@ -1286,14 +1286,14 @@ struct rxq_pause_params {
uint16_t bd_th_hi;
uint16_t rcq_th_lo;
uint16_t rcq_th_hi;
- uint16_t sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */
- uint16_t sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */
+ uint16_t sge_th_lo; /* valid if ECORE_Q_FLG_TPA */
+ uint16_t sge_th_hi; /* valid if ECORE_Q_FLG_TPA */
uint16_t pri_map;
};
/* general */
struct ecore_general_setup_params {
- /* valid iff ECORE_Q_FLG_STATS */
+ /* valid if ECORE_Q_FLG_STATS */
uint8_t stat_id;
uint8_t spcl_id;
@@ -1312,19 +1312,19 @@ struct ecore_rxq_setup_params {
uint8_t fw_sb_id;
uint8_t cl_qzone_id;
- /* valid iff ECORE_Q_FLG_TPA */
+ /* valid if ECORE_Q_FLG_TPA */
uint16_t tpa_agg_sz;
uint8_t max_tpa_queues;
uint8_t rss_engine_id;
- /* valid iff ECORE_Q_FLG_MCAST */
+ /* valid if ECORE_Q_FLG_MCAST */
uint8_t mcast_engine_id;
uint8_t cache_line_log;
uint8_t sb_cq_index;
- /* valid iff BXN2X_Q_FLG_SILENT_VLAN_REM */
+ /* valid if BXN2X_Q_FLG_SILENT_VLAN_REM */
uint16_t silent_removal_value;
uint16_t silent_removal_mask;
};
@@ -1335,12 +1335,12 @@ struct ecore_txq_setup_params {
uint8_t fw_sb_id;
uint8_t sb_cq_index;
- uint8_t cos; /* valid iff ECORE_Q_FLG_COS */
+ uint8_t cos; /* valid if ECORE_Q_FLG_COS */
uint16_t traffic_type;
/* equals to the leading rss client id, used for TX classification*/
uint8_t tss_leading_cl_id;
- /* valid iff ECORE_Q_FLG_DEF_VLAN */
+ /* valid if ECORE_Q_FLG_DEF_VLAN */
uint16_t default_vlan;
};
@@ -1733,7 +1733,7 @@ void ecore_init_mcast_obj(struct bnx2x_softc *sc,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * Return: 0 is operation was successfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 9ffa7dc6..9d0f3136 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -2702,7 +2702,7 @@ static elink_status_t elink_eee_initial_config(struct elink_params *params,
{
vars->eee_status |= ((uint32_t) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
- /* Propogate params' bits --> vars (for migration exposure) */
+ /* Propagate params' bits --> vars (for migration exposure) */
if (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)
vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
else
@@ -3450,7 +3450,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
};
PMD_DRV_LOG(DEBUG, "Enable Auto Negotiation for KR");
- /* Set to default registers that may be overriden by 10G force */
+ /* Set to default registers that may be overridden by 10G force */
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -4363,14 +4363,14 @@ static void elink_sync_link(struct elink_params *params,
switch (vars->link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case ELINK_LINK_10THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_10TFD:
vars->line_speed = ELINK_SPEED_10;
break;
case ELINK_LINK_100TXHD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_100T4:
case ELINK_LINK_100TXFD:
vars->line_speed = ELINK_SPEED_100;
@@ -4378,14 +4378,14 @@ static void elink_sync_link(struct elink_params *params,
case ELINK_LINK_1000THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_1000TFD:
vars->line_speed = ELINK_SPEED_1000;
break;
case ELINK_LINK_2500THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ /* Fall through */
case ELINK_LINK_2500TFD:
vars->line_speed = ELINK_SPEED_2500;
break;
@@ -6341,7 +6341,7 @@ elink_status_t elink_link_update(struct elink_params * params,
* hence its link is expected to be down
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
- * - DEFAULT should be overriden during initialiazation
+ * - DEFAULT should be overridden during initialization
*/
PMD_DRV_LOG(DEBUG, "Invalid link indication"
"mpc=0x%x. DISABLING LINK !!!",
@@ -12680,7 +12680,7 @@ static void elink_check_over_curr(struct elink_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-/* Returns 0 if no change occured since last check; 1 otherwise. */
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
static uint8_t elink_analyze_link_error(struct elink_params *params,
struct elink_vars *vars,
uint32_t status, uint32_t phy_flag,
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index 0fffe356..b03f65dc 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -38,6 +38,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_bnxt.a
+EXPORT_MAP := rte_pmd_bnxt_version.map
+
LIBABIVER := 1
CFLAGS += -O3
@@ -60,10 +62,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c
#
# Export include files
#
SYMLINK-y-include +=
+SYMLINK-$(CONFIG_RTE_LIBRTE_BNXT_PMD)-include := rte_pmd_bnxt.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 4418c7fd..405d94de 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -35,8 +35,10 @@
#define _BNXT_H_
#include <inttypes.h>
+#include <stdbool.h>
#include <sys/queue.h>
+#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_lcore.h>
@@ -44,8 +46,44 @@
#include "bnxt_cpr.h"
-#define BNXT_MAX_MTU 9000
+#define BNXT_MAX_MTU 9500
#define VLAN_TAG_SIZE 4
+#define BNXT_MAX_LED 4
+
+struct bnxt_led_info {
+ uint8_t led_id;
+ uint8_t led_type;
+ uint8_t led_group_id;
+ uint8_t unused;
+ uint16_t led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \
+ rte_cpu_to_le_16(HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT))
+
+ uint16_t led_color_caps;
+};
+
+struct bnxt_led_cfg {
+ uint8_t led_id;
+ uint8_t led_state;
+ uint8_t led_color;
+ uint8_t unused;
+ uint16_t led_blink_on;
+ uint16_t led_blink_off;
+ uint8_t led_group_id;
+ uint8_t rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA \
+ (HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF | \
+ HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT 6
+
+#define BNXT_LED_DFLT_ENABLES(x) \
+ rte_cpu_to_le_32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
enum bnxt_hw_context {
HW_CONTEXT_NONE = 0,
@@ -54,17 +92,32 @@ enum bnxt_hw_context {
HW_CONTEXT_IS_LB = 3,
};
-struct bnxt_vf_info {
- uint16_t fw_fid;
- uint8_t mac_addr[ETHER_ADDR_LEN];
- uint16_t max_rsscos_ctx;
- uint16_t max_cp_rings;
- uint16_t max_tx_rings;
- uint16_t max_rx_rings;
- uint16_t max_l2_ctx;
- uint16_t max_vnics;
- uint16_t vlan;
- struct bnxt_pf_info *pf;
+struct bnxt_vlan_table_entry {
+ uint16_t tpid;
+ uint16_t vid;
+} __attribute__((packed));
+
+struct bnxt_vlan_antispoof_table_entry {
+ uint16_t tpid;
+ uint16_t vid;
+ uint16_t mask;
+} __attribute__((packed));
+
+struct bnxt_child_vf_info {
+ void *req_buf;
+ struct bnxt_vlan_table_entry *vlan_table;
+ struct bnxt_vlan_antispoof_table_entry *vlan_as_table;
+ STAILQ_HEAD(, bnxt_filter_info) filter;
+ uint32_t func_cfg_flags;
+ uint32_t l2_rx_mask;
+ uint16_t fid;
+ uint16_t max_tx_rate;
+ uint16_t dflt_vlan;
+ uint16_t vlan_count;
+ uint8_t mac_spoof_en;
+ uint8_t vlan_spoof_en;
+ bool random_mac;
+ bool persist_stats;
};
struct bnxt_pf_info {
@@ -73,22 +126,20 @@ struct bnxt_pf_info {
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
- uint32_t fw_fid;
uint8_t port_id;
- uint8_t mac_addr[ETHER_ADDR_LEN];
- uint16_t max_rsscos_ctx;
- uint16_t max_cp_rings;
- uint16_t max_tx_rings;
- uint16_t max_rx_rings;
- uint16_t max_l2_ctx;
- uint16_t max_vnics;
uint16_t first_vf_id;
uint16_t active_vfs;
uint16_t max_vfs;
+ uint32_t func_cfg_flags;
void *vf_req_buf;
phys_addr_t vf_req_buf_dma_addr;
uint32_t vf_req_fwd[8];
- struct bnxt_vf_info *vf;
+ uint16_t total_vnics;
+ struct bnxt_child_vf_info *vf_info;
+#define BNXT_EVB_MODE_NONE 0
+#define BNXT_EVB_MODE_VEB 1
+#define BNXT_EVB_MODE_VEPA 2
+ uint8_t evb_mode;
};
/* Max wait time is 10 * 100ms = 1s */
@@ -120,6 +171,7 @@ struct bnxt_cos_queue_info {
uint8_t profile;
};
+#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
@@ -129,6 +181,9 @@ struct bnxt {
uint32_t flags;
#define BNXT_FLAG_REGISTERED (1 << 0)
#define BNXT_FLAG_VF (1 << 1)
+#define BNXT_FLAG_PORT_STATS (1 << 2)
+#define BNXT_FLAG_JUMBO (1 << 3)
+#define BNXT_FLAG_SHORT_CMD (1 << 4)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR_ENABLED(bp) ((bp)->port_partition_type)
@@ -137,10 +192,16 @@ struct bnxt {
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
struct bnxt_rx_queue **rx_queues;
+ const void *rx_mem_zone;
+ struct rx_port_stats *hw_rx_port_stats;
+ phys_addr_t hw_rx_port_stats_map;
unsigned int tx_nr_rings;
unsigned int tx_cp_nr_rings;
struct bnxt_tx_queue **tx_queues;
+ const void *tx_mem_zone;
+ struct tx_port_stats *hw_tx_port_stats;
+ phys_addr_t hw_tx_port_stats_map;
/* Default completion ring */
struct bnxt_cp_ring_info *def_cp_ring;
@@ -167,6 +228,8 @@ struct bnxt {
uint16_t hwrm_cmd_seq;
void *hwrm_cmd_resp_addr;
phys_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_short_cmd_req_addr;
+ phys_addr_t hwrm_short_cmd_req_dma_addr;
rte_spinlock_t hwrm_lock;
uint16_t max_req_len;
uint16_t max_resp_len;
@@ -174,12 +237,36 @@ struct bnxt {
struct bnxt_link_info link_info;
struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
+ uint16_t fw_fid;
+ uint8_t dflt_mac_addr[ETHER_ADDR_LEN];
+ uint16_t max_rsscos_ctx;
+ uint16_t max_cp_rings;
+ uint16_t max_tx_rings;
+ uint16_t max_rx_rings;
+ uint16_t max_l2_ctx;
+ uint16_t max_vnics;
+ uint16_t max_stat_ctx;
+ uint16_t vlan;
struct bnxt_pf_info pf;
- struct bnxt_vf_info vf;
uint8_t port_partition_type;
uint8_t dev_stopped;
+ uint8_t vxlan_port_cnt;
+ uint8_t geneve_port_cnt;
+ uint16_t vxlan_port;
+ uint16_t geneve_port;
+ uint16_t vxlan_fw_dst_port_id;
+ uint16_t geneve_fw_dst_port_id;
+ uint32_t fw_ver;
+ rte_atomic64_t rx_mbuf_alloc_fail;
+
+ struct bnxt_led_info leds[BNXT_MAX_LED];
+ uint8_t num_leds;
};
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
+int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
+
+#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG 0x6
+bool is_bnxt_supported(struct rte_eth_dev *dev);
#endif
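Among the new bnxt definitions above, BNXT_LED_DFLT_ENABLES(x) places each LED's enable bits in its own 6-bit-wide group (BNXT_LED_DFLT_ENA_SHIFT) of the little-endian enables word. A usage note, assuming the header above is on the include path:

#include "bnxt.h"

/* The enables word for LED 1 is the LED0 enable mask shifted one 6-bit
 * group to the left, i.e. rte_cpu_to_le_32(BNXT_LED_DFLT_ENA << 6). */
static uint32_t
led1_enables(void)
{
        return BNXT_LED_DFLT_ENABLES(1);
}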
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 3aedcb8d..68979bc4 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -58,63 +58,123 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_link_update_op(bp->eth_dev, 0);
break;
default:
- RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n", event_id);
+ RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
{
+ struct hwrm_exec_fwd_resp_input *fwreq;
struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
struct input *fwd_cmd;
- uint16_t logical_vf_id, error_code;
+ uint16_t fw_vf_id;
+ uint16_t vf_id;
+ uint16_t req_len;
+ int rc;
- /* Qualify the fwd request */
- if (fwd_cmpl->source_id < bp->pf.first_vf_id) {
- RTE_LOG(ERR, PMD,
- "FWD req's source_id 0x%x > first_vf_id 0x%x\n",
- fwd_cmpl->source_id, bp->pf.first_vf_id);
- error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED;
- goto reject;
- } else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT >
- 128 - sizeof(struct input)) {
- RTE_LOG(ERR, PMD,
- "FWD req's cmd len 0x%x > 108 bytes allowed\n",
- fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT);
- error_code = HWRM_ERR_CODE_INVALID_PARAMS;
- goto reject;
+ if (bp->pf.active_vfs <= 0) {
+ RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+ return;
}
+ /* Qualify the fwd request */
+ fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
+ vf_id = fw_vf_id - bp->pf.first_vf_id;
+
+ req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
+ HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
+ HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
+ if (req_len > sizeof(fwreq->encap_request))
+ req_len = sizeof(fwreq->encap_request);
+
/* Locate VF's forwarded command */
- logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id;
- fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf +
- (logical_vf_id * 128));
-
- /* Provision the request */
- switch (fwd_cmd->req_type) {
- case HWRM_CFA_L2_FILTER_ALLOC:
- case HWRM_CFA_L2_FILTER_FREE:
- case HWRM_CFA_L2_FILTER_CFG:
- case HWRM_CFA_L2_SET_RX_MASK:
- break;
- default:
- error_code = HWRM_ERR_CODE_INVALID_PARAMS;
+ fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
+
+ if (fw_vf_id < bp->pf.first_vf_id ||
+ fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
+ RTE_LOG(ERR, PMD,
+ "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
+ fw_vf_id, bp->pf.first_vf_id,
+ (bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
+ bp->pf.first_vf_id, bp->pf.active_vfs);
goto reject;
}
- /* Forward */
- fwd_cmd->target_id = fwd_cmpl->source_id;
- bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd);
- return;
+ if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+ /*
+ * In older firmware versions, the MAC had to be all zeros for
+ * the VF to set its MAC via hwrm_func_vf_cfg. Set to all
+ * zeros if it's being configured and has been ok'd by caller.
+ */
+ if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
+ struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;
+
+ if (vfc->enables &
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
+ bnxt_hwrm_func_vf_mac(bp, vf_id,
+ (const uint8_t *)"\x00\x00\x00\x00\x00");
+ }
+ }
+ if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
+ struct hwrm_cfa_l2_set_rx_mask_input *srm =
+ (void *)fwd_cmd;
+
+ srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
+ srm->num_vlan_tags = rte_cpu_to_le_32(0);
+ srm->mask &= ~rte_cpu_to_le_32(
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
+ }
+ /* Forward */
+ rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to send FWD req VF 0x%x, type 0x%x.\n",
+ fw_vf_id - bp->pf.first_vf_id,
+ rte_le_to_cpu_16(fwd_cmd->req_type));
+ }
+ return;
+ }
reject:
- /* TODO: Encap the reject error resp into the hwrm_err_iput? */
- /* Use the error_code for the reject cmd */
- RTE_LOG(ERR, PMD,
- "Error 0x%x found in the forward request\n", error_code);
+ rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
+ fw_vf_id - bp->pf.first_vf_id,
+ rte_le_to_cpu_16(fwd_cmd->req_type));
+ }
+
+ return;
}
/* For the default completion ring only */
+int bnxt_alloc_def_cp_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ int rc;
+
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ 0, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
+ if (BNXT_PF(bp))
+ rc = bnxt_hwrm_func_cfg_def_cp(bp);
+ else
+ rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
+
+err_out:
+ return rc;
+}
+
void bnxt_free_def_cp_ring(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
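The reworked forwarding path above validates a VF's forwarded HWRM command before executing it: the source function id must fall inside [first_vf_id, first_vf_id + active_vfs) and the request length is clamped to the encap buffer. A small sketch of that range check; the helper name is illustrative:

#include <stdbool.h>
#include <stdint.h>

/* True when a forwarded request's firmware function id belongs to one of
 * this PF's active child VFs. */
static bool
fwd_src_in_range(uint16_t fw_vf_id, uint16_t first_vf_id, uint16_t active_vfs)
{
        return fw_vf_id >= first_vf_id &&
               fw_vf_id < (uint16_t)(first_vf_id + active_vfs);
}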
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index 83e53761..a6e87858 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -33,6 +33,7 @@
#ifndef _BNXT_CPR_H_
#define _BNXT_CPR_H_
+#include <stdbool.h>
#include <rte_io.h>
@@ -56,6 +57,19 @@
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
((cpr)->cp_doorbell))
+#define B_CP_DB_ARM(cpr) rte_write32((DB_KEY_CP), ((cpr)->cp_doorbell))
+#define B_CP_DB_DISARM(cpr) (*(uint32_t *)((cpr)->cp_doorbell) = \
+ DB_KEY_CP | DB_IRQ_DIS)
+
+#define B_CP_DB_IDX_ARM(cpr, cons) \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
+ (cons)))
+
+#define B_CP_DB_IDX_DISARM(cpr, cons) do { \
+ rte_smp_wmb(); \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
+ (cons))); \
+} while (0)
#define B_CP_DIS_DB(cpr, raw_cons) \
rte_write32((DB_CP_FLAGS | \
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
@@ -75,6 +89,8 @@ struct bnxt_cp_ring_info {
uint32_t hw_stats_ctx_id;
struct bnxt_ring *cp_ring_struct;
+ uint16_t cp_cons;
+ bool v;
};
#define RX_CMP_L2_ERRORS \
@@ -82,6 +98,7 @@ struct bnxt_cp_ring_info {
struct bnxt;
+int bnxt_alloc_def_cp_ring(struct bnxt *bp);
void bnxt_free_def_cp_ring(struct bnxt *bp);
int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 81711e48..c9d11228 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -60,6 +60,7 @@ static const char bnxt_version[] =
#define PCI_VENDOR_ID_BROADCOM 0x14E4
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
@@ -96,6 +97,8 @@ static const char bnxt_version[] =
#define BROADCOM_DEV_ID_57416_MF 0x16ee
static const struct rte_pci_id bnxt_pci_id_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+ BROADCOM_DEV_ID_STRATUS_NIC_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -141,6 +144,8 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
+static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+
/***********************/
/*
@@ -198,6 +203,14 @@ static int bnxt_init_chip(struct bnxt *bp)
struct rte_eth_link new;
int rc;
+ if (bp->eth_dev->data->mtu > ETHER_MTU) {
+ bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ bp->flags |= BNXT_FLAG_JUMBO;
+ } else {
+ bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ }
+
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
@@ -228,28 +241,31 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
- rc);
+ RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
+ i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
- "HWRM vnic ctx alloc failure rc: %x\n", rc);
+ "HWRM vnic %d ctx alloc failure rc: %x\n",
+ i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
+ RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
+ i, rc);
goto err_out;
}
rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
- rc);
+ RTE_LOG(ERR, PMD,
+ "HWRM vnic %d filter failure rc: %x\n",
+ i, rc);
goto err_out;
}
if (vnic->rss_table && vnic->hash_type) {
@@ -269,13 +285,20 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
- "HWRM vnic set RSS failure rc: %x\n",
- rc);
+ "HWRM vnic %d set RSS failure rc: %x\n",
+ i, rc);
goto err_out;
}
}
+
+ bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
+ else
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
}
- rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
RTE_LOG(ERR, PMD,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
@@ -338,25 +361,19 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* MAC Specifics */
dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
dev_info->max_hash_mac_addrs = 0;
/* PF/VF specifics */
- if (BNXT_PF(bp)) {
- dev_info->max_rx_queues = bp->pf.max_rx_rings;
- dev_info->max_tx_queues = bp->pf.max_tx_rings;
- dev_info->max_vfs = bp->pf.active_vfs;
- dev_info->reta_size = bp->pf.max_rsscos_ctx;
- max_vnics = bp->pf.max_vnics;
- } else {
- dev_info->max_rx_queues = bp->vf.max_rx_rings;
- dev_info->max_tx_queues = bp->vf.max_tx_rings;
- dev_info->reta_size = bp->vf.max_rsscos_ctx;
- max_vnics = bp->vf.max_vnics;
- }
+ if (BNXT_PF(bp))
+ dev_info->max_vfs = bp->pdev->max_vfs;
+ dev_info->max_rx_queues = bp->max_rx_rings;
+ dev_info->max_tx_queues = bp->max_tx_rings;
+ dev_info->reta_size = bp->max_rsscos_ctx;
+ max_vnics = bp->max_vnics;
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
@@ -366,7 +383,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
/* *INDENT-OFF* */
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -485,44 +507,29 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int vlan_mask = 0;
int rc;
bp->dev_stopped = 0;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
- rc = -1;
- goto error;
- }
-
- rc = bnxt_setup_int(bp);
- if (rc)
- goto error;
-
- rc = bnxt_alloc_mem(bp);
- if (rc)
- goto error;
-
- rc = bnxt_request_int(bp);
- if (rc)
- goto error;
rc = bnxt_init_nic(bp);
if (rc)
goto error;
- bnxt_enable_int(bp);
-
bnxt_link_update_op(eth_dev, 0);
+
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ vlan_mask |= ETH_VLAN_FILTER_MASK;
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ vlan_mask |= ETH_VLAN_STRIP_MASK;
+ bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
+
return 0;
error:
bnxt_shutdown_nic(bp);
- bnxt_disable_int(bp);
- bnxt_free_int(bp);
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
- bnxt_free_mem(bp);
return rc;
}
@@ -554,8 +561,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_link.link_status = 0;
}
bnxt_set_hwrm_link_config(bp, false);
- bnxt_disable_int(bp);
- bnxt_free_int(bp);
+ bnxt_hwrm_port_clr_stats(bp);
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}
@@ -651,7 +657,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
- return bnxt_hwrm_set_filter(bp, vnic, filter);
+ return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
@@ -669,7 +675,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
RTE_LOG(ERR, PMD,
- "Failed to retrieve link rc = 0x%x!", rc);
+ "Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
@@ -700,7 +706,7 @@ static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags |= BNXT_VNIC_INFO_PROMISC;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
@@ -714,7 +720,7 @@ static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
@@ -728,7 +734,7 @@ static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
@@ -742,7 +748,7 @@ static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
vnic = &bp->vnic_info[0];
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
- bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
@@ -918,7 +924,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
}
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
- struct rte_eth_fc_conf *fc_conf __rte_unused)
+ struct rte_eth_fc_conf *fc_conf)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct rte_eth_link link_info;
@@ -1003,6 +1009,514 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
return bnxt_set_hwrm_link_config(bp, true);
}
+/* Add UDP tunneling port */
+static int
+bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t tunnel_type = 0;
+ int rc = 0;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (bp->vxlan_port_cnt) {
+ RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ udp_tunnel->udp_port);
+ if (bp->vxlan_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ return -ENOSPC;
+ }
+ bp->vxlan_port_cnt++;
+ return 0;
+ }
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+ bp->vxlan_port_cnt++;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (bp->geneve_port_cnt) {
+ RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ udp_tunnel->udp_port);
+ if (bp->geneve_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ return -ENOSPC;
+ }
+ bp->geneve_port_cnt++;
+ return 0;
+ }
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
+ bp->geneve_port_cnt++;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ return -ENOTSUP;
+ }
+ rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
+ tunnel_type);
+ return rc;
+}
+
+static int
+bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t tunnel_type = 0;
+ uint16_t port = 0;
+ int rc = 0;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (!bp->vxlan_port_cnt) {
+ RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ return -EINVAL;
+ }
+ if (bp->vxlan_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ udp_tunnel->udp_port, bp->vxlan_port);
+ return -EINVAL;
+ }
+ if (--bp->vxlan_port_cnt)
+ return 0;
+
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+ port = bp->vxlan_fw_dst_port_id;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (!bp->geneve_port_cnt) {
+ RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ return -EINVAL;
+ }
+ if (bp->geneve_port != udp_tunnel->udp_port) {
+ RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ udp_tunnel->udp_port, bp->geneve_port);
+ return -EINVAL;
+ }
+ if (--bp->geneve_port_cnt)
+ return 0;
+
+ tunnel_type =
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
+ port = bp->geneve_fw_dst_port_id;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
+ if (!rc) {
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
+ bp->vxlan_port = 0;
+ if (tunnel_type ==
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
+ bp->geneve_port = 0;
+ }
+ return rc;
+}
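The add/del pair above reference-counts the single VXLAN and single GENEVE destination port the NIC can program: repeated adds of the same port only bump the count, a different port is rejected, and the firmware entry is freed only when the count drops back to zero. A minimal sketch of the same pattern, using a hypothetical tunnel_slot struct instead of struct bnxt:

#include <errno.h>
#include <stdint.h>

struct tunnel_slot {
	uint16_t port;	/* currently programmed UDP port, 0 if none */
	uint32_t cnt;	/* number of callers that asked for it */
};

static int tunnel_port_add(struct tunnel_slot *s, uint16_t port)
{
	if (s->cnt) {
		if (s->port != port)
			return -ENOSPC;	/* only one port per tunnel type */
		s->cnt++;		/* same port: just take another reference */
		return 0;
	}
	s->port = port;			/* first user: program the hardware here */
	s->cnt = 1;
	return 0;
}

static int tunnel_port_del(struct tunnel_slot *s, uint16_t port)
{
	if (!s->cnt || s->port != port)
		return -EINVAL;
	if (--s->cnt == 0)
		s->port = 0;		/* last user gone: free the firmware entry here */
	return 0;
}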
+
+static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+ struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+ int rc = 0;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+
+ /* Cycle through all VNICs */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ /*
+ * For each VNIC and each associated filter(s)
+ * if VLAN exists && VLAN matches vlan_id
+ * remove the MAC+VLAN filter
+ * add a new MAC only filter
+ * else
+ * VLAN filter doesn't exist, just skip and continue
+ */
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk &&
+ filter->l2_ovlan == vlan_id) {
+ /* Must delete the filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_filter(bp, filter);
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+
+ /*
+ * Need to examine to see if the MAC
+ * filter already existed or not before
+ * allocating a new one
+ */
+
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ RTE_LOG(ERR, PMD,
+ "MAC/VLAN filter alloc failed\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter,
+ new_filter, next);
+ /* Inherit MAC from previous filter */
+ new_filter->mac_index =
+ filter->mac_index;
+ memcpy(new_filter->l2_addr,
+ filter->l2_addr, ETHER_ADDR_LEN);
+ /* MAC only filter */
+ rc = bnxt_hwrm_set_filter(bp,
+ vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ RTE_LOG(INFO, PMD,
+ "Del Vlan filter for %d\n",
+ vlan_id);
+ }
+ filter = temp_filter;
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
+static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+ struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+ int rc = 0;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+
+ /* Cycle through all VNICs */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ /*
+ * For each VNIC and each associated filter(s)
+ * if VLAN exists:
+ * if VLAN matches vlan_id
+ * VLAN filter already exists, just skip and continue
+ * else
+ * add a new MAC+VLAN filter
+ * else
+ * Remove the old MAC only filter
+ * Add a new MAC+VLAN filter
+ */
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk) {
+ if (filter->l2_ovlan == vlan_id)
+ goto cont;
+ } else {
+ /* Must delete the MAC filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_filter(bp, filter);
+ filter->l2_ovlan = 0;
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+ }
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ RTE_LOG(ERR, PMD,
+ "MAC/VLAN filter alloc failed\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
+ next);
+ /* Inherit MAC from the previous filter */
+ new_filter->mac_index = filter->mac_index;
+ memcpy(new_filter->l2_addr, filter->l2_addr,
+ ETHER_ADDR_LEN);
+ /* MAC + VLAN ID filter */
+ new_filter->l2_ovlan = vlan_id;
+ new_filter->l2_ovlan_mask = 0xF000;
+ new_filter->enables |= en;
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ RTE_LOG(INFO, PMD,
+ "Added Vlan filter for %d\n", vlan_id);
+cont:
+ filter = temp_filter;
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
+static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ /* These operations apply to ALL existing MAC/VLAN filters */
+ if (on)
+ return bnxt_add_vlan_filter(bp, vlan_id);
+ else
+ return bnxt_del_vlan_filter(bp, vlan_id);
+}
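Both filter walks above save STAILQ_NEXT() into temp_filter before touching the current element, because STAILQ_REMOVE() invalidates the iterator. A standalone sketch of that remove-while-iterating idiom, assuming a simple int payload:

#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int val;
	STAILQ_ENTRY(node) next;
};
STAILQ_HEAD(node_head, node);

/* Remove every node whose value matches 'val' while walking the list. */
static void remove_matching(struct node_head *head, int val)
{
	struct node *n = STAILQ_FIRST(head);
	struct node *tmp;

	while (n) {
		tmp = STAILQ_NEXT(n, next);	/* save before any removal */
		if (n->val == val) {
			STAILQ_REMOVE(head, n, node, next);
			free(n);
		}
		n = tmp;
	}
}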
+
+static void
+bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ unsigned int i;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
+ /* Remove any VLAN filters programmed */
+ for (i = 0; i < 4095; i++)
+ bnxt_del_vlan_filter(bp, i);
+ }
+ RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
+ dev->data->dev_conf.rxmode.hw_vlan_filter);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ vnic->vlan_strip = true;
+ else
+ vnic->vlan_strip = false;
+ bnxt_hwrm_vnic_cfg(bp, vnic);
+ }
+ RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
+ dev->data->dev_conf.rxmode.hw_vlan_strip);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+}
+
+static void
+bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ /* Default Filter is tied to VNIC 0 */
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_filter_info *filter;
+ int rc;
+
+ if (BNXT_VF(bp))
+ return;
+
+ memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
+ memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ /* Default Filter is at Index 0 */
+ if (filter->mac_index != 0)
+ continue;
+ rc = bnxt_hwrm_clear_filter(bp, filter);
+ if (rc)
+ break;
+ memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables |=
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+ if (rc)
+ break;
+ filter->mac_index = 0;
+ RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ }
+}
+
+static int
+bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ char *mc_addr_list = (char *)mc_addr_set;
+ struct bnxt_vnic_info *vnic;
+ uint32_t off = 0, i = 0;
+
+ vnic = &bp->vnic_info[0];
+
+ if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
+ vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ goto allmulti;
+ }
+
+ /* TODO Check for Duplicate mcast addresses */
+ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+ for (i = 0; i < nb_mc_addr; i++) {
+ memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
+ off += ETHER_ADDR_LEN;
+ }
+
+ vnic->mc_addr_cnt = i;
+
+allmulti:
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
+static int
+bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
+ uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
+ uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
+ int ret;
+
+ ret = snprintf(fw_version, fw_size, "%d.%d.%d",
+ fw_major, fw_minor, fw_updt);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
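bnxt_fw_version_get() unpacks the 8-bit fields of bp->fw_ver and, when the caller's buffer is too small, returns the size that would have been needed (including the terminating NUL), which is how the ethdev fw_version_get callback reports truncation. A self-contained sketch of the same unpack-and-format step, assuming the (maj << 24) | (min << 16) | (upd << 8) layout used elsewhere in this patch:

#include <stdint.h>
#include <stdio.h>

/* Returns 0 on success, or the buffer size needed (incl. '\0') if too small. */
static int fw_version_to_str(uint32_t fw_ver, char *buf, size_t size)
{
	uint8_t maj = (fw_ver >> 24) & 0xff;
	uint8_t min = (fw_ver >> 16) & 0xff;
	uint8_t upd = (fw_ver >> 8) & 0xff;
	int need = snprintf(buf, size, "%u.%u.%u", maj, min, upd) + 1;

	return (size < (size_t)need) ? need : 0;
}

int main(void)
{
	char buf[16];

	if (fw_version_to_str((20u << 24) | (8u << 16) | (3u << 8),
			      buf, sizeof(buf)) == 0)
		printf("%s\n", buf);	/* prints 20.8.3 */
	return 0;
}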
+
+static void
+bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct bnxt_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = 0;
+ qinfo->conf.rx_deferred_start = 0;
+}
+
+static void
+bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct bnxt_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_eth_dev_info dev_info;
+ uint32_t max_dev_mtu;
+ uint32_t rc = 0;
+ uint32_t i;
+
+ bnxt_dev_info_get_op(eth_dev, &dev_info);
+ max_dev_mtu = dev_info.max_rx_pktlen -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
+
+ if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
+ RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ ETHER_MIN_MTU, max_dev_mtu);
+ return -EINVAL;
+ }
+
+
+ if (new_mtu > ETHER_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ } else {
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ }
+
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+
+ eth_dev->data->mtu = new_mtu;
+ RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ break;
+
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
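bnxt_mtu_set_op() converts the L3 MTU into an on-wire frame budget by adding the Ethernet header, the CRC and room for two VLAN tags, and feeds that value to both max_rx_pkt_len and each VNIC's MRU. A worked example with the usual constants (14-byte header, 4-byte CRC, 4-byte VLAN tag):

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN	14
#define ETHER_CRC_LEN	4
#define VLAN_TAG_SIZE	4

static uint32_t mtu_to_frame_len(uint32_t mtu)
{
	/* header + payload + CRC, with room for an outer and an inner VLAN tag */
	return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
}

int main(void)
{
	printf("%u\n", mtu_to_frame_len(1500));	/* 1526 */
	printf("%u\n", mtu_to_frame_len(9000));	/* 9026 */
	return 0;
}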
+
+static int
+bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint16_t vlan = bp->vlan;
+ int rc;
+
+ if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "PVID cannot be modified for this function\n");
+ return -ENOTSUP;
+ }
+ bp->vlan = on ? pvid : 0;
+
+ rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
+ if (rc)
+ bp->vlan = vlan;
+ return rc;
+}
+
+static int
+bnxt_dev_led_on_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, true);
+}
+
+static int
+bnxt_dev_led_off_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ return bnxt_hwrm_port_led_cfg(bp, false);
+}
+
/*
* Initialization
*/
@@ -1034,6 +1548,22 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.mac_addr_remove = bnxt_mac_addr_remove_op,
.flow_ctrl_get = bnxt_flow_ctrl_get_op,
.flow_ctrl_set = bnxt_flow_ctrl_set_op,
+ .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
+ .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
+ .vlan_filter_set = bnxt_vlan_filter_set_op,
+ .vlan_offload_set = bnxt_vlan_offload_set_op,
+ .vlan_pvid_set = bnxt_vlan_pvid_set_op,
+ .mtu_set = bnxt_mtu_set_op,
+ .mac_addr_set = bnxt_set_default_mac_addr_op,
+ .xstats_get = bnxt_dev_xstats_get_op,
+ .xstats_get_names = bnxt_dev_xstats_get_names_op,
+ .xstats_reset = bnxt_dev_xstats_reset_op,
+ .fw_version_get = bnxt_fw_version_get,
+ .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
+ .rxq_info_get = bnxt_rxq_info_get_op,
+ .txq_info_get = bnxt_txq_info_get_op,
+ .dev_led_on = bnxt_dev_led_on_op,
+ .dev_led_off = bnxt_dev_led_off_op,
};
static bool bnxt_vf_pciid(uint16_t id)
@@ -1041,7 +1571,9 @@ static bool bnxt_vf_pciid(uint16_t id)
if (id == BROADCOM_DEV_ID_57304_VF ||
id == BROADCOM_DEV_ID_57406_VF ||
id == BROADCOM_DEV_ID_5731X_VF ||
- id == BROADCOM_DEV_ID_5741X_VF)
+ id == BROADCOM_DEV_ID_5741X_VF ||
+ id == BROADCOM_DEV_ID_57414_VF ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
return true;
return false;
}
@@ -1049,7 +1581,7 @@ static bool bnxt_vf_pciid(uint16_t id)
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
int rc;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
@@ -1082,22 +1614,35 @@ init_err_disable:
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+#define ALLOW_FUNC(x) \
+ { \
+ typeof(x) arg = (x); \
+ bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+ }
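ALLOW_FUNC clears one bit per HWRM command ID in the vf_req_fwd bitmap: bit (x & 0x1f) of 32-bit word (x >> 5), with rte_cpu_to_le_32() keeping the mask in the little-endian layout the firmware reads. The code further below sets the whole map to 0xff to forward every VF request, then uses this macro so the listed cleanup commands are not forwarded. A host-order sketch of the indexing, with a hypothetical bitmap size:

#include <stdint.h>

#define FWD_WORDS 8	/* hypothetical: 8 x 32 bits covers 256 command IDs */

/* Clear bit 'cmd': do not forward this command. */
static void allow_func(uint32_t fwd[FWD_WORDS], uint16_t cmd)
{
	fwd[cmd >> 5] &= ~(UINT32_C(1) << (cmd & 0x1f));
}

/* Set bit 'cmd': forward this command for the PF driver to inspect. */
static void forward_func(uint32_t fwd[FWD_WORDS], uint16_t cmd)
{
	fwd[cmd >> 5] |= UINT32_C(1) << (cmd & 0x1f);
}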
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
static int version_printed;
+ uint32_t total_alloc_len;
+ phys_addr_t mz_phys_addr;
struct bnxt *bp;
int rc;
if (version_printed++ == 0)
- RTE_LOG(INFO, PMD, "%s", bnxt_version);
+ RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
bp = eth_dev->data->dev_private;
+ rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
+ bp->dev_stopped = 1;
+
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
@@ -1111,6 +1656,80 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+ if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "rx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct rx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->rx_mem_zone = (const void *)mz;
+ bp->hw_rx_port_stats = mz->addr;
+ bp->hw_rx_port_stats_map = mz_phys_addr;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
+ pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function, "tx_port_stats");
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct tx_port_stats) + 512);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ bp->tx_mem_zone = (const void *)mz;
+ bp->hw_tx_port_stats = mz->addr;
+ bp->hw_tx_port_stats_map = mz_phys_addr;
+
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+ }
+
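The two port-stats blocks above follow a common DPDK pattern: look up the memzone by name first, so a restarted primary or a secondary process reuses the existing reservation, and reserve only when the lookup misses. A minimal sketch of that lookup-or-reserve step, with the zone name left to the caller:

#include <string.h>
#include <rte_memory.h>
#include <rte_memzone.h>

/* Reuse the named zone if it already exists, otherwise reserve it; zero it
 * either way, as the code above does. */
static const struct rte_memzone *
get_or_reserve(const char *name, size_t len)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz == NULL)
		mz = rte_memzone_reserve(name, len, SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz != NULL)
		memset(mz->addr, 0, mz->len);
	return mz;
}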
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
RTE_LOG(ERR, PMD,
@@ -1130,6 +1749,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
+ if (bp->max_tx_rings == 0) {
+ RTE_LOG(ERR, PMD, "No TX rings available!\n");
+ rc = -EBUSY;
+ goto error_free;
+ }
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
if (eth_dev->data->mac_addrs == NULL) {
@@ -1140,10 +1764,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
}
/* Copy the permanent MAC from the qcap response address now. */
- if (BNXT_PF(bp))
- memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
- else
- memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
+ memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
@@ -1155,8 +1776,29 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
}
- rc = bnxt_hwrm_func_driver_register(bp, 0,
- bp->pf.vf_req_fwd);
+ /* Forward all requests if firmware is new enough */
+ if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+ (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+ ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+ memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+ } else {
+ RTE_LOG(WARNING, PMD,
+ "Firmware too old for VF mailbox functionality\n");
+ memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
+ }
+
+ /*
+ * The following are used for driver cleanup. If we disallow these,
+ * VF drivers can't clean up cleanly.
+ */
+ ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+ ALLOW_FUNC(HWRM_VNIC_FREE);
+ ALLOW_FUNC(HWRM_RING_FREE);
+ ALLOW_FUNC(HWRM_RING_GRP_FREE);
+ ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+ ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+ ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+ rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
RTE_LOG(ERR, PMD,
"Failed to register driver");
@@ -1169,10 +1811,61 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
- bp->dev_stopped = 0;
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+ rc = -1;
+ goto error_free;
+ }
+
+ if (BNXT_PF(bp)) {
+ //if (bp->pf.active_vfs) {
+ // TODO: Deallocate VF resources?
+ //}
+ if (bp->pdev->max_vfs) {
+ rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+ goto error_free;
+ }
+ } else {
+ rc = bnxt_hwrm_allocate_pf_only(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to allocate PF resources\n");
+ goto error_free;
+ }
+ }
+ }
+
+ bnxt_hwrm_port_led_qcaps(bp);
+
+ rc = bnxt_setup_int(bp);
+ if (rc)
+ goto error_free;
+
+ rc = bnxt_alloc_mem(bp);
+ if (rc)
+ goto error_free_int;
+
+ rc = bnxt_request_int(bp);
+ if (rc)
+ goto error_free_int;
+
+ rc = bnxt_alloc_def_cp_ring(bp);
+ if (rc)
+ goto error_free_int;
+
+ bnxt_enable_int(bp);
return 0;
+error_free_int:
+ bnxt_disable_int(bp);
+ bnxt_free_def_cp_ring(bp);
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ bnxt_free_int(bp);
+ bnxt_free_mem(bp);
error_free:
bnxt_dev_uninit(eth_dev);
error:
@@ -1184,6 +1877,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
struct bnxt *bp = eth_dev->data->dev_private;
int rc;
+ bnxt_disable_int(bp);
+ bnxt_free_int(bp);
+ bnxt_free_mem(bp);
if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -1194,8 +1890,12 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
}
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bnxt_free_hwrm_resources(bp);
+ rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+ rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
if (bp->dev_stopped == 0)
bnxt_dev_close_op(eth_dev);
+ if (bp->pf.vf_info)
+ rte_free(bp->pf.vf_info);
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
@@ -1223,6 +1923,20 @@ static struct rte_pci_driver bnxt_rte_pmd = {
.remove = bnxt_pci_remove,
};
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->device->driver->name, drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool is_bnxt_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &bnxt_rte_pmd);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index df1042cf..e9aac271 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -68,20 +68,28 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
return filter;
}
-void bnxt_init_filters(struct bnxt *bp)
+struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
struct bnxt_filter_info *filter;
- int i, max_filters;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
+ filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
+ vf);
+ return NULL;
+ }
- max_filters = pf->max_l2_ctx;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
+ filter->fw_l2_filter_id = UINT64_MAX;
+ STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
+ return filter;
+}
- max_filters = vf->max_l2_ctx;
- }
+void bnxt_init_filters(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+ int i, max_filters;
+
+ max_filters = bp->max_l2_ctx;
STAILQ_INIT(&bp->free_filter_list);
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
@@ -110,6 +118,12 @@ void bnxt_free_all_filters(struct bnxt *bp)
STAILQ_INIT(&vnic->filter);
}
}
+
+ for (i = 0; i < bp->pf.max_vfs; i++) {
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+ bnxt_hwrm_clear_filter(bp, filter);
+ }
+ }
}
void bnxt_free_filter_mem(struct bnxt *bp)
@@ -122,15 +136,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
return;
/* Ensure that all filters are freed */
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_filters = pf->max_l2_ctx;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_filters = vf->max_l2_ctx;
- }
+ max_filters = bp->max_l2_ctx;
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
@@ -142,7 +148,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
"HWRM filter cannot be freed rc = %d\n",
rc);
}
- filter->fw_l2_filter_id = -1;
+ filter->fw_l2_filter_id = UINT64_MAX;
}
STAILQ_INIT(&bp->free_filter_list);
@@ -155,15 +161,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
struct bnxt_filter_info *filter_mem;
uint16_t max_filters;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_filters = pf->max_l2_ctx;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_filters = vf->max_l2_ctx;
- }
+ max_filters = bp->max_l2_ctx;
/* Allocate memory for VNIC pool and filter pool */
filter_mem = rte_zmalloc("bnxt_filter_info",
max_filters * sizeof(struct bnxt_filter_info),
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 06fe134a..613b2eea 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -63,9 +63,12 @@ struct bnxt_filter_info {
uint32_t vni;
uint8_t pri_hint;
uint64_t l2_filter_id_hint;
+ uint32_t src_id;
+ uint8_t src_type;
};
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
+struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf);
void bnxt_init_filters(struct bnxt *bp);
void bnxt_free_all_filters(struct bnxt *bp);
void bnxt_free_filter_mem(struct bnxt *bp);
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d8987234..e710e636 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -31,6 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <unistd.h>
+
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
@@ -54,6 +56,38 @@
#define HWRM_CMD_TIMEOUT 2000
+struct bnxt_plcmodes_cfg {
+ uint32_t flags;
+ uint16_t jumbo_thresh;
+ uint16_t hds_offset;
+ uint16_t hds_threshold;
+};
+
+static int page_getenum(size_t size)
+{
+ if (size <= 1 << 4)
+ return 4;
+ if (size <= 1 << 12)
+ return 12;
+ if (size <= 1 << 13)
+ return 13;
+ if (size <= 1 << 16)
+ return 16;
+ if (size <= 1 << 21)
+ return 21;
+ if (size <= 1 << 22)
+ return 22;
+ if (size <= 1 << 30)
+ return 30;
+ RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+ return sizeof(void *) * 8 - 1;
+}
+
+static int page_roundup(size_t size)
+{
+ return 1 << page_getenum(size);
+}
+
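page_getenum() and page_roundup() snap a buffer size to the next power of two out of the fixed set of page sizes the ring hardware accepts (16 B, 4 KB, 8 KB, 64 KB, 2 MB, 4 MB, 1 GB). An illustrative standalone copy of the selection step:

#include <stddef.h>

/* Smallest supported page-size exponent that fits 'size'. */
static int pick_page_shift(size_t size)
{
	static const int shifts[] = { 4, 12, 13, 16, 21, 22, 30 };
	size_t i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
		if (size <= ((size_t)1 << shifts[i]))
			return shifts[i];
	return sizeof(void *) * 8 - 1;	/* out of range, as in the original */
}

/* pick_page_shift(3000) == 12, so page_roundup(3000) == 4096;
 * pick_page_shift(70000) == 21, so page_roundup(70000) == 2 MB. */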
/*
* HWRM Functions (sent to HWRM)
* These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
@@ -70,6 +104,30 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
uint32_t *data = msg;
uint8_t *bar;
uint8_t *valid;
+ uint16_t max_req_len = bp->max_req_len;
+ struct hwrm_short_input short_input = { 0 };
+
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+
+ memset(short_cmd_req, 0, bp->max_req_len);
+ memcpy(short_cmd_req, req, msg_len);
+
+ short_input.req_type = rte_cpu_to_le_16(req->req_type);
+ short_input.signature = rte_cpu_to_le_16(
+ HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
+ short_input.size = rte_cpu_to_le_16(msg_len);
+ short_input.req_addr =
+ rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
+
+ data = (uint32_t *)&short_input;
+ msg_len = sizeof(short_input);
+
+ /* Sync memory write before updating doorbell */
+ rte_wmb();
+
+ max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
+ }
/* Write request msg to hwrm channel */
for (i = 0; i < msg_len; i += 4) {
@@ -79,7 +137,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
}
/* Zero the rest of the request space */
- for (; i < bp->max_req_len; i += 4) {
+ for (; i < max_req_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
rte_write32(0, bar);
}
@@ -103,7 +161,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
}
if (i >= HWRM_CMD_TIMEOUT) {
- RTE_LOG(ERR, PMD, "Error sending msg %x\n",
+ RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@@ -140,7 +198,22 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
} \
if (resp->error_code) { \
rc = rte_le_to_cpu_16(resp->error_code); \
- RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
+ if (resp->resp_len >= 16) { \
+ struct hwrm_err_output *tmp_hwrm_err_op = \
+ (void *)resp; \
+ RTE_LOG(ERR, PMD, \
+ "%s error %d:%d:%08x:%04x\n", \
+ __func__, \
+ rc, tmp_hwrm_err_op->cmd_err, \
+ rte_le_to_cpu_32(\
+ tmp_hwrm_err_op->opaque_0), \
+ rte_le_to_cpu_16(\
+ tmp_hwrm_err_op->opaque_1)); \
+ } \
+ else { \
+ RTE_LOG(ERR, PMD, \
+ "%s error %d\n", __func__, rc); \
+ } \
return rc; \
} \
}
@@ -162,7 +235,10 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
-int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ uint16_t vlan_count,
+ struct bnxt_vlan_table_entry *vlan_table)
{
int rc = 0;
struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
@@ -175,10 +251,28 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* FIXME add multicast flag, when multicast adding options is supported
* by ethtool.
*/
+ if (vnic->flags & BNXT_VNIC_INFO_BCAST)
+ mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ if (vnic->flags & BNXT_VNIC_INFO_MCAST)
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+ if (vnic->mc_addr_cnt) {
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
+ req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
+ req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
+ }
+ if (vlan_table) {
+ if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
+ req.vlan_tag_tbl_addr = rte_cpu_to_le_16(
+ rte_mem_virt2phy(vlan_table));
+ req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
+ }
req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
mask);
@@ -189,6 +283,44 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
+int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
+ uint16_t vlan_count,
+ struct bnxt_vlan_antispoof_table_entry *vlan_table)
+{
+ int rc = 0;
+ struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
+ struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ /*
+ * Older HWRM versions did not support this command, and the set_rx_mask
+ * list was used for anti-spoof. In 1.8.0, the TX path configuration was
+ * removed from set_rx_mask call, and this command was added.
+ *
+ * This command is also present from 1.7.8.11 and higher,
+ * as well as 1.7.8.0
+ */
+ if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
+ if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
+ if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
+ (11)))
+ return 0;
+ }
+ }
+ HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(fid);
+
+ req.vlan_tag_mask_tbl_addr =
+ rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+ req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
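The version gate above works because bp->fw_ver packs major.minor.build.reserved into one 32-bit value, most significant field first, so plain integer comparisons order releases correctly. A sketch of the same check with a hypothetical packing macro:

#include <stdint.h>

/* Hypothetical helper mirroring how bp->fw_ver is assembled in this patch. */
#define FW_VER(maj, min, bld, rsvd) \
	(((uint32_t)(maj) << 24) | ((uint32_t)(min) << 16) | \
	 ((uint32_t)(bld) << 8) | (uint32_t)(rsvd))

/* Command exists from 1.8.0, and also in 1.7.8.0 and 1.7.8.11 or later. */
static int antispoof_cmd_supported(uint32_t fw_ver)
{
	if (fw_ver >= FW_VER(1, 8, 0, 0))
		return 1;
	if (fw_ver == FW_VER(1, 7, 8, 0))
		return 1;
	return fw_ver >= FW_VER(1, 7, 8, 11);
}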
int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct bnxt_filter_info *filter)
{
@@ -196,6 +328,9 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
+ if (filter->fw_l2_filter_id == UINT64_MAX)
+ return 0;
+
HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -210,7 +345,7 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
}
int bnxt_hwrm_set_filter(struct bnxt *bp,
- struct bnxt_vnic_info *vnic,
+ uint16_t dst_id,
struct bnxt_filter_info *filter)
{
int rc = 0;
@@ -218,13 +353,16 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ if (filter->fw_l2_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_filter(bp, filter);
+
HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
req.flags = rte_cpu_to_le_32(filter->flags);
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
- req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.dst_id = rte_cpu_to_le_16(dst_id);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
@@ -240,6 +378,10 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
+ if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
+ req.src_id = rte_cpu_to_le_32(filter->src_id);
+ if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
+ req.src_type = filter->src_type;
req.enables = rte_cpu_to_le_32(enables);
@@ -252,29 +394,13 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
return rc;
}
-int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
-{
- int rc;
- struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
- struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
-
- HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
-
- memcpy(req.encap_request, fwd_cmd,
- sizeof(req.encap_request));
-
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-
- HWRM_CHECK_RESULT;
-
- return rc;
-}
-
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {.req_type = 0 };
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t new_max_vfs;
+ int i;
HWRM_PREP(req, FUNC_QCAPS, -1, resp);
@@ -286,31 +412,63 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- pf->fw_fid = rte_le_to_cpu_32(resp->fid);
- pf->port_id = resp->port_id;
- memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
- pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
- pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
- pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
- pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
- pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
- pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
- pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
+ bp->pf.port_id = resp->port_id;
+ bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ new_max_vfs = bp->pdev->max_vfs;
+ if (new_max_vfs != bp->pf.max_vfs) {
+ if (bp->pf.vf_info)
+ rte_free(bp->pf.vf_info);
+ bp->pf.vf_info = rte_malloc("bnxt_vf_info",
+ sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
+ bp->pf.max_vfs = new_max_vfs;
+ for (i = 0; i < new_max_vfs; i++) {
+ bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
+ bp->pf.vf_info[i].vlan_table =
+ rte_zmalloc("VF VLAN table",
+ getpagesize(),
+ getpagesize());
+ if (bp->pf.vf_info[i].vlan_table == NULL)
+ RTE_LOG(ERR, PMD,
+ "Fail to alloc VLAN table for VF %d\n",
+ i);
+ else
+ rte_mem_lock_page(
+ bp->pf.vf_info[i].vlan_table);
+ bp->pf.vf_info[i].vlan_as_table =
+ rte_zmalloc("VF VLAN AS table",
+ getpagesize(),
+ getpagesize());
+ if (bp->pf.vf_info[i].vlan_as_table == NULL)
+ RTE_LOG(ERR, PMD,
+ "Alloc VLAN AS table for VF %d fail\n",
+ i);
+ else
+ rte_mem_lock_page(
+ bp->pf.vf_info[i].vlan_as_table);
+ STAILQ_INIT(&bp->pf.vf_info[i].filter);
+ }
+ }
+ }
- vf->fw_fid = rte_le_to_cpu_32(resp->fid);
- memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
- vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
- vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
- vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
- vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
- vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
- vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->fw_fid = rte_le_to_cpu_32(resp->fid);
+ memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ /* TODO: For now, do not support VMDq/RFS on VFs. */
+ if (BNXT_PF(bp)) {
+ if (bp->pf.max_vfs)
+ bp->max_vnics = 1;
+ else
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ } else {
+ bp->max_vnics = 1;
}
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ if (BNXT_PF(bp))
+ bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
return rc;
}
@@ -332,8 +490,7 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
return rc;
}
-int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
- uint32_t *vf_req_fwd)
+int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
int rc;
struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
@@ -343,16 +500,22 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
return 0;
HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
- req.flags = flags;
- req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
- HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
req.ver_maj = RTE_VER_YEAR;
req.ver_min = RTE_VER_MONTH;
req.ver_upd = RTE_VER_MINOR;
- memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
+ if (BNXT_PF(bp)) {
+ req.enables |= rte_cpu_to_le_32(
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
+ memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+ RTE_MIN(sizeof(req.vf_req_fwd),
+ sizeof(bp->pf.vf_req_fwd)));
+ }
req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
+ memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -372,7 +535,9 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
uint32_t fw_version;
uint16_t max_resp_len;
char type[RTE_MEMZONE_NAMESIZE];
+ uint32_t dev_caps_cfg;
+ bp->max_req_len = HWRM_MAX_REQ_LEN;
HWRM_PREP(req, VER_GET, -1, resp);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
@@ -391,6 +556,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
+ bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
+ (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
@@ -427,8 +594,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
RTE_LOG(ERR, PMD, "Unsupported request length\n");
rc = -EINVAL;
}
- bp->max_req_len = resp->max_req_win_len;
+ bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
max_resp_len = resp->max_resp_len;
+ dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
+
if (bp->max_resp_len != max_resp_len) {
sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
bp->pdev->addr.domain, bp->pdev->addr.bus,
@@ -441,11 +610,46 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rc = -ENOMEM;
goto error;
}
+ rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "Unable to map response buffer to physical memory.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
bp->max_resp_len = max_resp_len;
}
+ if ((dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
+ RTE_LOG(DEBUG, PMD, "Short command supported\n");
+
+ rte_free(bp->hwrm_short_cmd_req_addr);
+
+ bp->hwrm_short_cmd_req_addr = rte_malloc(type,
+ bp->max_req_len, 0);
+ if (bp->hwrm_short_cmd_req_addr == NULL) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
+ bp->hwrm_short_cmd_req_dma_addr =
+ rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+ if (bp->hwrm_short_cmd_req_dma_addr == 0) {
+ rte_free(bp->hwrm_short_cmd_req_addr);
+ RTE_LOG(ERR, PMD,
+ "Unable to map buffer to physical memory.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+ }
+
error:
rte_spinlock_unlock(&bp->hwrm_lock);
return rc;
@@ -478,6 +682,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ uint32_t link_speed_mask =
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
@@ -489,14 +695,20 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
* any auto mode, even "none".
*/
if (!conf->link_speed) {
- req.auto_mode |= conf->auto_mode;
- enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
- req.auto_link_speed_mask = conf->auto_link_speed_mask;
- enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
- req.auto_link_speed = bp->link_info.auto_link_speed;
- enables |=
+ req.auto_mode = conf->auto_mode;
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+ if (conf->auto_mode ==
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
+ req.auto_link_speed_mask =
+ conf->auto_link_speed_mask;
+ enables |= link_speed_mask;
+ }
+ if (bp->link_info.auto_link_speed) {
+ req.auto_link_speed =
+ bp->link_info.auto_link_speed;
+ enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
+ }
}
req.auto_duplex = conf->duplex;
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
@@ -511,7 +723,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
req.enables = rte_cpu_to_le_32(enables);
} else {
req.flags =
- rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
+ rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
RTE_LOG(INFO, PMD, "Force Link Down\n");
}
@@ -536,13 +748,10 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
HWRM_CHECK_RESULT;
link_info->phy_link_status = resp->link;
- if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
- link_info->link_up = 1;
- link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
- } else {
- link_info->link_up = 0;
- link_info->link_speed = 0;
- }
+ link_info->link_up =
+ (link_info->phy_link_status ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
+ link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
link_info->duplex = resp->duplex;
link_info->pause = resp->pause;
link_info->auto_pause = resp->auto_pause;
@@ -590,20 +799,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id)
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
int rc = 0;
+ uint32_t enables = 0;
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, RING_ALLOC, -1, resp);
- req.enables = rte_cpu_to_le_32(0);
-
req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
req.fbo = rte_cpu_to_le_32(0);
/* Association of ring index with doorbell index */
req.logical_id = rte_cpu_to_le_16(map_index);
+ req.length = rte_cpu_to_le_32(ring->ring_size);
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
@@ -611,27 +820,26 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
/* FALLTHROUGH */
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
- req.cmpl_ring_id =
- rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
- req.length = rte_cpu_to_le_32(ring->ring_size);
+ req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
- req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
- HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+ if (stats_ctx_id != INVALID_STATS_CTX_ID)
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
break;
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
req.ring_type = ring_type;
/*
* TODO: Some HWRM versions crash with
* HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
*/
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
- req.length = rte_cpu_to_le_32(ring->ring_size);
break;
default:
RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
ring_type);
return -1;
}
+ req.enables = rte_cpu_to_le_32(enables);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -639,7 +847,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
- case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+ case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
return rc;
@@ -680,7 +888,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
- case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+ case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
@@ -747,13 +955,12 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
-
if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
return rc;
+ HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -762,8 +969,8 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return rc;
}
-int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
{
int rc;
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
@@ -771,9 +978,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
- req.update_period_ms = rte_cpu_to_le_32(1000);
+ req.update_period_ms = rte_cpu_to_le_32(0);
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
req.stats_dma_addr =
rte_cpu_to_le_64(cpr->hw_stats_map);
@@ -782,13 +988,12 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
HWRM_CHECK_RESULT;
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
-int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr, unsigned int idx)
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
{
int rc;
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
@@ -797,15 +1002,11 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
- cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
-
return rc;
}
@@ -816,26 +1017,80 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
- for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
- if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
- RTE_LOG(ERR, PMD,
- "Not enough ring groups avail:%x req:%x\n", j,
- (vnic->end_grp_id - vnic->start_grp_id) + 1);
- break;
- }
+ RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ vnic->start_grp_id, vnic->end_grp_id);
+ for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
- }
-
- vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
-
+ vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE;
HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+ if (vnic->func_default)
+ req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ pmode->flags = rte_le_to_cpu_32(resp->flags);
+ /* dflt_vnic bit doesn't exist in the _cfg command */
+ pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
+ pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
+ pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
+ pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_plcmodes_cfg *pmode)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.flags = rte_cpu_to_le_32(pmode->flags);
+ req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
+ req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
+ req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
+ );
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
return rc;
}
@@ -844,32 +1099,105 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ struct bnxt_plcmodes_cfg pmodes;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
+ rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
+ if (rc)
+ return rc;
HWRM_PREP(req, VNIC_CFG, -1, resp);
/* Only RSS support for now TBD: COS & LB */
req.enables =
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
- HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
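+	/*
+	 * Only one context-enable flag is passed to the firmware here; each
+	 * check below overrides the previous one, so a valid rss_rule wins.
+	 */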
+ if (vnic->lb_rule != 0xffff)
+ ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
+ if (vnic->cos_rule != 0xffff)
+ ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
+ if (vnic->rss_rule != 0xffff)
+ ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- req.dflt_ring_grp =
- rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
- req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
- req.cos_rule = rte_cpu_to_le_16(0xffff);
- req.lb_rule = rte_cpu_to_le_16(0xffff);
- req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
+ req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
+ req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
+ req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+ req.mru = rte_cpu_to_le_16(vnic->mru);
if (vnic->func_default)
- req.flags = 1;
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
if (vnic->vlan_strip)
req.flags |=
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->bd_stall)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
+ if (vnic->roce_dual)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
+ if (vnic->roce_only)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
+ if (vnic->rss_dflt_cr)
+ req.flags |= rte_cpu_to_le_32(
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int16_t fw_vf_id)
+{
+ int rc = 0;
+ struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ return rc;
+ }
+ HWRM_PREP(req, VNIC_QCFG, -1, resp);
+
+ req.enables =
+ rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.vf_id = rte_cpu_to_le_16(fw_vf_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
+ vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
+ vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
+ vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
+ vnic->mru = rte_le_to_cpu_16(resp->mru);
+ vnic->func_default = rte_le_to_cpu_32(
+ resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
+ vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
+ vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
+ vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
+ vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
+ vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
+
return rc;
}
@@ -886,7 +1214,8 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_CHECK_RESULT;
- vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
@@ -898,15 +1227,19 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
bp->hwrm_cmd_resp_addr;
+ if (vnic->rss_rule == 0xffff) {
+ RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ return rc;
+ }
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
- req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+ req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
- vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->rss_rule = INVALID_HW_RING_ID;
return rc;
}
@@ -917,8 +1250,10 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_input req = {.req_type = 0 };
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
- if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
+ }
HWRM_PREP(req, VNIC_FREE, -1, resp);
@@ -947,7 +1282,168 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
rte_cpu_to_le_64(vnic->rss_table_dma_addr);
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
- req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
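+/* Set jumbo placement with a threshold equal to the usable mbuf data room. */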
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t size;
+
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
+
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
+
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ req.jumbo_thresh = rte_cpu_to_le_16(size);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
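+/* Enable or disable hardware TPA (LRO) aggregation on the given VNIC. */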
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable)
+{
+ int rc = 0;
+ struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+
+ if (enable) {
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.max_agg_segs = rte_cpu_to_le_16(5);
+ req.max_aggs =
+ rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+ req.min_agg_len = rte_cpu_to_le_32(512);
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
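+/* Program a new default MAC address for the given VF via HWRM_FUNC_CFG. */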
+int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ bp->pf.vf_info[vf].random_mac = false;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+ uint64_t *dropped)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ if (dropped)
+ *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+ struct rte_eth_stats *stats)
+{
+ int rc = 0;
+ struct hwrm_func_qstats_input req = {.req_type = 0};
+ struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+ stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+ stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
+ stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
+
+ stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
+{
+ int rc = 0;
+ struct hwrm_func_clr_stats_input req = {.req_type = 0};
+ struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -992,14 +1488,20 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr;
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
- unsigned int idx = i + 1;
if (i >= bp->rx_cp_nr_rings)
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
else
cpr = bp->rx_queues[i]->cp_ring;
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
- rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ /*
+ * TODO. Need a better way to reset grp_info.stats_ctx
+ * for Rx rings only. stats_ctx is not saved for Tx
+ * in grp_info.
+ */
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
if (rc)
return rc;
}
@@ -1016,7 +1518,6 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
struct bnxt_tx_queue *txq;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
- unsigned int idx = i + 1;
if (i >= bp->rx_cp_nr_rings) {
txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
@@ -1026,7 +1527,7 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
cpr = rxq->cp_ring;
}
- rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
if (rc)
return rc;
@@ -1036,11 +1537,10 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
- uint16_t i;
+ uint16_t idx;
uint32_t rc = 0;
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- unsigned int idx = i + 1;
+ for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
RTE_LOG(ERR, PMD,
@@ -1057,13 +1557,13 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
return rc;
}
-static void bnxt_free_cp_ring(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr, unsigned int idx)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ unsigned int idx __rte_unused)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
bnxt_hwrm_ring_free(bp, cp_ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
+ HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
@@ -1096,8 +1596,10 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
txr->tx_prod = 0;
txr->tx_cons = 0;
}
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, idx);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -1119,17 +1621,26 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_buf_ring));
rxr->rx_prod = 0;
+ memset(rxr->ag_buf_ring, 0,
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
+ rxr->ag_prod = 0;
}
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, idx);
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
}
/* Default completion ring */
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, 0);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
}
return rc;
@@ -1141,14 +1652,7 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
uint32_t rc = 0;
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- unsigned int idx = i + 1;
-
- if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
- bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
- continue;
-
- rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
-
+ rc = bnxt_hwrm_ring_grp_alloc(bp, i);
if (rc)
return rc;
}
@@ -1159,8 +1663,11 @@ void bnxt_free_hwrm_resources(struct bnxt *bp)
{
/* Release memzone */
rte_free(bp->hwrm_cmd_resp_addr);
+ rte_free(bp->hwrm_short_cmd_req_addr);
bp->hwrm_cmd_resp_addr = NULL;
+ bp->hwrm_short_cmd_req_addr = NULL;
bp->hwrm_cmd_resp_dma_addr = 0;
+ bp->hwrm_short_cmd_req_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -1170,13 +1677,18 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
- bp->max_req_len = HWRM_MAX_REQ_LEN;
bp->max_resp_len = HWRM_MAX_RESP_LEN;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
+ rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map response address to physical memory\n");
+ return -ENOMEM;
+ }
rte_spinlock_init(&bp->hwrm_lock);
return 0;
@@ -1201,13 +1713,25 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
- rc = bnxt_hwrm_set_filter(bp, vnic, filter);
+ rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
break;
}
return rc;
}
+void bnxt_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
+ bp->vxlan_port = 0;
+ if (bp->geneve_port_cnt)
+ bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
+ bp->geneve_port = 0;
+}
+
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
@@ -1217,7 +1741,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
return;
vnic = &bp->vnic_info[0];
- bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
+ if (BNXT_PF(bp))
+ bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
/* VNIC resources */
for (i = 0; i < bp->nr_vnics; i++) {
@@ -1226,12 +1751,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_clear_hwrm_vnic_filters(bp, vnic);
bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
bnxt_hwrm_vnic_free(bp, vnic);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
bnxt_free_all_hwrm_ring_grps(bp);
bnxt_free_all_hwrm_stat_ctxs(bp);
+ bnxt_free_tunnel_ports(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
@@ -1337,12 +1866,16 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
return 0;
}
-static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
+static uint16_t
+bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
uint16_t ret = 0;
- if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (bp->link_info.support_speeds)
+ return bp->link_info.support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
+ }
if (link_speed & ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
@@ -1434,7 +1967,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
"Get link config failed with rc %d\n", rc);
goto exit;
}
- if (link_info->link_up)
+ if (link_info->link_speed)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
@@ -1443,7 +1976,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
- ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
+ ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
return rc;
}
@@ -1476,7 +2009,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
link_req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
link_req.auto_link_speed_mask =
- bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
+ bnxt_parse_eth_link_speed_mask(bp,
+ dev_conf->link_speeds);
} else {
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
link_req.link_speed = speed;
@@ -1493,7 +2027,6 @@ port_phy_cfg:
"Set link config failed with rc %d\n", rc);
}
- rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
return rc;
}
@@ -1512,12 +2045,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
HWRM_CHECK_RESULT;
- if (BNXT_VF(bp)) {
- struct bnxt_vf_info *vf = &bp->vf;
-
- /* Hard Coded.. 0xfff VLAN ID mask */
- vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
- }
+ /* Hard Coded.. 0xfff VLAN ID mask */
+ bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -1532,3 +2061,946 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
return rc;
}
+
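+/*
+ * Build a FUNC_QCAPS-style response from the FUNC_CFG request we issued;
+ * used as a fallback when querying a VF's actual capabilities fails.
+ */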
+static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
+ struct hwrm_func_qcaps_output *qcaps)
+{
+ qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
+ memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
+ sizeof(qcaps->mac_address));
+ qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
+ qcaps->max_rx_rings = fcfg->num_rx_rings;
+ qcaps->max_tx_rings = fcfg->num_tx_rings;
+ qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
+ qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
+ qcaps->max_vfs = 0;
+ qcaps->first_vf_id = 0;
+ qcaps->max_vnics = fcfg->num_vnics;
+ qcaps->max_decap_records = 0;
+ qcaps->max_encap_records = 0;
+ qcaps->max_tx_wm_flows = 0;
+ qcaps->max_tx_em_flows = 0;
+ qcaps->max_rx_wm_flows = 0;
+ qcaps->max_rx_em_flows = 0;
+ qcaps->max_flow_id = 0;
+ qcaps->max_mcast_filters = fcfg->num_mcast_filters;
+ qcaps->max_sp_tx_rings = 0;
+ qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
+}
+
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+ req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
+ req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
+ req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
+ req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
+ req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
+ req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
+ req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+ req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
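+/* Split the PF's resources evenly across the PF and VFs (num_vfs + 1 shares). */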
+static void populate_vf_func_cfg_req(struct bnxt *bp,
+ struct hwrm_func_cfg_input *req,
+ int num_vfs)
+{
+ req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+
+ req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
+ (num_vfs + 1));
+ req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+ req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
+ (num_vfs + 1));
+ req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+ req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+ req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+ /* TODO: For now, do not support VMDq/RFS on VFs. */
+ req->num_vnics = rte_cpu_to_le_16(1);
+ req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
+ (num_vfs + 1));
+}
+
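+/* Assign a random default MAC to the VF if firmware reports an all-zero one. */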
+static void add_random_mac_if_needed(struct bnxt *bp,
+ struct hwrm_func_cfg_input *cfg_req,
+ int vf)
+{
+ struct ether_addr mac;
+
+ if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
+ return;
+
+	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
+ cfg_req->enables |=
+ rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ eth_random_addr(cfg_req->dflt_mac_addr);
+ bp->pf.vf_info[vf].random_mac = true;
+ } else {
+ memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
+ }
+}
+
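+/*
+ * Query the resources actually granted to a VF and subtract them from the
+ * PF's running totals; fall back to the requested values if the query fails.
+ */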
+static void reserve_resources_from_vf(struct bnxt *bp,
+ struct hwrm_func_cfg_input *cfg_req,
+ int vf)
+{
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Get the actual allocated values now */
+ HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+ copy_func_cfg_to_qcaps(cfg_req, resp);
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+ copy_func_cfg_to_qcaps(cfg_req, resp);
+ }
+
+ bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
+ bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
+	/*
+	 * TODO: VMDq is not supported with VFs yet, so max_vnics is always
+	 * forced to 1 per VF for now.
+	 */
+	/* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
+ bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+}
+
+int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Check for zero MAC address */
+ HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+ return -1;
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+ return -1;
+ }
+ return rte_le_to_cpu_16(resp->vlan);
+}
+
+static int update_pf_resource_max(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* And copy the allocated numbers into the pf struct */
+ HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(0xffff);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ /* Only TX ring value reflects actual allocation? TODO */
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+ bp->pf.evb_mode = resp->evb_mode;
+
+ return rc;
+}
+
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
+{
+ int rc;
+
+ if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+ return -1;
+ }
+
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ bp->pf.func_cfg_flags &=
+ ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+ bp->pf.func_cfg_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
+ rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ return rc;
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int i;
+ size_t sz;
+ int rc = 0;
+ size_t req_buf_sz;
+
+ if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+ return -1;
+ }
+
+ rc = bnxt_hwrm_func_qcaps(bp);
+
+ if (rc)
+ return rc;
+
+ bp->pf.active_vfs = num_vfs;
+
+ /*
+ * First, configure the PF to only use one TX ring. This ensures that
+ * there are enough rings for all VFs.
+ *
+ * If we don't do this, when we call func_alloc() later, we will lock
+ * extra rings to the PF that won't be available during func_cfg() of
+ * the VFs.
+ *
+ * This has been fixed with firmware versions above 20.6.54
+ */
+ bp->pf.func_cfg_flags &=
+ ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+ bp->pf.func_cfg_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+ rc = bnxt_hwrm_pf_func_cfg(bp, 1);
+ if (rc)
+ return rc;
+
+ /*
+ * Now, create and register a buffer to hold forwarded VF requests
+ */
+ req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
+ bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+ page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
+ if (bp->pf.vf_req_buf == NULL) {
+ rc = -ENOMEM;
+ goto error_free;
+ }
+ for (sz = 0; sz < req_buf_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+ for (i = 0; i < num_vfs; i++)
+ bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+ (i * HWRM_MAX_REQ_LEN);
+
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ goto error_free;
+
+ populate_vf_func_cfg_req(bp, &req, num_vfs);
+
+ bp->pf.active_vfs = 0;
+ for (i = 0; i < num_vfs; i++) {
+ add_random_mac_if_needed(bp, &req, i);
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ /* Clear enable flag for next pass */
+ req.enables &= ~rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+
+ if (rc || resp->error_code) {
+ RTE_LOG(ERR, PMD,
+				"Failed to initialize VF %d\n", i);
+ RTE_LOG(ERR, PMD,
+ "Not all VFs available. (%d, %d)\n",
+ rc, resp->error_code);
+ break;
+ }
+
+ reserve_resources_from_vf(bp, &req, i);
+ bp->pf.active_vfs++;
+ bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+ }
+
+	/*
+	 * Now configure the PF to use "the rest" of the resources.
+	 * STD_TX_RING_MODE limits the number of TX rings, but it is required
+	 * for QoS to function properly; without it, the PF rings break the
+	 * bandwidth settings.
+	 */
+ rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ if (rc)
+ goto error_free;
+
+ rc = update_pf_resource_max(bp);
+ if (rc)
+ goto error_free;
+
+ return rc;
+
+error_free:
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ return rc;
+}
+
+int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
+ req.evb_mode = bp->pf.evb_mode;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type)
+{
+ struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_val = port;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ switch (tunnel_type) {
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
+ bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->vxlan_port = port;
+ break;
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
+ bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
+ bp->geneve_port = port;
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type)
+{
+ struct hwrm_tunnel_dst_port_free_input req = {0};
+ struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
+ uint32_t flags)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.flags = rte_cpu_to_le_32(flags);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
+{
+ uint32_t *flag = flagp;
+
+ vnic->flags = *flag;
+}
+
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
+}
+
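+/*
+ * Register the buffer used to receive forwarded VF requests with the
+ * firmware (one HWRM_MAX_REQ_LEN slot per active VF).
+ */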
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+ struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
+
+ req.req_buf_num_pages = rte_cpu_to_le_16(1);
+ req.req_buf_page_size = rte_cpu_to_le_16(
+ page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
+ req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
+ req.req_buf_page_addr[0] =
+ rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+ if (req.req_buf_page_addr[0] == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map buffer address to physical memory\n");
+ return -ENOMEM;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
+ struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
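+/* Direct firmware async event notifications to the default completion ring. */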
+int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = rte_cpu_to_le_16(
+ bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
+{
+ struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
+ req.enables = rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ req.async_event_cr = rte_cpu_to_le_16(
+ bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t dflt_vlan, fid;
+ uint32_t func_cfg_flags;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+ if (is_vf) {
+ dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
+ fid = bp->pf.vf_info[vf].fid;
+ func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ } else {
+ fid = rte_cpu_to_le_16(0xffff);
+ func_cfg_flags = bp->pf.func_cfg_flags;
+ dflt_vlan = bp->vlan;
+ }
+
+ req.flags = rte_cpu_to_le_32(func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(fid);
+ req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
+ uint16_t max_bw, uint16_t enables)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(enables);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.max_bw = rte_cpu_to_le_32(max_bw);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size)
+{
+ int rc = 0;
+ struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (ec_size > sizeof(req.encap_request))
+ return -1;
+
+ HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
+
+ req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+ memcpy(req.encap_request, encaped, ec_size);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
+ struct ether_addr *mac)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+ return rc;
+}
+
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size)
+{
+ int rc = 0;
+ struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (ec_size > sizeof(req.encap_request))
+ return -1;
+
+ HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+
+ req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+ memcpy(req.encap_request, encaped, ec_size);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
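+/* Fill the per-queue counters for queue 'idx' from the given stats context. */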
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+ struct rte_eth_stats *stats)
+{
+ int rc = 0;
+ struct hwrm_stat_ctx_query_input req = {.req_type = 0};
+ struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(cid);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+
+ stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+
+ stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+
+ return rc;
+}
+
+int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+ struct hwrm_port_qstats_input req = {0};
+ struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ return 0;
+
+ HWRM_PREP(req, PORT_QSTATS, -1, resp);
+ req.port_id = rte_cpu_to_le_16(pf->port_id);
+ req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+ req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+ return rc;
+}
+
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
+{
+ struct hwrm_port_clr_stats_input req = {0};
+ struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ return 0;
+
+ HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+ req.port_id = rte_cpu_to_le_16(pf->port_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+ return rc;
+}
+
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_qcaps_input req = {0};
+ int rc;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
+ req.port_id = bp->pf.port_id;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+ unsigned int i;
+
+ bp->num_leds = resp->num_leds;
+ memcpy(bp->leds, &resp->led0_id,
+ sizeof(bp->leds[0]) * bp->num_leds);
+ for (i = 0; i < bp->num_leds; i++) {
+ struct bnxt_led_info *led = &bp->leds[i];
+
+ uint16_t caps = led->led_state_caps;
+
+ if (!led->led_group_id ||
+ !BNXT_LED_ALT_BLINK_CAP(caps)) {
+ bp->num_leds = 0;
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
+{
+ struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_cfg_input req = {0};
+ struct bnxt_led_cfg *led_cfg;
+ uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
+ uint16_t duration = 0;
+ int rc, i;
+
+ if (!bp->num_leds || BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ HWRM_PREP(req, PORT_LED_CFG, -1, resp);
+ if (led_on) {
+ led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
+ duration = rte_cpu_to_le_16(500);
+ }
+ req.port_id = bp->pf.port_id;
+ req.num_leds = bp->num_leds;
+ led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+ for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+ req.enables |= BNXT_LED_DFLT_ENABLES(i);
+ led_cfg->led_id = bp->leds[i].led_id;
+ led_cfg->led_state = led_state;
+ led_cfg->led_blink_on = duration;
+ led_cfg->led_blink_off = duration;
+ led_cfg->led_group_id = bp->leds[i].led_group_id;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+static void
+bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
+{
+ uint32_t *count = cbdata;
+
+ *count = *count + 1;
+}
+
+static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
+ struct bnxt_vnic_info *vnic __rte_unused)
+{
+ return 0;
+}
+
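+/* Count a VF's in-use VNICs by walking them with a counting callback only. */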
+int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
+{
+ uint32_t count = 0;
+
+ bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
+ &count, bnxt_vnic_count_hwrm_stub);
+
+ return count;
+}
+
+static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
+ uint16_t *vnic_ids)
+{
+ struct hwrm_func_vf_vnic_ids_query_input req = {0};
+ struct hwrm_func_vf_vnic_ids_query_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* First query all VNIC ids */
+	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
+
+ req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+ req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+
+ if (req.vnic_id_tbl_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map VNIC ID table address to physical memory\n");
+ return -ENOMEM;
+ }
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ return -1;
+ } else if (resp->error_code) {
+ rc = rte_le_to_cpu_16(resp->error_code);
+ RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+ return -1;
+ }
+
+ return rte_le_to_cpu_32(resp->vnic_id_cnt);
+}
+
+/*
+ * This function queries the VNIC IDs for a specified VF. It then calls
+ * the vnic_cb to update the necessary field in vnic_info with cbdata.
+ * Then it calls the hwrm_cb function to program this new vnic configuration.
+ */
+int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
+ void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
+ int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
+{
+ struct bnxt_vnic_info vnic;
+ int rc = 0;
+ int i, num_vnic_ids;
+ uint16_t *vnic_ids;
+ size_t vnic_id_sz;
+ size_t sz;
+
+ /* First query all VNIC ids */
+ vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (vnic_ids == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+ num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+
+	if (num_vnic_ids < 0) {
+		rte_free(vnic_ids);
+		return num_vnic_ids;
+	}
+
+	/* Retrieve each VNIC, apply vnic_cb, then reprogram it via hwrm_cb */
+
+ for (i = 0; i < num_vnic_ids; i++) {
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+ if (rc)
+ break;
+ if (vnic.mru <= 4) /* Indicates unallocated */
+ continue;
+
+ vnic_cb(&vnic, cbdata);
+
+ rc = hwrm_cb(bp, &vnic);
+ if (rc)
+ break;
+ }
+
+ rte_free(vnic_ids);
+
+ return rc;
+}
+
+int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
+ bool on)
+{
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ HWRM_PREP(req, FUNC_CFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+ req.enables |= rte_cpu_to_le_32(
+ HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
+ req.vlan_antispoof_mode = on ?
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
+{
+ struct bnxt_vnic_info vnic;
+ uint16_t *vnic_ids;
+ size_t vnic_id_sz;
+ int num_vnic_ids, i;
+ size_t sz;
+ int rc;
+
+ vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+ vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (vnic_ids == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
+ rte_mem_lock_page(((char *)vnic_ids) + sz);
+
+ rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
+ if (rc <= 0)
+ goto exit;
+ num_vnic_ids = rc;
+
+ /*
+ * Loop through to find the default VNIC ID.
+ * TODO: The easier way would be to obtain the resp->dflt_vnic_id
+ * by sending the hwrm_func_qcfg command to the firmware.
+ */
+ for (i = 0; i < num_vnic_ids; i++) {
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
+ bp->pf.first_vf_id + vf);
+ if (rc)
+ goto exit;
+ if (vnic.func_default) {
+ rte_free(vnic_ids);
+ return vnic.fw_vnic_id;
+ }
+ }
+ /* Could not find a default VNIC. */
+ RTE_LOG(ERR, PMD, "No default VNIC\n");
+exit:
+ rte_free(vnic_ids);
+ return -1;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 6519ef21..51cd0dd4 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -45,27 +45,42 @@ struct bnxt_cp_ring_info;
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
-int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ uint16_t vlan_count,
+ struct bnxt_vlan_table_entry *vlan_table);
+int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
+ uint16_t vlan_count,
+ struct bnxt_vlan_antispoof_table_entry *vlan_table);
int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct bnxt_filter_info *filter);
int bnxt_hwrm_set_filter(struct bnxt *bp,
- struct bnxt_vnic_info *vnic,
+ uint16_t dst_id,
struct bnxt_filter_info *filter);
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size);
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+ void *encaped, size_t ec_size);
-int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd);
-
-int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
- uint32_t *vf_req_fwd);
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
+int bnxt_hwrm_func_driver_register(struct bnxt *bp);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_func_reset(struct bnxt *bp);
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
+int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
+ struct rte_eth_stats *stats);
+int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
+ uint64_t *dropped);
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid);
+int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp);
+int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp);
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id);
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
int bnxt_hwrm_ring_free(struct bnxt *bp,
struct bnxt_ring *ring, uint32_t ring_type);
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
@@ -76,16 +91,24 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx);
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx);
+int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
+ struct rte_eth_stats *stats);
int bnxt_hwrm_ver_get(struct bnxt *bp);
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int16_t fw_vf_id);
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable);
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
@@ -101,5 +124,36 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp);
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
int bnxt_hwrm_func_qcfg(struct bnxt *bp);
-
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
+int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf,
+ const uint8_t *mac_addr);
+int bnxt_hwrm_pf_evb_mode(struct bnxt *bp);
+int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
+ uint16_t max_bw, uint16_t enables);
+int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf);
+int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
+ struct ether_addr *mac);
+int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf);
+int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type);
+int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
+ uint8_t tunnel_type);
+void bnxt_free_tunnel_ports(struct bnxt *bp);
+int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf);
+int bnxt_hwrm_port_qstats(struct bnxt *bp);
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp);
+int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on);
+int bnxt_hwrm_port_led_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
+ uint32_t flags);
+void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp);
+int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf);
+int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
+ void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
+ int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic));
+int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
+ bool on);
+int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf);
#endif
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 20e17ff5..47cda7e5 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -66,16 +66,26 @@ static void bnxt_int_handler(void *param)
/* Handle any async event */
bnxt_handle_async_event(bp, cmp);
break;
- case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+ case CMPL_BASE_TYPE_HWRM_FWD_REQ:
/* Handle HWRM forwarded responses */
bnxt_handle_fwd_req(bp, cmp);
break;
default:
/* Ignore any other events */
+ if (cmp->type & rte_cpu_to_le_16(0x01)) {
+ if (!CMP_VALID(cmp, raw_cons,
+ cpr->cp_ring_struct))
+ goto no_more;
+ }
+ RTE_LOG(INFO, PMD,
+ "Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- }
+
+	}
+no_more:
+ cpr->cp_raw_cons = raw_cons;
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
@@ -102,14 +112,15 @@ void bnxt_disable_int(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
/* Only the default completion ring */
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ if (cpr != NULL && cpr->cp_doorbell != NULL)
+ B_CP_DB_DISARM(cpr);
}
void bnxt_enable_int(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+ B_CP_DB_ARM(cpr);
}
int bnxt_setup_int(struct bnxt *bp)
@@ -126,7 +137,7 @@ int bnxt_setup_int(struct bnxt *bp)
for (i = 0; i < total_vecs; i++) {
bp->irq_tbl[i].vector = i;
snprintf(bp->irq_tbl[i].name, len,
- "%s-%d", bp->eth_dev->data->name, i);
+ "%s-%d", bp->eth_dev->device->name, i);
bp->irq_tbl[i].handler = bnxt_int_handler;
}
} else {
@@ -136,7 +147,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;
setup_exit:
- RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed");
+ RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 0fafa13f..9d0ae277 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -31,7 +31,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_bitmap.h>
#include <rte_memzone.h>
+#include <unistd.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
@@ -96,6 +98,8 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct rte_pci_device *pdev = bp->pdev;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
+ phys_addr_t mz_phys_addr;
+ int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
@@ -112,8 +116,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
int rx_vmem_len = rx_ring_info ?
RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
rx_ring_struct->vmem_size) : 0;
+ int ag_vmem_start = 0;
+ int ag_vmem_len = 0;
+ int cp_ring_start = 0;
+
+ ag_vmem_start = rx_vmem_start + rx_vmem_len;
+ ag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(
+ rx_ring_info->ag_ring_struct->vmem_size) : 0;
+ cp_ring_start = ag_vmem_start + ag_vmem_len;
- int cp_ring_start = rx_vmem_start + rx_vmem_len;
int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
sizeof(struct cmpl_base));
@@ -127,7 +138,23 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
sizeof(struct rx_prod_pkt_bd)) : 0;
- int total_alloc_len = rx_ring_start + rx_ring_len;
+ int ag_ring_start = rx_ring_start + rx_ring_len;
+ int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
+
+ int ag_bitmap_start = ag_ring_start + ag_ring_len;
+ int ag_bitmap_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
+ rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR)) : 0;
+
+ int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
+ int tpa_info_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
+ sizeof(struct bnxt_tpa_info)) : 0;
+
+ int total_alloc_len = tpa_info_start;
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ total_alloc_len += tpa_info_len;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
@@ -136,21 +163,37 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
if (!mz) {
- mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
SOCKET_ID_ANY,
RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ RTE_MEMZONE_SIZE_HINT_ONLY,
+ getpagesize());
if (mz == NULL)
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ for (sz = 0; sz < total_alloc_len; sz += getpagesize())
+ rte_mem_lock_page(((char *)mz->addr) + sz);
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map ring address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
if (tx_ring_info) {
tx_ring = tx_ring_info->tx_ring_struct;
tx_ring->bd = ((char *)mz->addr + tx_ring_start);
tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
- tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
+ tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
tx_ring->mem_zone = (const void *)mz;
@@ -170,7 +213,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
rx_ring->bd = ((char *)mz->addr + rx_ring_start);
rx_ring_info->rx_desc_ring =
(struct rx_prod_pkt_bd *)rx_ring->bd;
- rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
+ rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
rx_ring->mem_zone = (const void *)mz;
@@ -182,10 +225,39 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
rx_ring_info->rx_buf_ring =
(struct bnxt_sw_rx_bd *)rx_ring->vmem;
}
+
+ rx_ring = rx_ring_info->ag_ring_struct;
+
+ rx_ring->bd = ((char *)mz->addr + ag_ring_start);
+ rx_ring_info->ag_desc_ring =
+ (struct rx_prod_pkt_bd *)rx_ring->bd;
+ rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
+ rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
+ rx_ring->mem_zone = (const void *)mz;
+
+ if (!rx_ring->bd)
+ return -ENOMEM;
+ if (rx_ring->vmem_size) {
+ rx_ring->vmem =
+ (void **)((char *)mz->addr + ag_vmem_start);
+ rx_ring_info->ag_buf_ring =
+ (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+ }
+
+ rx_ring_info->ag_bitmap =
+ rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
+ ag_bitmap_start, ag_bitmap_len);
+
+ /* TPA info */
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ rx_ring_info->tpa_info =
+ ((struct bnxt_tpa_info *)((char *)mz->addr +
+ tpa_info_start));
}
cp_ring->bd = ((char *)mz->addr + cp_ring_start);
- cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
+ cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
cp_ring_info->cp_desc_ring = cp_ring->bd;
cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
cp_ring->mem_zone = (const void *)mz;
@@ -196,7 +268,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
*cp_ring->vmem = ((char *)mz->addr + stats_len);
if (stats_len) {
cp_ring_info->hw_stats = mz->addr;
- cp_ring_info->hw_stats_map = mz->phys_addr;
+ cp_ring_info->hw_stats_map = mz_phys_addr;
}
cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
return 0;
@@ -213,21 +285,6 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
unsigned int i;
int rc = 0;
- /* Default completion ring */
- {
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
-
- rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- 0, HWRM_NA_SIGNATURE);
- if (rc)
- goto err_out;
- cpr->cp_doorbell = pci_dev->mem_resource[2].addr;
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
- }
-
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -235,35 +292,64 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
struct bnxt_ring *ring = rxr->rx_ring_struct;
unsigned int idx = i + 1;
+ unsigned int map_idx = idx + bp->rx_cp_nr_rings;
+
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
/* Rx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- idx, HWRM_NA_SIGNATURE);
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ idx, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
- bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+ bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Rx ring */
rc = bnxt_hwrm_ring_alloc(bp, ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
- idx, cpr->hw_stats_ctx_id);
+ idx, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
rxr->rx_prod = 0;
rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
- bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+ ring = rxr->ag_ring_struct;
+ /* Agg ring */
+ if (ring == NULL)
+ RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ map_idx, HWRM_NA_SIGNATURE,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+ RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
+ rxr->ag_prod = 0;
+ rxr->ag_doorbell =
+ (char *)pci_dev->mem_resource[2].addr +
+ map_idx * 0x80;
+ bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!");
+ RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ rxq->index = idx;
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -272,29 +358,34 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
- unsigned int idx = 1 + bp->rx_cp_nr_rings + i;
+ unsigned int idx = i + 1 + bp->rx_cp_nr_rings;
+
+ /* Account for AGG Rings. AGG ring cnt = Rx Cmpl ring cnt */
+ idx += bp->rx_cp_nr_rings;
/* Tx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
- idx, HWRM_NA_SIGNATURE);
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ idx, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
- bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Tx ring */
rc = bnxt_hwrm_ring_alloc(bp, ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
- idx, cpr->hw_stats_ctx_id);
+ idx, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
+ txq->index = idx;
}
err_out:
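
The bnxt_alloc_rings() hunks above place all of a queue's rings in one memzone by accumulating cache-line-rounded offsets (tx/rx/agg descriptor areas, vmem, the agg bitmap and the optional TPA info) and then slice that zone by offset for both the virtual and the DMA address; when the memzone's physical address comes back equal to its virtual address, the code falls back to rte_mem_lock_page() plus rte_mem_virt2phy(). A minimal sketch of the offset-accumulation pattern, using hypothetical sizes rather than the driver's real ones:

    #include <rte_common.h>   /* RTE_CACHE_LINE_ROUNDUP */

    /* Minimal sketch of the offset-accumulation pattern used above; the sizes
     * here are hypothetical, the driver derives them from ring_size and the
     * descriptor/struct sizes. */
    static int bnxt_layout_sketch(void)
    {
        int tx_len = RTE_CACHE_LINE_ROUNDUP(256 * 16);  /* tx descriptor area   */
        int rx_len = RTE_CACHE_LINE_ROUNDUP(256 * 16);  /* rx descriptor area   */
        int ag_len = rx_len * 2;                        /* AGG_RING_SIZE_FACTOR */

        int tx_start = 0;
        int rx_start = tx_start + tx_len;
        int ag_start = rx_start + rx_len;

        /* reserve one zone of this size, then address regions as base + start */
        return ag_start + ag_len;
    }

Each region's DMA address is then mz_phys_addr plus the same start offset, which is why the hunks switch the bd_dma and hw_stats_map assignments from mz->phys_addr to the possibly remapped mz_phys_addr.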
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 8656549a..6d1eb588 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -57,7 +57,8 @@
#define DEFAULT_RX_RING_SIZE 256
#define DEFAULT_TX_RING_SIZE 256
-#define MAX_TPA 128
+#define BNXT_TPA_MAX 64
+#define AGG_RING_SIZE_FACTOR 2
/* These assume 4k pages */
#define MAX_RX_DESC_CNT (8 * 1024)
@@ -65,6 +66,7 @@
#define MAX_CP_DESC_CNT (16 * 1024)
#define INVALID_HW_RING_ID ((uint16_t)-1)
+#define INVALID_STATS_CTX_ID ((uint16_t)-1)
struct bnxt_ring {
void *bd;
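
bnxt_ring.h replaces MAX_TPA with BNXT_TPA_MAX (64) and adds AGG_RING_SIZE_FACTOR (2): the aggregation ring is sized at twice the RX ring, and the completion ring, sized later in bnxt_init_rx_ring_struct(), has to absorb completions for both. A rough illustration for the default 256-entry RX ring (this only mirrors the arithmetic, it is not driver code):

    #include <stdint.h>
    #include <rte_common.h>   /* rte_align32pow2 */

    #define AGG_RING_SIZE_FACTOR 2   /* from bnxt_ring.h above */

    /* Illustrative sizing for a 256-entry RX ring; mirrors the arithmetic in
     * bnxt_init_rx_ring_struct() further down in this diff. */
    static void bnxt_ring_size_sketch(void)
    {
        uint32_t rx_size = 256;
        uint32_t ag_size = rte_align32pow2(rx_size * AGG_RING_SIZE_FACTOR);       /* 512  */
        uint32_t cp_size = rte_align32pow2(rx_size * (2 + AGG_RING_SIZE_FACTOR)); /* 1024 */

        (void)ag_size;
        (void)cp_size;
    }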
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index cddf17d5..0793820b 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -76,6 +76,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
bp->nr_vnics++;
@@ -84,9 +85,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->func_default = true;
vnic->ff_pool_idx = 0;
- vnic->start_grp_id = 1;
- vnic->end_grp_id = vnic->start_grp_id +
- bp->rx_cp_nr_rings - 1;
+ vnic->start_grp_id = 0;
+ vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
@@ -121,13 +121,16 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
/* For each pool, allocate MACVLAN CFA rule & VNIC */
if (!pools) {
+ pools = RTE_MIN(bp->max_vnics,
+ RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
RTE_LOG(ERR, PMD,
"VMDq pool not set, defaulted to 64\n");
pools = ETH_64_POOLS;
}
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- start_grp_id = 1;
- end_grp_id = start_grp_id + nb_q_per_grp - 1;
+ start_grp_id = 0;
+ end_grp_id = nb_q_per_grp;
ring_idx = 0;
for (i = 0; i < pools; i++) {
@@ -138,6 +141,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
bp->nr_vnics++;
@@ -178,6 +182,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
/* Partition the rx queues for the single pool */
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
rxq = bp->eth_dev->data->rx_queues[i];
@@ -188,9 +193,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->func_default = true;
vnic->ff_pool_idx = 0;
- vnic->start_grp_id = 1;
- vnic->end_grp_id = vnic->start_grp_id +
- bp->rx_cp_nr_rings - 1;
+ vnic->start_grp_id = 0;
+ vnic->end_grp_id = bp->rx_cp_nr_rings;
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
@@ -213,9 +217,10 @@ err_out:
return rc;
}
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
+static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
struct bnxt_sw_rx_bd *sw_ring;
+ struct bnxt_tpa_info *tpa_info;
uint16_t i;
if (rxq) {
@@ -228,6 +233,27 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
}
}
}
+ /* Free up mbufs in Agg ring */
+ sw_ring = rxq->rx_ring->ag_buf_ring;
+ if (sw_ring) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
+ }
+ }
+ }
}
}
@@ -251,6 +277,8 @@ void bnxt_rx_queue_release_op(void *rx_queue)
/* Free RX ring hardware descriptors */
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ /* Free RX Agg ring hardware descriptors */
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
/* Free RX completion ring hardware descriptors */
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
@@ -273,7 +301,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -286,7 +314,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+ RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@@ -295,6 +323,9 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
goto out;
@@ -308,7 +339,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+ RTE_LOG(ERR, PMD,
+ "ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
goto out;
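
In bnxt_mq_rx_configure() the ring-group numbering becomes 0-based, and when no VMDq pool count was configured the added lines clamp the pool count against the device limits; note that the pre-existing pools = ETH_64_POOLS assignment is still present in the hunk, so the clamped value is overwritten right after the log message. The clamp itself follows this pattern, shown with hypothetical parameters standing in for the bp->max_* fields:

    #include <rte_common.h>   /* RTE_MIN */

    /* Illustrative clamp of a requested pool count to device limits; the
     * parameter names are hypothetical stand-ins for the bp->max_* fields. */
    static unsigned int bnxt_pool_clamp_sketch(unsigned int max_vnics,
                                               unsigned int max_l2_ctx,
                                               unsigned int max_rsscos_ctx)
    {
        return RTE_MIN(max_vnics,
               RTE_MIN(max_l2_ctx,
               RTE_MIN(max_rsscos_ctx, 64u)));
    }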
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 95543298..01aaa007 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -52,12 +52,15 @@ struct bnxt_rx_queue {
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
struct bnxt *bp;
+ int index;
struct bnxt_vnic_info *vnic;
uint32_t rx_buf_size;
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
+
+ struct bnxt_tpa_info *rx_tpa;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 5d93de26..bee67d33 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -34,6 +34,7 @@
#include <inttypes.h>
#include <stdbool.h>
+#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
@@ -67,8 +68,37 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
struct rte_mbuf *data;
data = __bnxt_alloc_rx_data(rxq->mb_pool);
- if (!data)
+ if (!data) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
+ }
+
+ rx_buf->mbuf = data;
+
+ rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+
+ return 0;
+}
+
+static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
+ struct bnxt_rx_ring_info *rxr,
+ uint16_t prod)
+{
+ struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
+ struct rte_mbuf *data;
+
+ data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!data) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
+
+ if (rxbd == NULL)
+ RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
+ if (rx_buf == NULL)
+ RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
+
rx_buf->mbuf = data;
@@ -77,24 +107,232 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
return 0;
}
-static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
struct rte_mbuf *mbuf)
{
- uint16_t prod = rxr->rx_prod;
+ uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
struct bnxt_sw_rx_bd *prod_rx_buf;
- struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+ struct rx_prod_pkt_bd *prod_bd;
prod_rx_buf = &rxr->rx_buf_ring[prod];
+ RTE_ASSERT(prod_rx_buf->mbuf == NULL);
+ RTE_ASSERT(mbuf != NULL);
+
prod_rx_buf->mbuf = mbuf;
prod_bd = &rxr->rx_desc_ring[prod];
- cons_bd = &rxr->rx_desc_ring[cons];
+
+ prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));
+
+ rxr->rx_prod = prod;
+}
+
+#ifdef BNXT_DEBUG
+static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+ struct rte_mbuf *mbuf)
+{
+ uint16_t prod = rxr->ag_prod;
+ struct bnxt_sw_rx_bd *prod_rx_buf;
+ struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+
+ prod_rx_buf = &rxr->ag_buf_ring[prod];
+
+ prod_rx_buf->mbuf = mbuf;
+
+ prod_bd = &rxr->ag_desc_ring[prod];
+ cons_bd = &rxr->ag_desc_ring[cons];
prod_bd->addr = cons_bd->addr;
}
+#endif
+
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+ mbuf = cons_rx_buf->mbuf;
+ cons_rx_buf->mbuf = NULL;
+ return mbuf;
+}
+
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
+ RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ }
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* recycle next mbuf */
+ data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/* TPA consumes agg buffers out of order; refill only contiguously freed slots */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+ RTE_LOG(ERR, PMD,
+ "agg mbuf alloc failed: prod=0x%x\n", next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, next);
+ rxr->ag_prod = next;
+ next = RING_NEXT(rxr->ag_ring_struct, next);
+ }
+
+ return 0;
+}
+
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+
+ for (i = 0; i < agg_buf; i++) {
+ struct bnxt_sw_rx_bd *ag_buf;
+ struct rte_mbuf *ag_mbuf;
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+#ifdef BNXT_DEBUG
+ bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = ag_buf->mbuf;
+ RTE_ASSERT(ag_mbuf != NULL);
-static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ ag_buf->mbuf = NULL;
+
+ /*
+ * Aggregation buffers are consumed out of order by the TPA module,
+ * so use a bitmap to track freed slots that still need to be
+ * reallocated and notified to the NIC.
+ */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
+ >> RX_TPA_END_CMPL_AGG_ID_SFT;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ struct bnxt_tpa_info *tpa_info;
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ rte_prefetch0(mbuf);
+ agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
+ }
+ mbuf->l4_len = tpa_end->payload_offset;
+
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ return NULL;
+ }
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
+static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -104,9 +342,13 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
uint32_t tmp_raw_cons = *raw_cons;
uint16_t cons, prod, cp_cons =
RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- struct bnxt_sw_rx_bd *rx_buf;
+#ifdef BNXT_DEBUG
+ uint16_t ag_cons;
+#endif
struct rte_mbuf *mbuf;
int rc = 0;
+ uint8_t agg_buf = 0;
+ uint16_t cmp_type;
rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
@@ -118,14 +360,39 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
return -EBUSY;
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
+ bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
+ (struct rx_tpa_start_cmpl_hi *)rxcmp1);
+ rc = -EINVAL; /* Continue w/o new mbuf */
+ goto next_rx;
+ } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
+ (struct rx_tpa_end_cmpl *)rxcmp,
+ (struct rx_tpa_end_cmpl_hi *)rxcmp1);
+ if (unlikely(!mbuf))
+ return -EBUSY;
+ *rx_pkt = mbuf;
+ goto next_rx;
+ } else if (cmp_type != 0x11) {
+ rc = -EINVAL;
+ goto next_rx;
+ }
+
+ agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
+ >> RX_PKT_CMPL_AGG_BUFS_SFT;
+ if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
+ return -EBUSY;
+
prod = rxr->rx_prod;
- /* EW - GRO deferred to phase 3 */
cons = rxcmp->opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- mbuf = rx_buf->mbuf;
+ mbuf = bnxt_consume_rx_buf(rxr, cons);
rte_prefetch0(mbuf);
+ if (mbuf == NULL)
+ return -ENOMEM;
+
mbuf->nb_segs = 1;
mbuf->next = NULL;
mbuf->pkt_len = rxcmp->len;
@@ -139,6 +406,10 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->hash.fdir.id = rxcmp1->cfa_code;
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
+
+ if (agg_buf)
+ bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
+
if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
mbuf->vlan_tci = rxcmp1->metadata &
(RX_PKT_CMPL_METADATA_VID_MASK |
@@ -147,14 +418,17 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_VLAN_PKT;
}
- rx_buf->mbuf = NULL;
+#ifdef BNXT_DEBUG
if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
/* Re-install the mbuf back to the rx ring */
bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
+ if (agg_buf)
+ bnxt_reuse_ag_mbuf(rxr, ag_cons, mbuf);
rc = -EIO;
goto next_rx;
}
+#endif
/*
* TODO: Redesign this....
* If the allocation fails, the packet does not get received.
@@ -170,24 +444,20 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
* calls in favour of a tight loop with the same function being called
* in it.
*/
+ prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
- goto next_rx;
}
-
+ rxr->rx_prod = prod;
/*
* All MBUFs are allocated with the same size under DPDK,
* no optimization for rx_copy_thresh
*/
- /* AGG buf operation is deferred */
-
- /* EW - VLAN reception. Must compare against the ol_flags */
-
*rx_pkt = mbuf;
+
next_rx:
- rxr->rx_prod = RING_NEXT(rxr->rx_ring_struct, prod);
*raw_cons = tmp_raw_cons;
@@ -203,8 +473,9 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint32_t raw_cons = cpr->cp_raw_cons;
uint32_t cons;
int nb_rx_pkts = 0;
- bool rx_event = false;
struct rx_pkt_cmpl *rxcmp;
+ uint16_t prod = rxr->rx_prod;
+ uint16_t ag_prod = rxr->ag_prod;
/* Handle RX burst request */
while (1) {
@@ -222,26 +493,27 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
if (likely(!rc))
nb_rx_pkts++;
- else if (rc == -EBUSY) /* partial completion */
+ if (rc == -EBUSY) /* partial completion */
break;
- rx_event = true;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
if (nb_rx_pkts == nb_pkts)
break;
}
- if (raw_cons == cpr->cp_raw_cons) {
+
+ cpr->cp_raw_cons = raw_cons;
+ if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
/*
* For PMD, there is no need to keep on pushing to REARM
* the doorbell if there are no new completions
*/
return nb_rx_pkts;
}
- cpr->cp_raw_cons = raw_cons;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- if (rx_event)
- B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ /* Ring the AGG ring DB */
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
return nb_rx_pkts;
}
@@ -257,6 +529,12 @@ void bnxt_free_rx_rings(struct bnxt *bp)
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
rte_free(rxq->rx_ring->rx_ring_struct);
+
+ /* Free the Aggregator ring */
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+ rte_free(rxq->rx_ring->ag_ring_struct);
+ rxq->rx_ring->ag_ring_struct = NULL;
+
rte_free(rxq->rx_ring);
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
@@ -270,13 +548,11 @@ void bnxt_free_rx_rings(struct bnxt *bp)
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
- struct bnxt *bp = rxq->bp;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
- rxq->rx_buf_use_size = bp->eth_dev->data->mtu +
- ETHER_HDR_LEN + ETHER_CRC_LEN +
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
(2 * VLAN_TAG_SIZE);
rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
@@ -313,13 +589,29 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
cpr->cp_ring_struct = ring;
- ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+ ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
+ (2 + AGG_RING_SIZE_FACTOR));
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->vmem_size = 0;
ring->vmem = NULL;
+ /* Allocate Aggregator rings */
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ rxr->ag_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
+ AGG_RING_SIZE_FACTOR);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)rxr->ag_desc_ring;
+ ring->bd_dma = rxr->ag_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
+ ring->vmem = (void **)&rxr->ag_buf_ring;
+
return 0;
}
@@ -332,8 +624,8 @@ static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
if (!rx_bd_ring)
return;
for (j = 0; j < ring->ring_size; j++) {
- rx_bd_ring[j].flags_type = type;
- rx_bd_ring[j].len = len;
+ rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
+ rx_bd_ring[j].len = rte_cpu_to_le_16(len);
rx_bd_ring[j].opaque = j;
}
}
@@ -344,12 +636,17 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
struct bnxt_ring *ring;
uint32_t prod, type;
unsigned int i;
+ uint16_t size;
+
+ size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (rxq->rx_buf_use_size <= size)
+ size = rxq->rx_buf_use_size;
- type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
+ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;
- bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size);
+ bnxt_init_rxbds(ring, type, size);
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
@@ -362,6 +659,36 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
+ RTE_LOG(DEBUG, PMD, "%s\n", __func__);
+
+ ring = rxr->ag_ring_struct;
+ type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
+ bnxt_init_rxbds(ring, type, size);
+ prod = rxr->ag_prod;
+
+ for (i = 0; i < ring->ring_size; i++) {
+ if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+ RTE_LOG(WARNING, PMD,
+ "init'ed AG ring %d with %d/%d mbufs only\n",
+ rxq->queue_id, i, ring->ring_size);
+ break;
+ }
+ rxr->ag_prod = prod;
+ prod = RING_NEXT(rxr->ag_ring_struct, prod);
+ }
+ RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+
+ if (rxr->tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ rxr->tpa_info[i].mbuf =
+ __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!rxr->tpa_info[i].mbuf) {
+ rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ return -ENOMEM;
+ }
+ }
+ }
+ RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
return 0;
}
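
The new aggregation/TPA receive path in bnxt_rxr.c (bnxt_tpa_start(), bnxt_rx_pages(), bnxt_tpa_end()) assembles one multi-segment mbuf per packet by appending each aggregation buffer to the head mbuf. Stripped of the completion-ring bookkeeping, the chaining reduces to the sketch below; the seg[]/seg_len[] inputs are hypothetical stand-ins for data the driver reads from the completion ring:

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Sketch of appending aggregation segments to a head mbuf; in the driver
     * the segments and lengths come from RX completions, not from arrays. */
    static void bnxt_chain_segs_sketch(struct rte_mbuf *head, struct rte_mbuf **seg,
                                       uint8_t nb_ag, const uint16_t *seg_len)
    {
        struct rte_mbuf *last = head;
        uint8_t i;

        for (i = 0; i < nb_ag; i++) {
            seg[i]->data_len = seg_len[i];
            head->nb_segs++;
            head->pkt_len += seg_len[i];
            last->next = seg[i];
            last = seg[i];
        }
        last->next = NULL;
    }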
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index f766b26c..f8d6dc80 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -37,20 +37,66 @@
#define B_RX_DB(db, prod) \
(*(uint32_t *)db = (DB_KEY_RX | prod))
+#define BNXT_TPA_L4_SIZE(x) \
+ { \
+ typeof(x) hdr_info = (x); \
+ (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) \
+ }
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
+
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE, /* Undefined type */
+ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
+ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
+ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
+struct bnxt_tpa_info {
+ struct rte_mbuf *mbuf;
+ uint16_t len;
+ unsigned short gso_type;
+ uint32_t flags2;
+ uint32_t metadata;
+ enum pkt_hash_types hash_type;
+ uint32_t rss_hash;
+ uint32_t hdr_info;
+};
+
struct bnxt_sw_rx_bd {
struct rte_mbuf *mbuf; /* data associated with RX descriptor */
};
struct bnxt_rx_ring_info {
uint16_t rx_prod;
+ uint16_t ag_prod;
void *rx_doorbell;
+ void *ag_doorbell;
struct rx_prod_pkt_bd *rx_desc_ring;
+ struct rx_prod_pkt_bd *ag_desc_ring;
struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
+ struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */
phys_addr_t rx_desc_mapping;
+ phys_addr_t ag_desc_mapping;
struct bnxt_ring *rx_ring_struct;
+ struct bnxt_ring *ag_ring_struct;
+
+ /*
+ * To deal with out-of-order returns from TPA, use a free-buffer indicator
+ */
+ struct rte_bitmap *ag_bitmap;
+
+ struct bnxt_tpa_info *tpa_info;
};
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
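
bnxt_rx_ring_info gains ag_bitmap because TPA returns aggregation buffers out of order: bnxt_rx_pages() flags each consumed slot with rte_bitmap_set(), and bnxt_prod_ag_mbuf() refills only the contiguous run of flagged slots starting at the producer index. A simplified sketch of that producer-side loop (wrap-around done with a plain modulo here, where the driver uses RING_NEXT(), and the actual buffer allocation is elided):

    #include <stdint.h>
    #include <rte_bitmap.h>

    /* Simplified refill loop over out-of-order freed slots; allocating the
     * replacement buffer is elided (the driver calls bnxt_alloc_ag_data()). */
    static void bnxt_ag_refill_sketch(struct rte_bitmap *bmp, uint32_t *prod,
                                      uint32_t ring_size)
    {
        uint32_t next = (*prod + 1) % ring_size;

        while (rte_bitmap_get(bmp, next)) {
            /* ... allocate and post a fresh buffer for slot 'next' ... */
            rte_bitmap_clear(bmp, next);
            *prod = next;
            next = (next + 1) % ring_size;
        }
    }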
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 40c9cac1..d7d0e35c 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -43,6 +43,171 @@
#include "bnxt_txq.h"
#include "hsi_struct_def_dpdk.h"
+static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
+ {"rx_64b_frames", offsetof(struct rx_port_stats,
+ rx_64b_frames)},
+ {"rx_65b_127b_frames", offsetof(struct rx_port_stats,
+ rx_65b_127b_frames)},
+ {"rx_128b_255b_frames", offsetof(struct rx_port_stats,
+ rx_128b_255b_frames)},
+ {"rx_256b_511b_frames", offsetof(struct rx_port_stats,
+ rx_256b_511b_frames)},
+ {"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
+ rx_512b_1023b_frames)},
+ {"rx_1024b_1518_frames", offsetof(struct rx_port_stats,
+ rx_1024b_1518_frames)},
+ {"rx_good_vlan_frames", offsetof(struct rx_port_stats,
+ rx_good_vlan_frames)},
+ {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
+ rx_1519b_2047b_frames)},
+ {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats,
+ rx_2048b_4095b_frames)},
+ {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats,
+ rx_4096b_9216b_frames)},
+ {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats,
+ rx_9217b_16383b_frames)},
+ {"rx_total_frames", offsetof(struct rx_port_stats,
+ rx_total_frames)},
+ {"rx_ucast_frames", offsetof(struct rx_port_stats,
+ rx_ucast_frames)},
+ {"rx_mcast_frames", offsetof(struct rx_port_stats,
+ rx_mcast_frames)},
+ {"rx_bcast_frames", offsetof(struct rx_port_stats,
+ rx_bcast_frames)},
+ {"rx_fcs_err_frames", offsetof(struct rx_port_stats,
+ rx_fcs_err_frames)},
+ {"rx_ctrl_frames", offsetof(struct rx_port_stats,
+ rx_ctrl_frames)},
+ {"rx_pause_frames", offsetof(struct rx_port_stats,
+ rx_pause_frames)},
+ {"rx_pfc_frames", offsetof(struct rx_port_stats,
+ rx_pfc_frames)},
+ {"rx_align_err_frames", offsetof(struct rx_port_stats,
+ rx_align_err_frames)},
+ {"rx_ovrsz_frames", offsetof(struct rx_port_stats,
+ rx_ovrsz_frames)},
+ {"rx_jbr_frames", offsetof(struct rx_port_stats,
+ rx_jbr_frames)},
+ {"rx_mtu_err_frames", offsetof(struct rx_port_stats,
+ rx_mtu_err_frames)},
+ {"rx_tagged_frames", offsetof(struct rx_port_stats,
+ rx_tagged_frames)},
+ {"rx_double_tagged_frames", offsetof(struct rx_port_stats,
+ rx_double_tagged_frames)},
+ {"rx_good_frames", offsetof(struct rx_port_stats,
+ rx_good_frames)},
+ {"rx_undrsz_frames", offsetof(struct rx_port_stats,
+ rx_undrsz_frames)},
+ {"rx_eee_lpi_events", offsetof(struct rx_port_stats,
+ rx_eee_lpi_events)},
+ {"rx_eee_lpi_duration", offsetof(struct rx_port_stats,
+ rx_eee_lpi_duration)},
+ {"rx_bytes", offsetof(struct rx_port_stats,
+ rx_bytes)},
+ {"rx_runt_bytes", offsetof(struct rx_port_stats,
+ rx_runt_bytes)},
+ {"rx_runt_frames", offsetof(struct rx_port_stats,
+ rx_runt_frames)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
+ {"tx_64b_frames", offsetof(struct tx_port_stats,
+ tx_64b_frames)},
+ {"tx_65b_127b_frames", offsetof(struct tx_port_stats,
+ tx_65b_127b_frames)},
+ {"tx_128b_255b_frames", offsetof(struct tx_port_stats,
+ tx_128b_255b_frames)},
+ {"tx_256b_511b_frames", offsetof(struct tx_port_stats,
+ tx_256b_511b_frames)},
+ {"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
+ tx_512b_1023b_frames)},
+ {"tx_1024b_1518_frames", offsetof(struct tx_port_stats,
+ tx_1024b_1518_frames)},
+ {"tx_good_vlan_frames", offsetof(struct tx_port_stats,
+ tx_good_vlan_frames)},
+ {"tx_1519b_2047_frames", offsetof(struct tx_port_stats,
+ tx_1519b_2047_frames)},
+ {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
+ tx_2048b_4095b_frames)},
+ {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
+ tx_4096b_9216b_frames)},
+ {"tx_9217b_16383b_frames", offsetof(struct tx_port_stats,
+ tx_9217b_16383b_frames)},
+ {"tx_good_frames", offsetof(struct tx_port_stats,
+ tx_good_frames)},
+ {"tx_total_frames", offsetof(struct tx_port_stats,
+ tx_total_frames)},
+ {"tx_ucast_frames", offsetof(struct tx_port_stats,
+ tx_ucast_frames)},
+ {"tx_mcast_frames", offsetof(struct tx_port_stats,
+ tx_mcast_frames)},
+ {"tx_bcast_frames", offsetof(struct tx_port_stats,
+ tx_bcast_frames)},
+ {"tx_pause_frames", offsetof(struct tx_port_stats,
+ tx_pause_frames)},
+ {"tx_pfc_frames", offsetof(struct tx_port_stats,
+ tx_pfc_frames)},
+ {"tx_jabber_frames", offsetof(struct tx_port_stats,
+ tx_jabber_frames)},
+ {"tx_fcs_err_frames", offsetof(struct tx_port_stats,
+ tx_fcs_err_frames)},
+ {"tx_err", offsetof(struct tx_port_stats,
+ tx_err)},
+ {"tx_fifo_underruns", offsetof(struct tx_port_stats,
+ tx_fifo_underruns)},
+ {"tx_eee_lpi_events", offsetof(struct tx_port_stats,
+ tx_eee_lpi_events)},
+ {"tx_eee_lpi_duration", offsetof(struct tx_port_stats,
+ tx_eee_lpi_duration)},
+ {"tx_total_collisions", offsetof(struct tx_port_stats,
+ tx_total_collisions)},
+ {"tx_bytes", offsetof(struct tx_port_stats,
+ tx_bytes)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
+ {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_ucast_pkts)},
+ {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_mcast_pkts)},
+ {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_bcast_pkts)},
+ {"tx_err_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_err_pkts)},
+ {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_drop_pkts)},
+ {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_ucast_bytes)},
+ {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_mcast_bytes)},
+ {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ tx_bcast_bytes)},
+ {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_ucast_pkts)},
+ {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_mcast_pkts)},
+ {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_bcast_pkts)},
+ {"rx_err_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_err_pkts)},
+ {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_drop_pkts)},
+ {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_ucast_bytes)},
+ {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_mcast_bytes)},
+ {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_bcast_bytes)},
+ {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_pkts)},
+ {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_bytes)},
+ {"rx_agg_events", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_events)},
+ {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output,
+ rx_agg_aborts)},
+};
+
/*
* Statistics functions
*/
@@ -74,64 +239,18 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
- struct ctx_hw_stats64 *hw_stats =
- (struct ctx_hw_stats64 *)cpr->hw_stats;
-
- bnxt_stats->q_ipackets[i] +=
- rte_le_to_cpu_64(hw_stats->rx_ucast_pkts);
- bnxt_stats->q_ipackets[i] +=
- rte_le_to_cpu_64(hw_stats->rx_mcast_pkts);
- bnxt_stats->q_ipackets[i] +=
- rte_le_to_cpu_64(hw_stats->rx_bcast_pkts);
-
- bnxt_stats->q_ibytes[i] +=
- rte_le_to_cpu_64(hw_stats->rx_ucast_bytes);
- bnxt_stats->q_ibytes[i] +=
- rte_le_to_cpu_64(hw_stats->rx_mcast_bytes);
- bnxt_stats->q_ibytes[i] +=
- rte_le_to_cpu_64(hw_stats->rx_bcast_bytes);
-
- /*
- * TBD: No clear mapping to this... we don't seem
- * to have a stat specifically for dropped due to
- * insufficient mbufs.
- */
- bnxt_stats->q_errors[i] = 0;
-
- /* These get replaced once the *_QSTATS commands work */
- bnxt_stats->ipackets += bnxt_stats->q_ipackets[i];
- bnxt_stats->ibytes += bnxt_stats->q_ibytes[i];
- bnxt_stats->imissed += bnxt_stats->q_errors[i];
- bnxt_stats->ierrors +=
- rte_le_to_cpu_64(hw_stats->rx_discard_pkts);
+
+ bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- struct ctx_hw_stats64 *hw_stats =
- (struct ctx_hw_stats64 *)cpr->hw_stats;
-
- bnxt_stats->q_opackets[i] +=
- rte_le_to_cpu_64(hw_stats->tx_ucast_pkts);
- bnxt_stats->q_opackets[i] +=
- rte_le_to_cpu_64(hw_stats->tx_mcast_pkts);
- bnxt_stats->q_opackets[i] +=
- rte_le_to_cpu_64(hw_stats->tx_bcast_pkts);
-
- bnxt_stats->q_obytes[i] +=
- rte_le_to_cpu_64(hw_stats->tx_ucast_bytes);
- bnxt_stats->q_obytes[i] +=
- rte_le_to_cpu_64(hw_stats->tx_mcast_bytes);
- bnxt_stats->q_obytes[i] +=
- rte_le_to_cpu_64(hw_stats->tx_bcast_bytes);
-
- /* These get replaced once the *_QSTATS commands work */
- bnxt_stats->opackets += bnxt_stats->q_opackets[i];
- bnxt_stats->obytes += bnxt_stats->q_obytes[i];
- bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts);
- bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_discard_pkts);
+
+ bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
}
+ bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
+ bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
}
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
@@ -139,4 +258,103 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
bnxt_clear_all_hwrm_stat_ctxs(bp);
+ rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
+}
+
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats, unsigned int n)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ unsigned int count, i;
+ uint64_t tx_drop_pkts;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
+ RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+ return 0;
+ }
+
+ bnxt_hwrm_port_qstats(bp);
+ bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
+
+ count = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1; /* For tx_drop_pkts */
+
+ if (n < count)
+ return count;
+
+ count = 0;
+ for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+ uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
+ xstats[count].value = rte_le_to_cpu_64(
+ *(uint64_t *)((char *)rx_stats +
+ bnxt_rx_stats_strings[i].offset));
+ count++;
+ }
+
+ for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+ uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+ xstats[count].value = rte_le_to_cpu_64(
+ *(uint64_t *)((char *)tx_stats +
+ bnxt_tx_stats_strings[i].offset));
+ count++;
+ }
+
+ /* The Tx drop pkts, aka the anti-spoof counter */
+ xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
+ count++;
+
+ return count;
+}
+
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ /* Account for the Tx drop pkts aka the Anti spoof counter */
+ const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1;
+ unsigned int i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_rx_stats_strings[i].name);
+ count++;
+ }
+
+ for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_tx_stats_strings[i].name);
+ count++;
+ }
+
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_func_stats_strings[4].name);
+ count++;
+ }
+ return stat_cnt;
+}
+
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (bp->flags & BNXT_FLAG_PORT_STATS && !BNXT_NPAR_PF(bp))
+ bnxt_hwrm_port_clr_stats(bp);
+
+ if (BNXT_VF(bp))
+ RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
+ if (BNXT_NPAR_PF(bp))
+ RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ RTE_LOG(ERR, PMD, "Operation not supported\n");
}
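
The xstats support added above is table-driven: each bnxt_xstats_name_off entry pairs a counter name with its offsetof() position inside the corresponding HWRM stats structure, and bnxt_dev_xstats_get_op() walks the tables reading little-endian 64-bit values. Reduced to a self-contained example around a hypothetical stats structure, the pattern looks like this:

    #include <stddef.h>   /* offsetof */
    #include <stdint.h>

    /* Hypothetical counter block and name/offset table illustrating the
     * pattern used by bnxt_rx_stats_strings[] and friends above. */
    struct example_port_stats {
        uint64_t rx_good_frames;
        uint64_t rx_fcs_err_frames;
    };

    struct example_name_off {
        const char *name;
        uint64_t offset;
    };

    static const struct example_name_off example_strings[] = {
        {"rx_good_frames",    offsetof(struct example_port_stats, rx_good_frames)},
        {"rx_fcs_err_frames", offsetof(struct example_port_stats, rx_fcs_err_frames)},
    };

    /* Read the i-th counter out of a raw stats block by table offset. */
    static uint64_t example_read_stat(const void *stats, unsigned int i)
    {
        return *(const uint64_t *)((const char *)stats + example_strings[i].offset);
    }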
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index 65408a44..b6d133ef 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -40,5 +40,15 @@ void bnxt_free_stats(struct bnxt *bp);
void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *bnxt_stats);
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev);
+int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit);
+int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats, unsigned int n);
+void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev);
+struct bnxt_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 0d15bb1e..6870b16d 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -213,7 +213,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TSO */
txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
- tx_pkt->l4_len;
+ tx_pkt->l4_len + tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
txbd1->mss = tx_pkt->tso_segsz;
} else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM |
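
The single bnxt_txr.c change folds the outer (tunnel) L2/L3 lengths into the LSO header size, so for an encapsulated TSO packet txbd1->hdr_size covers everything up to and including the inner TCP header. As a hedged illustration with the usual DPDK tunnel-offload field split (outer Ethernet 14, outer IPv4 20, UDP+VXLAN+inner Ethernet 30 carried in l2_len, inner IPv4 20, TCP 20, all values assumed):

    #include <stdint.h>

    /* Illustrative LSO header size for a tunneled packet; parameter names
     * mirror the rte_mbuf offload lengths used in the hunk above. */
    static inline uint32_t bnxt_tso_hdr_size_sketch(uint32_t outer_l2_len,
                                                    uint32_t outer_l3_len,
                                                    uint32_t l2_len,
                                                    uint32_t l3_len,
                                                    uint32_t l4_len)
    {
        /* e.g. 14 + 20 + 30 + 20 + 20 = 104 bytes for a VXLAN/TCP segment */
        return outer_l2_len + outer_l3_len + l2_len + l3_len + l4_len;
    }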
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 33fdde2f..db9fb079 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -69,21 +69,14 @@ void bnxt_init_vnics(struct bnxt *bp)
uint16_t max_vnics;
int i, j;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
STAILQ_INIT(&bp->free_vnic_list);
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
@@ -177,19 +170,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
- HW_HASH_KEY_SIZE);
+ HW_HASH_KEY_SIZE +
+ BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);
uint16_t max_vnics;
int i;
+ phys_addr_t mz_phys_addr;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
@@ -204,6 +191,19 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
if (!mz)
return -ENOMEM;
}
+ mz_phys_addr = mz->phys_addr;
+ if ((unsigned long)mz->addr == mz_phys_addr) {
+ RTE_LOG(WARNING, PMD,
+ "Memzone physical address same as virtual.\n");
+ RTE_LOG(WARNING, PMD,
+ "Using rte_mem_virt2phy()\n");
+ mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ if (mz_phys_addr == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map vnic address to physical memory\n");
+ return -ENOMEM;
+ }
+ }
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
@@ -213,12 +213,16 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
(void *)((char *)mz->addr + (entry_length * i));
memset(vnic->rss_table, -1, entry_length);
- vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i);
+ vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i);
vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table));
vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr +
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
+ vnic->mc_list = (void *)((char *)vnic->rss_hash_key +
+ HW_HASH_KEY_SIZE);
+ vnic->mc_list_dma_addr = vnic->rss_hash_key_dma_addr +
+ HW_HASH_KEY_SIZE;
}
return 0;
@@ -232,15 +236,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
if (bp->vnic_info == NULL)
return;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
@@ -258,15 +254,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
struct bnxt_vnic_info *vnic_mem;
uint16_t max_vnics;
- if (BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
-
- max_vnics = pf->max_vnics;
- } else {
- struct bnxt_vf_info *vf = &bp->vf;
-
- max_vnics = vf->max_vnics;
- }
+ max_vnics = bp->max_vnics;
/* Allocate memory for VNIC pool and filter pool */
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
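
bnxt_alloc_vnic_attributes() now sizes each per-VNIC entry to hold the RSS indirection table, the 40-byte hash key and a 16-entry multicast MAC list back to back, carving both virtual and DMA addresses out of one shared memzone at entry_length strides (with the same rte_mem_virt2phy() fallback as in bnxt_ring.c). The carving, reduced to a sketch with the constants copied from the hunks above and locals in place of the vnic fields:

    #include <stdint.h>
    #include <rte_common.h>    /* RTE_CACHE_LINE_ROUNDUP */
    #include <rte_memzone.h>   /* struct rte_memzone, phys_addr_t */

    #define EX_HASH_INDEX_SIZE 0x80   /* HW_HASH_INDEX_SIZE */
    #define EX_HASH_KEY_SIZE   40     /* HW_HASH_KEY_SIZE   */
    #define EX_MAX_MC_ADDRS    16     /* BNXT_MAX_MC_ADDRS  */
    #define EX_ETHER_ADDR_LEN  6      /* ETHER_ADDR_LEN     */

    /* Sketch of slicing one VNIC's attribute block out of the shared memzone. */
    static void bnxt_vnic_carve_sketch(const struct rte_memzone *mz,
                                       phys_addr_t mz_phys_addr, unsigned int i)
    {
        uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
            EX_HASH_INDEX_SIZE * sizeof(uint16_t) + EX_HASH_KEY_SIZE +
            EX_MAX_MC_ADDRS * EX_ETHER_ADDR_LEN);

        char *base          = (char *)mz->addr + entry_length * i;
        uint16_t *rss_table = (uint16_t *)(void *)base;
        char *rss_hash_key  = base + EX_HASH_INDEX_SIZE * sizeof(uint16_t);
        char *mc_list       = rss_hash_key + EX_HASH_KEY_SIZE;
        phys_addr_t rss_dma = mz_phys_addr + entry_length * i;

        /* key/mc DMA addresses are rss_dma plus the same in-entry offsets */
        (void)rss_table; (void)mc_list; (void)rss_dma;
    }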
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 9671ba42..993f2212 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -42,8 +42,7 @@ struct bnxt_vnic_info {
uint8_t ff_pool_idx;
uint16_t fw_vnic_id; /* returned by Chimp during alloc */
- uint16_t fw_rss_cos_lb_ctx;
- uint16_t ctx_is_rss_cos_lb;
+ uint16_t rss_rule;
#define MAX_NUM_TRAFFIC_CLASSES 8
#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
@@ -51,17 +50,34 @@ struct bnxt_vnic_info {
uint16_t start_grp_id;
uint16_t end_grp_id;
uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t dflt_ring_grp;
+ uint16_t mru;
uint16_t hash_type;
phys_addr_t rss_table_dma_addr;
uint16_t *rss_table;
phys_addr_t rss_hash_key_dma_addr;
void *rss_hash_key;
+ phys_addr_t mc_list_dma_addr;
+ char *mc_list;
+ uint32_t mc_addr_cnt;
+#define BNXT_MAX_MC_ADDRS 16
uint32_t flags;
#define BNXT_VNIC_INFO_PROMISC (1 << 0)
#define BNXT_VNIC_INFO_ALLMULTI (1 << 1)
+#define BNXT_VNIC_INFO_BCAST (1 << 2)
+#define BNXT_VNIC_INFO_UCAST (1 << 3)
+#define BNXT_VNIC_INFO_MCAST (1 << 4)
+#define BNXT_VNIC_INFO_TAGGED (1 << 5)
+#define BNXT_VNIC_INFO_UNTAGGED (1 << 6)
+ uint16_t cos_rule;
+ uint16_t lb_rule;
bool vlan_strip;
bool func_default;
+ bool bd_stall;
+ bool roce_dual;
+ bool roce_only;
+ bool rss_dflt_cr;
STAILQ_HEAD(, bnxt_filter_info) filter;
};
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index f0248377..cb8660af 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) Broadcom Limited.
+ * Copyright(c) 2001-2017 Broadcom Limited.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,74 +31,89 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _HSI_STRUCT_DEF_EXTERNAL_H_
-#define _HSI_STRUCT_DEF_EXTERNAL_H_
-
-/*
- * per-context HW statistics -- chip view
- */
-
-struct ctx_hw_stats64 {
- uint64_t rx_ucast_pkts;
- uint64_t rx_mcast_pkts;
- uint64_t rx_bcast_pkts;
- uint64_t rx_drop_pkts;
- uint64_t rx_discard_pkts;
- uint64_t rx_ucast_bytes;
- uint64_t rx_mcast_bytes;
- uint64_t rx_bcast_bytes;
-
- uint64_t tx_ucast_pkts;
- uint64_t tx_mcast_pkts;
- uint64_t tx_bcast_pkts;
- uint64_t tx_drop_pkts;
- uint64_t tx_discard_pkts;
- uint64_t tx_ucast_bytes;
- uint64_t tx_mcast_bytes;
- uint64_t tx_bcast_bytes;
-
- uint64_t tpa_pkts;
- uint64_t tpa_bytes;
- uint64_t tpa_events;
- uint64_t tpa_aborts;
-} __attribute__((packed));
-
-/* HW Resource Manager Specification 1.5.1 */
+#ifndef _HSI_STRUCT_DEF_DPDK_
+#define _HSI_STRUCT_DEF_DPDK_
+/* HSI and HWRM Specification 1.7.7 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 5
-#define HWRM_VERSION_UPDATE 1
-
-#define HWRM_VERSION_STR "1.5.1"
+#define HWRM_VERSION_MINOR 7
+#define HWRM_VERSION_UPDATE 7
+#define HWRM_VERSION_STR "1.7.7"
/*
* Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
+ * applicable (All F's). Need to cast it the size of the field if needed.
*/
-#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
+#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
+#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1
+#define HWRM_ROCE_SP_HSI_VERSION_MINOR 7
+#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 4
/*
* Request types
*/
#define HWRM_VER_GET (UINT32_C(0x0))
+#define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe))
+#define HWRM_FUNC_VF_CFG (UINT32_C(0xf))
+ /* Reserved for future use */
+#define RESERVED1 (UINT32_C(0x10))
#define HWRM_FUNC_RESET (UINT32_C(0x11))
+#define HWRM_FUNC_GETFID (UINT32_C(0x12))
+#define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13))
+#define HWRM_FUNC_VF_FREE (UINT32_C(0x14))
#define HWRM_FUNC_QCAPS (UINT32_C(0x15))
#define HWRM_FUNC_QCFG (UINT32_C(0x16))
+#define HWRM_FUNC_CFG (UINT32_C(0x17))
+#define HWRM_FUNC_QSTATS (UINT32_C(0x18))
+#define HWRM_FUNC_CLR_STATS (UINT32_C(0x19))
#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a))
+#define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b))
+#define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c))
#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d))
+#define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e))
+#define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f))
#define HWRM_PORT_PHY_CFG (UINT32_C(0x20))
+#define HWRM_PORT_MAC_CFG (UINT32_C(0x21))
+#define HWRM_PORT_QSTATS (UINT32_C(0x23))
+#define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24))
+#define HWRM_PORT_CLR_STATS (UINT32_C(0x25))
#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
+#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28))
+#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a))
+#define HWRM_PORT_LED_CFG (UINT32_C(0x2d))
+#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e))
+#define HWRM_PORT_LED_QCAPS (UINT32_C(0x2f))
#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30))
+#define HWRM_QUEUE_QCFG (UINT32_C(0x31))
+#define HWRM_QUEUE_CFG (UINT32_C(0x32))
+#define HWRM_FUNC_VLAN_CFG (UINT32_C(0x33))
+#define HWRM_FUNC_VLAN_QCFG (UINT32_C(0x34))
+#define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35))
+#define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36))
+#define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37))
+#define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38))
+#define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39))
+#define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a))
+#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
#define HWRM_VNIC_FREE (UINT32_C(0x41))
#define HWRM_VNIC_CFG (UINT32_C(0x42))
+#define HWRM_VNIC_QCFG (UINT32_C(0x43))
+#define HWRM_VNIC_TPA_CFG (UINT32_C(0x44))
#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46))
+#define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47))
+#define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48))
+#define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49))
+#define HWRM_VNIC_QCAPS (UINT32_C(0x4a))
#define HWRM_RING_ALLOC (UINT32_C(0x50))
#define HWRM_RING_FREE (UINT32_C(0x51))
+#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52))
+#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53))
+#define HWRM_RING_RESET (UINT32_C(0x5e))
#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60))
#define HWRM_RING_GRP_FREE (UINT32_C(0x61))
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70))
@@ -107,16 +122,71 @@ struct ctx_hw_stats64 {
#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91))
#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92))
#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93))
+ /* Reserved for future use */
+#define HWRM_CFA_VLAN_ANTISPOOF_CFG (UINT32_C(0x94))
+#define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95))
+#define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96))
+#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99))
+#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a))
+#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b))
+#define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0))
+#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1))
+#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2))
#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0))
#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1))
+#define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2))
#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3))
+#define HWRM_FW_RESET (UINT32_C(0xc0))
+#define HWRM_FW_QSTATUS (UINT32_C(0xc1))
#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0))
+#define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1))
+#define HWRM_FWD_RESP (UINT32_C(0xd2))
+#define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3))
+#define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0))
+#define HWRM_WOL_FILTER_ALLOC (UINT32_C(0xf0))
+#define HWRM_WOL_FILTER_FREE (UINT32_C(0xf1))
+#define HWRM_WOL_FILTER_QCFG (UINT32_C(0xf2))
+#define HWRM_WOL_REASON_QCFG (UINT32_C(0xf3))
+#define HWRM_DBG_DUMP (UINT32_C(0xff14))
+#define HWRM_NVM_VALIDATE_OPTION (UINT32_C(0xffef))
+#define HWRM_NVM_FLUSH (UINT32_C(0xfff0))
+#define HWRM_NVM_GET_VARIABLE (UINT32_C(0xfff1))
+#define HWRM_NVM_SET_VARIABLE (UINT32_C(0xfff2))
+#define HWRM_NVM_INSTALL_UPDATE (UINT32_C(0xfff3))
+#define HWRM_NVM_MODIFY (UINT32_C(0xfff4))
+#define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5))
+#define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6))
+#define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7))
+#define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8))
+#define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9))
+#define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa))
+#define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb))
+#define HWRM_NVM_RAW_DUMP (UINT32_C(0xfffc))
+#define HWRM_NVM_READ (UINT32_C(0xfffd))
+#define HWRM_NVM_WRITE (UINT32_C(0xfffe))
+#define HWRM_NVM_RAW_WRITE_BLK (UINT32_C(0xffff))
-/* Return Codes */
-#define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
-#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
-
-/* Short TX BD (16 bytes) */
+/*
+ * Note: The Host Software Interface (HSI) and Hardware Resource Manager (HWRM)
+ * specification describes the data structures used in Ethernet packet or RDMA
+ * message data transfers as well as an abstract interface for managing Ethernet
+ * NIC hardware resources.
+ */
+/* Ethernet Data path Host Structures */
+/*
+ * Description: The following three sections document the host structures used
+ * between device and software drivers for communicating Ethernet packets.
+ */
+/* BD Ring Structures */
+/*
+ * Description: This structure is used to inform the NIC of a location for and
+ * an aggregation buffer that will be used for packet data that is received. An
+ * aggregation buffer creates a different kind of completion operation for a
+ * packet where a variable number of BDs may be used to place the packet in the
+ * host. RX Rings that have aggregation buffers are known as aggregation rings
+ * and must contain only aggregation buffers.
+ */
+/* Short TX BD (16 bytes) */
struct tx_bd_short {
uint16_t flags_type;
/*
@@ -149,10 +219,10 @@ struct tx_bd_short {
/*
* This value indicates how many 16B BD locations are consumed
* in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
+ * BD is the only BD (and that the it is a short BD). A value of
* 3 indicates either 3 short BDs or 1 long BD and one short BD
* in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
+ * locations in the packet (the maximum). This field is valid
* only on the first BD of a packet.
*/
#define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
@@ -173,7 +243,8 @@ struct tx_bd_short {
#define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
#define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K
+ #define TX_BD_SHORT_FLAGS_LHINT_LAST \
+ TX_BD_SHORT_FLAGS_LHINT_GTE2K
/*
* If set to 1, the device immediately updates the Send Consumer
* Index after the buffer associated with this descriptor has
@@ -213,7 +284,7 @@ struct tx_bd_short {
*/
} __attribute__((packed));
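As an editorial aside (not part of the patch), here is a minimal sketch of how the packed flags_type bits above are meant to be used: it programs the BD count and a coarse length hint, hardcodes the shift implied by the 0x1f00 mask because the matching _SFT define is not visible in this hunk, and ignores the type bits and finer-grained length hints a real driver would also set.

/*
 * Illustration only: set the BD count and a coarse length hint in a short
 * TX BD. The 0x1f00 mask places BD_CNT at bits 12:8; the header's matching
 * _SFT define (not shown in this hunk) would normally replace the literal 8.
 */
static inline void
tx_bd_short_set_cnt_and_hint(struct tx_bd_short *txbd, uint16_t nr_bds,
			     uint16_t pkt_len)
{
	uint16_t flags = 0;

	/* 1..31 BDs consumed; 0 encodes the maximum of 32 BD locations. */
	flags |= (nr_bds << 8) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;

	/* Coarse length hint: only the two hints shown above are used here. */
	flags |= (pkt_len >= 2048) ? TX_BD_SHORT_FLAGS_LHINT_GTE2K :
				     TX_BD_SHORT_FLAGS_LHINT_LT2K;

	txbd->flags_type |= flags;
}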
-/* Long TX BD (32 bytes split to 2 16-byte struct) */
+/* Long TX BD (32 bytes split to 2 16-byte struct) */
struct tx_bd_long {
uint16_t flags_type;
/*
@@ -246,10 +317,10 @@ struct tx_bd_long {
/*
* This value indicates how many 16B BD locations are consumed
* in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
+	 * BD is the only BD (and that it is a short BD). A value of
* 3 indicates either 3 short BDs or 1 long BD and one short BD
* in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
+ * locations in the packet (the maximum). This field is valid
* only on the first BD of a packet.
*/
#define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
@@ -270,7 +341,8 @@ struct tx_bd_long {
#define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
#define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
+ #define TX_BD_LONG_FLAGS_LHINT_LAST \
+ TX_BD_LONG_FLAGS_LHINT_GTE2K
/*
* If set to 1, the device immediately updates the Send Consumer
* Index after the buffer associated with this descriptor has
@@ -360,7 +432,7 @@ struct tx_bd_long_hi {
* bit is set, outer UDP checksum will be calculated for the
* following cases: 1. Packets with tcp_udp_chksum flag set to
* offload checksum for inner packet AND the inner packet is
- * TCP/UDP. If the inner packet is ICMP for example (non-
+ * TCP/UDP. If the inner packet is ICMP for example (non-
* TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
* checksum will not be calculated. 2. Packets with lso flag set
* which implies inner TCP checksum calculation as part of LSO
@@ -392,7 +464,7 @@ struct tx_bd_long_hi {
* to one when LSO is '1', then the IPID of the tunnel IP header
* will be incremented for each subsequent segment of an LSO
* operation. The flag is ignored if the LSO packet is a normal
- * (non-tunneled) TCP packet.
+ * (non-tunneled) TCP packet.
*/
#define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
/*
@@ -460,7 +532,7 @@ struct tx_bd_long_hi {
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
/* Value programmed in CFA VLANTPID register. */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
- #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
/* When key=1, This is the VLAN tag TPID select value. */
#define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
@@ -474,15 +546,16 @@ struct tx_bd_long_hi {
/* No editing */
#define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
/*
- * - meta[17:16] - TPID select value (0 =
+ * - meta[17:16] - TPID select value (0 =
* 0x8100). - meta[15:12] - PRI/DE value. -
* meta[11:0] - VID value.
*/
#define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
- #define TX_BD_LONG_CFA_META_KEY_LAST TX_BD_LONG_CFA_META_KEY_VLAN_TAG
+ #define TX_BD_LONG_CFA_META_KEY_LAST \
+ TX_BD_LONG_CFA_META_KEY_VLAN_TAG
} __attribute__((packed));
-/* RX Producer Packet BD (16 bytes) */
+/* RX Producer Packet BD (16 bytes) */
struct rx_prod_pkt_bd {
uint16_t flags_type;
/* This value identifies the type of buffer descriptor. */
@@ -490,7 +563,7 @@ struct rx_prod_pkt_bd {
#define RX_PROD_PKT_BD_TYPE_SFT 0
/*
* Indicates that this BD is 16B long and is an
- * RX Producer (ie. empty) buffer descriptor.
+	 * RX Producer (i.e. empty) buffer descriptor.
*/
#define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
/*
@@ -558,7 +631,7 @@ struct rx_prod_pkt_bd {
/* Completion Ring Structures */
/* Note: This structure is used by the HWRM to communicate HWRM Error. */
-/* Base Completion Record (16 bytes) */
+/* Base Completion Record (16 bytes) */
struct cmpl_base {
uint16_t type;
/* unused is 10 b */
@@ -637,7 +710,7 @@ struct cmpl_base {
/* info4 is 32 b */
} __attribute__((packed));
-/* TX Completion Record (16 bytes) */
+/* TX Completion Record (16 bytes) */
struct tx_cmpl {
uint16_t flags_type;
/*
@@ -689,7 +762,7 @@ struct tx_cmpl {
#define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
/* Bad Format: BDs were not formatted correctly. */
#define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
- #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
/*
* When this bit is '1', it indicates that the length of the
@@ -726,7 +799,7 @@ struct tx_cmpl {
/* unused3 is 32 b */
} __attribute__((packed));
-/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
+/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
struct rx_pkt_cmpl {
uint16_t flags_type;
/*
@@ -741,7 +814,9 @@ struct rx_pkt_cmpl {
* RX L2 completion: Completion of and L2 RX
* packet. Length = 32B
*/
- #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2_TPA_START UINT32_C(0x13)
+ #define RX_PKT_CMPL_TYPE_RX_L2_TPA_END UINT32_C(0x15)
/*
* When this bit is '1', it indicates a packet that has an error
* of some type. Type of error is indicated in error_flags.
@@ -761,10 +836,12 @@ struct rx_pkt_cmpl {
* field.
*/
#define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
/* This bit is '1' if the RSS field in this completion is valid. */
#define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
/* unused is 1 b */
+ #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
/*
* This value indicates what the inner packet determined for the
* packet was.
@@ -820,7 +897,8 @@ struct rx_pkt_cmpl {
* that a timestamp was taken for the packet.
*/
#define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
- #define RX_PKT_CMPL_FLAGS_ITYPE_LAST RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
#define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
#define RX_PKT_CMPL_FLAGS_SFT 6
uint16_t len;
@@ -938,7 +1016,7 @@ struct rx_pkt_cmpl_hi {
* the vlan TPID value.
*/
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
/*
* This field indicates the IP type for the inner-most IP
@@ -988,15 +1066,18 @@ struct rx_pkt_cmpl_hi {
* means that the packet could not be placed
* into 7 physical buffers or less.
*/
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (UINT32_C(0x1) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \
+ (UINT32_C(0x1) << 1)
/*
* Not On Chip: All BDs needed for the packet
* were not on-chip when the packet arrived.
*/
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (UINT32_C(0x2) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
/* Bad Format: BDs were not formatted correctly. */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (UINT32_C(0x3) << 1)
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
+ (UINT32_C(0x3) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
/* This indicates that there was an error in the IP header checksum. */
#define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
@@ -1037,39 +1118,45 @@ struct rx_pkt_cmpl_hi {
* match expectation from L2 Ethertype for IPv4
* and IPv6 in the tunnel header.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (UINT32_C(0x1) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 9)
/*
* Indicates that header length is out of range
* in the tunnel header. Valid for IPv4.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (UINT32_C(0x2) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 9)
/*
* Indicates that the physical packet is shorter
* than that claimed by the PPPoE header length
* for a tunnel PPPoE packet.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (UINT32_C(0x3) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
+ (UINT32_C(0x3) << 9)
/*
* Indicates that physical packet is shorter
* than that claimed by the tunnel l3 header
* length. Valid for IPv4, or IPv6 tunnel packet
* packets.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (UINT32_C(0x4) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 9)
/*
* Indicates that the physical packet is shorter
* than that claimed by the tunnel UDP header
* length for a tunnel UDP packet that is not
* fragmented.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (UINT32_C(0x5) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 9)
/*
* indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0) in the
+	 * check has failed (e.g. TTL = 0) in the
* tunnel header. Valid for IPv4, and IPv6.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (UINT32_C(0x6) << 9)
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
+ (UINT32_C(0x6) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
/*
* This indicates that there was an error in the inner portion
@@ -1089,15 +1176,17 @@ struct rx_pkt_cmpl_hi {
* and IPv6 or that option other than VFT was
* parsed on FCoE packet.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (UINT32_C(0x1) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 12)
/*
* indicates that header length is out of range.
* Valid for IPv4 and RoCE
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (UINT32_C(0x2) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 12)
/*
* indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0). Valid for
+	 * check has failed (e.g. TTL = 0). Valid for
* IPv4, and IPv6
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
@@ -1106,18 +1195,21 @@ struct rx_pkt_cmpl_hi {
* than that claimed by the l3 header length.
* Valid for IPv4, IPv6 packet or RoCE packets.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (UINT32_C(0x4) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 12)
/*
* Indicates that the physical packet is shorter
* than that claimed by the UDP header length
* for a UDP packet that is not fragmented.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (UINT32_C(0x5) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 12)
/*
* Indicates that TCP header length > IP
* payload. Valid for TCP packets only.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (UINT32_C(0x6) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
+ (UINT32_C(0x6) << 12)
/* Indicates that TCP header length < 5. Valid for TCP. */
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
(UINT32_C(0x7) << 12)
@@ -1126,9 +1218,9 @@ struct rx_pkt_cmpl_hi {
* TCP header size that does not match data
* offset in TCP header. Valid for TCP.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
(UINT32_C(0x8) << 12)
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
#define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
#define RX_PKT_CMPL_ERRORS_SFT 1
@@ -1149,7 +1241,474 @@ struct rx_pkt_cmpl_hi {
#define RX_PKT_CMPL_REORDER_SFT 0
} __attribute__((packed));
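A brief usage note (not part of the patch): the buffer-error sub-field documented above is an enumerated value rather than a set of flag bits. The sketch below classifies it, assuming the companion RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK define that follows the same naming pattern as the other masks in this file.

/*
 * Sketch: decide whether an RX packet completion reported a fatal buffer
 * placement error. "errors" is the raw errors word from the completion;
 * the _MASK define is assumed to exist alongside the values shown above.
 */
static inline int
rx_pkt_cmpl_buffer_error_fatal(uint16_t errors)
{
	switch (errors & RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK) {
	case RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT:
	case RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP:
	case RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT:
		return 1;	/* placement failed: drop the packet */
	default:
		return 0;	/* no buffer error reported */
	}
}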
-/* HWRM Forwarded Request (16 bytes) */
+/* RX L2 TPA Start Completion Record (32 bytes split to 2 16-byte struct) */
+struct rx_tpa_start_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_START_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA Start Completion: Completion at the
+ * beginning of a TPA operation. Length = 32B
+ */
+ #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ /* This bit will always be '0' for TPA start completions. */
+ #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo: TPA Packet was placed using jumbo
+ * algorithm. This means that the first buffer
+ * will be filled with data before moving to
+ * aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next
+ * aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation: Packet was placed
+ * using Header/Data separation algorithm. The
+ * separation location is indicated by the itype
+ * field.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo: Packet will be placed using
+ * GRO/Jumbo where the first packet is filled
+ * with data. Subsequent packets will be placed
+ * such that any one packet does not span two
+ * aggregation buffers unless it starts at the
+ * beginning of an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
+ (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation: Packet will be
+ * placed using GRO/HDS where the header is in
+ * the first packet. Payload of each packet will
+ * be placed such that any one packet does not
+ * span two aggregation buffers unless it starts
+ * at the beginning of an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* This bit is '1' if the RSS field in this completion is valid. */
+ #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ /* unused is 1 b */
+ #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
+ /* TCP Packet: Indicates that the packet was IP and TCP. */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \
+ RX_TPA_START_CMPL_FLAGS_ITYPE_TCP
+ #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_START_CMPL_FLAGS_SFT 6
+ uint16_t len;
+ /*
+ * This value indicates the amount of packet data written to the
+ * buffer the opaque field in this completion corresponds to.
+ */
+ uint32_t opaque;
+ /*
+ * This is a copy of the opaque field from the RX BD this
+ * completion corresponds to.
+ */
+ uint8_t v1;
+ /* unused1 is 7 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
+ /* unused1 is 7 b */
+ uint8_t rss_hash_type;
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+	 * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+	 * The value of tuple_extract_op provides the information about
+ * what fields the hash was computed on. * 0: The RSS hash was
+ * computed over source IP address, destination IP address,
+ * source port, and destination port of inner IP and TCP or UDP
+ * headers. Note: For non-tunneled packets, the packet headers
+ * are considered inner packet headers for the RSS hash
+ * computation purpose. * 1: The RSS hash was computed over
+ * source IP address and destination IP address of inner IP
+ * header. Note: For non-tunneled packets, the packet headers
+ * are considered inner packet headers for the RSS hash
+ * computation purpose. * 2: The RSS hash was computed over
+ * source IP address, destination IP address, source port, and
+ * destination port of IP and TCP or UDP headers of outer tunnel
+ * headers. Note: For non-tunneled packets, this value is not
+ * applicable. * 3: The RSS hash was computed over source IP
+ * address and destination IP address of IP header of outer
+ * tunnel headers. Note: For non-tunneled packets, this value is
+ * not applicable. Note that 4-tuples values listed above are
+ * applicable for layer 4 protocols supported and enabled for
+ * RSS in the hardware, HWRM firmware, and drivers. For example,
+ * if RSS hash is supported and enabled for TCP traffic only,
+ * then the values of tuple_extract_op corresponding to 4-tuples
+ * are only valid for TCP traffic.
+ */
+ uint16_t agg_id;
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ /* unused2 is 9 b */
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
+ #define RX_TPA_START_CMPL_AGG_ID_SFT 9
+ uint32_t rss_hash;
+ /*
+ * This value is the RSS hash value calculated for the packet
+ * based on the mode bits and key value in the VNIC.
+ */
+} __attribute__((packed));
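A short usage sketch (not part of the patch): the V1 bit alternates each time the completion ring wraps, and agg_id ties this start record to the later TPA end record. Endianness conversion is omitted for brevity.

/*
 * Sketch: validity check and aggregation-ID extraction for a TPA start
 * completion. "valid_phase" is driver state that flips on each ring wrap.
 */
static inline int
rx_tpa_start_cmpl_valid(const struct rx_tpa_start_cmpl *cmpl,
			uint8_t valid_phase)
{
	return (cmpl->v1 & RX_TPA_START_CMPL_V1) == valid_phase;
}

static inline uint16_t
rx_tpa_start_cmpl_agg_id(const struct rx_tpa_start_cmpl *cmpl)
{
	return (cmpl->agg_id & RX_TPA_START_CMPL_AGG_ID_MASK) >>
	       RX_TPA_START_CMPL_AGG_ID_SFT;
}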
+
+/* last 16 bytes of RX L2 TPA Start Completion Record */
+struct rx_tpa_start_cmpl_hi {
+ uint32_t flags2;
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ /*
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the sum passed for
+ * all segments included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ /*
+ * This indicates that the UDP checksum was calculated for the
+ * tunnel packet and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
+	/* No metadata information. Value is zero. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and
+ * TPID value. - metadata[11:0] contains the
+ * vlan VID value. - metadata[12] contains the
+ * vlan DE value. - metadata[15:13] contains the
+ * vlan PRI value. - metadata[31:16] contains
+ * the vlan TPID value.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
+ /*
+ * This field indicates the IP type for the inner-most IP
+ * header. A value of '0' indicates IPv4. A value of '1'
+ * indicates IPv6.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ uint32_t metadata;
+ /*
+ * This is data from the CFA block as indicated by the
+ * meta_format field.
+ */
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
+ /* When meta_format=1, this value is the VLAN TPID. */
+ #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
+ uint16_t v2;
+ /* unused4 is 15 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
+ /* unused4 is 15 b */
+ uint16_t cfa_code;
+ /*
+ * This field identifies the CFA action rule that was used for
+ * this packet.
+ */
+ uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
+ /*
+ * This is the size in bytes of the inner most L4 header. This
+ * can be subtracted from the payload_offset to determine the
+ * start of the inner most L4 header.
+ */
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the outer L3 header. If there is no outer L3 header, then
+ * this value is zero.
+ */
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the inner most L2 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the inner most L3 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
+ /*
+ * This is the size in bytes of the inner most L4 header. This
+ * can be subtracted from the payload_offset to determine the
+ * start of the inner most L4 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
+} __attribute__((packed));
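The last field above packs four values into one 32-bit word; a small decoding sketch (not part of the patch, endianness ignored) follows.

/* Sketch: unpack the combined offset/size word of a TPA start completion. */
static inline void
rx_tpa_start_cmpl_offsets(const struct rx_tpa_start_cmpl_hi *hi,
			  uint16_t *outer_l3_off, uint16_t *inner_l2_off,
			  uint16_t *inner_l3_off, uint16_t *inner_l4_size)
{
	uint32_t w =
	    hi->inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;

	*outer_l3_off = (w & RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK) >>
			RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT;
	*inner_l2_off = (w & RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK) >>
			RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT;
	*inner_l3_off = (w & RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK) >>
			RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT;
	*inner_l4_size = (w & RX_TPA_START_CMPL_INNER_L4_SIZE_MASK) >>
			 RX_TPA_START_CMPL_INNER_L4_SIZE_SFT;
}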
+
+/* RX TPA End Completion Record (32 bytes split to 2 16-byte struct) */
+struct rx_tpa_end_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_END_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA End Completion: Completion at the
+ * end of a TPA operation. Length = 32B
+ */
+ #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ /*
+ * When this bit is '1', it indicates a packet that has an error
+ * of some type. Type of error is indicated in error_flags.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo: TPA Packet was placed using jumbo
+ * algorithm. This means that the first buffer
+ * will be filled with data before moving to
+ * aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next
+ * aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation: Packet was placed
+ * using Header/Data separation algorithm. The
+ * separation location is indicated by the itype
+ * field.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo: Packet will be placed using
+ * GRO/Jumbo where the first packet is filled
+ * with data. Subsequent packets will be placed
+ * such that any one packet does not span two
+ * aggregation buffers unless it starts at the
+ * beginning of an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation: Packet will be
+ * placed using GRO/HDS where the header is in
+ * the first packet. Payload of each packet will
+ * be placed such that any one packet does not
+ * span two aggregation buffers unless it starts
+ * at the beginning of an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* unused is 2 b */
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was. - 2 TCP Packet Indicates that the packet was IP
+ * and TCP. This indicates that the ip_cs field is valid and
+ * that the tcp_udp_cs field is valid and contains the TCP
+ * checksum. This also indicates that the payload_offset field
+ * is valid.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
+ #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_END_CMPL_FLAGS_SFT 6
+ uint16_t len;
+ /*
+ * This value is zero for TPA End completions. There is no data
+ * in the buffer that corresponds to the opaque value in this
+ * completion.
+ */
+ uint32_t opaque;
+ /*
+ * This is a copy of the opaque field from the RX BD this
+ * completion corresponds to.
+ */
+ uint8_t agg_bufs_v1;
+ /* unused1 is 1 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
+ /*
+ * This value is the number of aggregation buffers that follow
+ * this entry in the completion ring that are a part of this
+ * aggregation packet. If the value is zero, then the packet is
+ * completely contained in the buffer space provided in the
+ * aggregation start completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
+ #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
+ /* unused1 is 1 b */
+ uint8_t tpa_segs;
+ /* This value is the number of segments in the TPA operation. */
+ uint8_t payload_offset;
+ /*
+ * This value indicates the offset in bytes from the beginning
+ * of the packet where the inner payload starts. This value is
+ * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
+ * indicates an offset of 256 bytes.
+ */
+ uint8_t agg_id;
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ /* unused2 is 1 b */
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
+ #define RX_TPA_END_CMPL_AGG_ID_SFT 1
+ uint32_t tsdelta;
+ /*
+ * For non-GRO packets, this value is the timestamp delta
+ * between earliest and latest timestamp values for TPA packet.
+ * If packets were not time stamped, then delta will be zero.
+ * For GRO packets, this field is zero except for the following
+ * sub-fields. - tsdelta[31] Timestamp present indication. When
+ * '0', no Timestamp option is in the packet. When '1', then a
+ * Timestamp option is present in the packet.
+ */
+} __attribute__((packed));
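Two details above are easy to trip over: agg_bufs shares a byte with the valid bit, and a payload_offset of zero means 256 bytes. A short sketch (not part of the patch):

/* Sketch: helpers for the packed agg_bufs_v1 byte and payload_offset rule. */
static inline uint8_t
rx_tpa_end_cmpl_agg_bufs(const struct rx_tpa_end_cmpl *cmpl)
{
	return (cmpl->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
	       RX_TPA_END_CMPL_AGG_BUFS_SFT;
}

static inline uint16_t
rx_tpa_end_cmpl_payload_offset(const struct rx_tpa_end_cmpl *cmpl)
{
	/* Zero is defined above as an encoding for 256 bytes. */
	return cmpl->payload_offset ? cmpl->payload_offset : 256;
}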
+
+/* last 16 bytes of RX TPA End Completion Record */
+struct rx_tpa_end_cmpl_hi {
+ uint32_t tpa_dup_acks;
+ /* unused3 is 28 b */
+ /*
+ * This value is the number of duplicate ACKs that have been
+ * received as part of the TPA operation.
+ */
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
+ /* unused3 is 28 b */
+ uint16_t tpa_seg_len;
+ /*
+	 * This value is valid when TPA completion is active. It
+ * indicates the length of the longest segment of the TPA
+ * operation for LRO mode and the length of the first segment in
+ * GRO mode. This value may be used by GRO software to re-
+ * construct the original packet stream from the TPA packet.
+ * This is the length of all but the last segment for GRO. In
+ * LRO mode this value may be used to indicate MSS size to the
+ * stack.
+ */
+ uint16_t unused_3;
+ /* unused4 is 16 b */
+ uint16_t errors_v2;
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ /*
+ * This error indicates that there was some sort of problem with
+ * the BDs for the packet that was found after part of the
+ * packet was already placed. The packet should be treated as
+ * invalid.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /*
+ * This error occurs when there is a fatal HW
+ * problem in the chip only. It indicates that
+	 * there were no BDs on chip but that there was
+	 * adequate reservation provided by the TPA
+ * block.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
+ /*
+ * This error occurs when TPA block was not
+ * configured to reserve adequate BDs for TPA
+ * operations on this RX ring. All data for the
+ * TPA operation was not placed. This error can
+ * also be generated when the number of segments
+ * is not programmed correctly in TPA and the 33
+ * total aggregation buffers allowed for the TPA
+ * operation has been exceeded.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
+ (UINT32_C(0x4) << 1)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
+ #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_TPA_END_CMPL_ERRORS_SFT 1
+ uint16_t unused_4;
+ /* unused5 is 16 b */
+ uint32_t start_opaque;
+ /*
+ * This is the opaque value that was completed for the TPA start
+ * completion that corresponds to this TPA end completion.
+ */
+} __attribute__((packed));
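The buffer-error sub-field in errors_v2 is again an enumerated value; a minimal check (not part of the patch) might look like the sketch below.

/* Sketch: report whether a TPA end completion carries a buffer error. */
static inline int
rx_tpa_end_cmpl_has_buffer_error(const struct rx_tpa_end_cmpl_hi *hi)
{
	uint16_t err = hi->errors_v2 & RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK;

	/* Either encoding means the aggregated packet must be discarded. */
	return err == RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP ||
	       err == RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR;
}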
+
+/* HWRM Forwarded Request (16 bytes) */
struct hwrm_fwd_req_cmpl {
uint16_t req_len_type;
/* Length of forwarded request in bytes. */
@@ -1188,7 +1747,7 @@ struct hwrm_fwd_req_cmpl {
#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
} __attribute__((packed));
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
struct hwrm_async_event_cmpl {
uint16_t type;
/* unused1 is 10 b */
@@ -1210,19 +1769,20 @@ struct hwrm_async_event_cmpl {
/* Link MTU changed */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1)
/* Link speed changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2)
/* DCB Configuration changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3)
/* Port connection not allowed */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4)
/* Link speed configuration was not allowed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED UINT32_C(0x5)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
+ UINT32_C(0x5)
/* Link speed configuration change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6)
/* Port PHY configuration change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7)
/* Function driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10)
/* Function driver loaded */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11)
/* Function FLR related processing has completed */
@@ -1231,12 +1791,13 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20)
/* PF driver loaded */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21)
- /* VF Function Level Reset (FLR) */
+ /* VF Function Level Reset (FLR) */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30)
/* VF MAC Address Change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31)
/* PF-VF communication channel status change. */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE UINT32_C(0x32)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ UINT32_C(0x32)
/* VF Configuration Change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33)
/* HWRM Error */
@@ -1255,69 +1816,13 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
uint8_t timestamp_lo;
- /* 8-lsb timestamp from POR (100-msec resolution) */
+ /* 8-lsb timestamp from POR (100-msec resolution) */
uint16_t timestamp_hi;
- /* 16-lsb timestamp from POR (100-msec resolution) */
+ /* 16-lsb timestamp from POR (100-msec resolution) */
uint32_t event_data1;
/* Event specific data */
} __attribute__((packed));
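A hedged dispatch sketch (not part of the patch): the switch below keys on the event identifiers documented above; the completion field the ID is read from is not visible in this hunk, so the helper simply takes the value as a parameter.

/* Sketch: minimal dispatch over a few of the async event identifiers. */
static void
handle_hwrm_async_event(uint16_t event_id)
{
	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
		/* Re-read link state from the firmware. */
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		/* The parent PF driver is going away. */
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		/* VF configuration changed; re-query the function config. */
		break;
	default:
		break;
	}
}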
-/*
- * Note: The Hardware Resource Manager (HWRM) manages various hardware resources
- * inside the chip. The HWRM is implemented in firmware, and runs on embedded
- * processors inside the chip. This firmware service is vital part of the chip.
- * The chip can not be used by a driver or HWRM client without the HWRM.
- */
-
-/* Input (16 bytes) */
-struct input {
- uint16_t req_type;
- /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
- */
- uint16_t cmpl_ring;
- /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
- */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
- /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
- */
- uint64_t resp_addr;
- /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
- */
-} __attribute__((packed));
-
-/* Output (8 bytes) */
-struct output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
-} __attribute__((packed));
-
/* hwrm_ver_get */
/*
* Description: This function is called by a driver to determine the HWRM
@@ -1327,7 +1832,7 @@ struct output {
* interface or firmware version with major = 0, minor = 0, and update = 0 shall
* be considered an invalid version.
*/
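Per the note above, an all-zero (major, minor, update) triple is invalid. A trivial check (not part of the patch; the values would come from the hwrm_ver_get response, whose field names are not shown in this hunk):

/* Sketch: reject the all-zero version triple described above. */
static inline int
hwrm_version_is_valid(uint8_t major, uint8_t minor, uint8_t update)
{
	return major != 0 || minor != 0 || update != 0;
}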
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ver_get_input {
uint16_t req_type;
/*
@@ -1384,7 +1889,7 @@ struct hwrm_ver_get_input {
uint8_t unused_0[5];
} __attribute__((packed));
-/* Output (128 bytes) */
+/* Output (128 bytes) */
struct hwrm_ver_get_output {
uint16_t error_code;
/*
@@ -1454,7 +1959,7 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version of the HWRM
+ * tied to a specific (major,minor,update) version of the HWRM
* firmware.
*/
uint8_t mgmt_fw_maj;
@@ -1477,7 +1982,7 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * tied to a specific (major,minor,update) version
*/
uint8_t netctrl_fw_maj;
/*
@@ -1500,7 +2005,7 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * tied to a specific (major,minor,update) version
*/
uint32_t dev_caps_cfg;
/*
@@ -1512,13 +2017,27 @@ struct hwrm_ver_get_output {
* supported. If set to 0, then secure firmware update behavior
* is not supported.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED UINT32_C(0x1)
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
+ UINT32_C(0x1)
/*
* If set to 1, then firmware based DCBX agent is supported. If
* set to 0, then firmware based DCBX agent capability is not
* supported on this device.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then HWRM short command format is supported. If
+ * set to 0, then HWRM short command format is not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, then HWRM short command format is required. If
+ * set to 0, then HWRM short command format is not required.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED \
+ UINT32_C(0x8)
uint8_t roce_fw_maj;
/*
* This field represents the major version of RoCE firmware. A
@@ -1539,22 +2058,22 @@ struct hwrm_ver_get_output {
/*
* This field is a reserved field. This field can be used to
* represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * tied to a specific (major,minor,update) version
*/
char hwrm_fw_name[16];
/*
- * This field represents the name of HWRM FW (ASCII chars with
+ * This field represents the name of HWRM FW (ASCII chars with
* NULL at the end).
*/
char mgmt_fw_name[16];
/*
- * This field represents the name of mgmt FW (ASCII chars with
+ * This field represents the name of mgmt FW (ASCII chars with
* NULL at the end).
*/
char netctrl_fw_name[16];
/*
* This field represents the name of network control firmware
- * (ASCII chars with NULL at the end).
+ * (ASCII chars with NULL at the end).
*/
uint32_t reserved2[4];
/*
@@ -1563,7 +2082,7 @@ struct hwrm_ver_get_output {
*/
char roce_fw_name[16];
/*
- * This field represents the name of RoCE FW (ASCII chars with
+ * This field represents the name of RoCE FW (ASCII chars with
* NULL at the end).
*/
uint16_t chip_num;
@@ -1584,7 +2103,7 @@ struct hwrm_ver_get_output {
/* FPGA platform of the chip. */
#define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
/* Palladium platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
uint16_t max_req_win_len;
/*
* This field returns the maximum value of request window that
@@ -1614,7 +2133,7 @@ struct hwrm_ver_get_output {
/* hwrm_func_reset */
/*
- * Description: This command resets a hardware function (PCIe function) and
+ * Description: This command resets a hardware function (PCIe function) and
* frees any resources used by the function. This command shall be initiated by
* the driver after an FLR has occurred to prepare the function for re-use. This
* command may also be initiated by a driver prior to doing it's own
@@ -1626,7 +2145,7 @@ struct hwrm_ver_get_output {
* idled. The command returns all the resources owned by the function so a new
* driver may allocate and configure resources normally.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_reset_input {
uint16_t req_type;
/*
@@ -1668,7 +2187,7 @@ struct hwrm_func_reset_input {
/* This value indicates the level of a function reset. */
/*
* Reset the caller function and its children
- * VFs (if any). If no children functions exist,
+ * VFs (if any). If no children functions exist,
* then reset the caller function only.
*/
#define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0)
@@ -1681,7 +2200,8 @@ struct hwrm_func_reset_input {
* It is an error to specify this level by a PF
* driver with no children VFs.
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN UINT32_C(0x2)
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
+ UINT32_C(0x2)
/*
* Reset a specific VF of the caller function
* driver if the caller is the parent PF driver.
@@ -1694,7 +2214,7 @@ struct hwrm_func_reset_input {
uint8_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_func_reset_output {
uint16_t error_code;
/*
@@ -1734,7 +2254,7 @@ struct hwrm_func_reset_output {
* physical function. The output FID value is needed to configure Rings and
* MSI-X vectors so their DMA operations appear correctly on the PCI bus.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_qcaps_input {
uint16_t req_type;
/*
@@ -1765,12 +2285,12 @@ struct hwrm_func_qcaps_input {
uint16_t fid;
/*
* Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * (All Fs) if the query is for the requesting function.
*/
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (80 bytes) */
+/* Output (80 bytes) */
struct hwrm_func_qcaps_output {
uint16_t error_code;
/*
@@ -1795,54 +2315,56 @@ struct hwrm_func_qcaps_output {
uint16_t port_id;
/*
* Port ID of port that this function is associated with. Valid
- * only for the PF. 0xFF... (All Fs) if this function is not
- * associated with any port. 0xFF... (All Fs) if this function
+ * only for the PF. 0xFF... (All Fs) if this function is not
+ * associated with any port. 0xFF... (All Fs) if this function
* is called from a VF.
*/
uint32_t flags;
/* If 1, then Push mode is supported on this function. */
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
/*
* If 1, then the global MSI-X auto-masking is enabled for the
* device.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING UINT32_C(0x2)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
+ UINT32_C(0x2)
/*
- * If 1, then the Precision Time Protocol (PTP) processing is
+ * If 1, then the Precision Time Protocol (PTP) processing is
* supported on this function. The HWRM should enable PTP on
- * only a single Physical Function (PF) per port.
+ * only a single Physical Function (PF) per port.
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v1 is
+ * If 1, then RDMA over Converged Ethernet (RoCE) v1 is
* supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v2 is
+ * If 1, then RDMA over Converged Ethernet (RoCE) v2 is
* supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10)
/*
* If 1, then control and configuration of WoL magic packet are
* supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED UINT32_C(0x20)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
+ UINT32_C(0x20)
/*
* If 1, then control and configuration of bitmap pattern packet
* are supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40)
/*
* If set to 1, then the control and configuration of rate limit
* of an allocated TX ring on the queried function is supported.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80)
/*
* If 1, then control and configuration of minimum and maximum
* bandwidths are supported on the queried function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100)
/*
* If the query is for a VF, then this flag shall be ignored. If
* this query is for a PF and this flag is set to 1, then the PF
@@ -1851,7 +2373,8 @@ struct hwrm_func_qcaps_output {
* set to 0, then the PF does not have the capability to set the
* rate limits on the TX rings of its children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED UINT32_C(0x200)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x200)
/*
* If the query is for a VF, then this flag shall be ignored. If
* this query is for a PF and this flag is set to 1, then the PF
@@ -1861,7 +2384,17 @@ struct hwrm_func_qcaps_output {
* capability to set the minimum or maximum bandwidths for its
* children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then standard TX ring mode is supported on the
+ * queried function. If set to 0, then standard TX ring mode is
+ * not available on the queried function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
+ UINT32_C(0x800)
uint8_t mac_address[6];
/*
* This value is current MAC address configured for this
@@ -1901,7 +2434,7 @@ struct hwrm_func_qcaps_output {
uint16_t first_vf_id;
/*
* The identifier for the first VF enabled on a PF. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
+ * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
* this command is called on a PF with SR-IOV disabled or on a
* VF.
*/
@@ -1909,7 +2442,7 @@ struct hwrm_func_qcaps_output {
/*
* The maximum number of VFs that can be allocated to the
* function. This is valid only on the PF with SR-IOV enabled.
- * 0xFF... (All Fs) if this command is called on a PF with SR-
+ * 0xFF... (All Fs) if this command is called on a PF with SR-
* IOV disabled or on a VF.
*/
uint16_t max_stat_ctx;
@@ -1929,22 +2462,22 @@ struct hwrm_func_qcaps_output {
*/
uint32_t max_tx_em_flows;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the TX side.
*/
uint32_t max_tx_wm_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
+ * The maximum number of Wildcard Match (WM) flows that can be
* offloaded by this function on the TX side.
*/
uint32_t max_rx_em_flows;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the RX side.
*/
uint32_t max_rx_wm_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
+ * The maximum number of Wildcard Match (WM) flows that can be
* offloaded by this function on the RX side.
*/
uint32_t max_mcast_filters;
@@ -1968,7 +2501,7 @@ struct hwrm_func_qcaps_output {
* be allocated to the function. This number indicates the
* maximum number of TX rings that can be assigned strict
* priorities out of the maximum number of TX rings that can be
- * allocated (max_tx_rings) to the function.
+ * allocated (max_tx_rings) to the function.
*/
uint8_t unused_0;
uint8_t valid;
@@ -1989,9 +2522,13 @@ struct hwrm_func_qcaps_output {
* allows a physical function driver to query virtual functions that are
* children of the physical function. The output FID value is needed to
* configure Rings and MSI-X vectors so their DMA operations appear correctly on
- * the PCI bus.
+ * the PCI bus. This command should be called by every driver after
+ * 'hwrm_func_cfg' to get the actual number of resources allocated by the HWRM.
+ * The values returned by hwrm_func_qcfg are the values the driver shall use.
+ * These values may be different than what was originally requested in the
+ * 'hwrm_func_cfg' command.
*/
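To illustrate the query-after-configure guidance above, here is a hedged sketch (not part of the patch): hwrm_send_message() and the HWRM_FUNC_QCFG request type stand in for the driver's own mailbox routine and the command ID defined elsewhere in this header, and alloc_tx_rings is the response field referenced further down in this structure.

/* Sketch: read back the TX ring count actually granted by the HWRM. */
static int
query_granted_tx_rings(uint16_t *tx_rings)
{
	struct hwrm_func_qcfg_input req = { 0 };
	struct hwrm_func_qcfg_output resp = { 0 };
	int rc;

	req.req_type = HWRM_FUNC_QCFG;	/* command ID defined elsewhere */
	req.fid = 0xffff;		/* all-Fs: query the calling function */

	/* hwrm_send_message() stands in for the driver's HWRM mailbox I/O. */
	rc = hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
	if (rc)
		return rc;

	*tx_rings = resp.alloc_tx_rings;
	return 0;
}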
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_qcfg_input {
uint16_t req_type;
/*
@@ -2022,12 +2559,12 @@ struct hwrm_func_qcfg_input {
uint16_t fid;
/*
* Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * (All Fs) if the query is for the requesting function.
*/
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (72 bytes) */
+/* Output (72 bytes) */
struct hwrm_func_qcfg_output {
uint16_t error_code;
/*
@@ -2052,7 +2589,7 @@ struct hwrm_func_qcfg_output {
uint16_t port_id;
/*
* Port ID of port that this function is associated with.
- * 0xFF... (All Fs) if this function is not associated with any
+ * 0xFF... (All Fs) if this function is not associated with any
* port.
*/
uint16_t vlan;
@@ -2060,15 +2597,15 @@ struct hwrm_func_qcfg_output {
* This value is the current VLAN setting for this function. The
* value of 0 for this field indicates no priority tagging or
* VLAN is used. This field's format is same as 802.1Q Tag's Tag
- * Control Information (TCI) format that includes both Priority
- * Code Point (PCP) and VLAN Identifier (VID).
+ * Control Information (TCI) format that includes both Priority
+ * Code Point (PCP) and VLAN Identifier (VID).
*/
uint16_t flags;
/*
* If 1, then magic packet based Out-Of-Box WoL is enabled on
* the port associated with this function.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
UINT32_C(0x1)
/*
* If 1, then bitmap pattern based Out-Of-Box WoL packet is
@@ -2080,8 +2617,33 @@ struct hwrm_func_qcfg_output {
* on the port associated with this function. If set to 0, then
* DCBX agent is not running in the firmware.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
UINT32_C(0x4)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then standard TX ring mode is enabled on the
+ * queried function. If set to 0, then the standard TX ring mode
+ * is disabled on the queried function. In this extended TX ring
+ * resource mode, the minimum and maximum bandwidth settings are
+ * not supported to allow the allocation of TX rings to span
+ * multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1 then FW based LLDP agent is enabled and running
+ * on the port associated with this function. If set to 0 then
+ * the LLDP agent is not running in the firmware.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED UINT32_C(0x10)
+ /*
+ * If set to 1, then multi-host mode is active for this
+ * function. If set to 0, then multi-host mode is inactive for
+ * this function or not applicable for this device.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST UINT32_C(0x20)
uint8_t mac_address[6];
/*
* This value is current MAC address configured for this
@@ -2091,8 +2653,8 @@ struct hwrm_func_qcfg_output {
uint16_t pci_id;
/*
* This value is current PCI ID of this function. If ARI is
- * enabled, then it is Bus Number (8b):Function Number(8b).
- * Otherwise, it is Bus Number (8b):Device Number (4b):Function
+ * enabled, then it is Bus Number (8b):Function Number(8b).
+ * Otherwise, it is Bus Number (8b):Device Number (4b):Function
* Number(4b).
*/
uint16_t alloc_rsscos_ctx;
@@ -2146,22 +2708,27 @@ struct hwrm_func_qcfg_output {
/* Multiple physical functions */
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
/* Network Partitioning 1.0 */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 \
- UINT32_C(0x2)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2)
/* Network Partitioning 1.5 */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 \
- UINT32_C(0x3)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3)
/* Network Partitioning 2.0 */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 \
- UINT32_C(0x4)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4)
/* Unknown */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \
- UINT32_C(0xff)
- uint8_t unused_0;
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff)
+ uint8_t port_pf_cnt;
+ /*
+	 * This field will indicate the number of physical functions on this
+ * port_partition. HWRM shall return unavail (i.e. value of 0)
+ * for this field when this command is used to query VF's
+ * configuration or from older firmware that doesn't support
+ * this field.
+ */
+ /* number of PFs is not available */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
uint16_t dflt_vnic_id;
/* The default VNIC ID assigned to a function that is being queried. */
+ uint8_t unused_0;
uint8_t unused_1;
- uint8_t unused_2;
uint32_t min_bw;
/*
* Minimum BW allocated for this function. The HWRM will
@@ -2169,26 +2736,41 @@ struct hwrm_func_qcfg_output {
* for the scheduler inside the device. A value of 0 indicates
* the minimum bandwidth is not configured.
*/
- /* Bandwidth value */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
+ /* The bandwidth value. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
- /* Reserved */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_RSVD UINT32_C(0x10000000)
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \
+ FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mbps */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MBPS \
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
(UINT32_C(0x1) << 29)
/* Invalid unit */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
uint32_t max_bw;
/*
@@ -2197,43 +2779,58 @@ struct hwrm_func_qcfg_output {
* for the scheduler inside the device. A value of 0 indicates
* that the maximum bandwidth is not configured.
*/
- /* Bandwidth value */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
+ /* The bandwidth value. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
- /* Reserved */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_RSVD UINT32_C(0x10000000)
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mbps */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MBPS \
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
(UINT32_C(0x1) << 29)
/* Invalid unit */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
uint8_t evb_mode;
/*
* This value indicates the Edge virtual bridge mode for the
* domain that this function belongs to.
*/
- /* No Edge Virtual Bridging (EVB) */
+ /* No Edge Virtual Bridging (EVB) */
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
- /* Virtual Ethernet Bridge (VEB) */
+ /* Virtual Ethernet Bridge (VEB) */
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
- /* Virtual Ethernet Port Aggregator (VEPA) */
+ /* Virtual Ethernet Port Aggregator (VEPA) */
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
- uint8_t unused_3;
+ uint8_t unused_2;
uint16_t alloc_vfs;
/*
* The number of VFs that are allocated to the function. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
+ * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
* this command is called on a PF with SR-IOV disabled or on a
* VF.
*/
@@ -2247,9 +2844,957 @@ struct hwrm_func_qcfg_output {
uint16_t alloc_sp_tx_rings;
/*
* The number of strict priority transmit rings out of currently
- * allocated TX rings to the function (alloc_tx_rings).
+ * allocated TX rings to the function (alloc_tx_rings).
+ */
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
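+
+/*
+ * Illustrative sketch, not part of the generated header: one way a caller
+ * could decode the min_bw word returned above. The helper names are examples
+ * only, not bnxt PMD APIs.
+ */
+static inline uint32_t example_qcfg_min_bw_value(uint32_t min_bw)
+{
+	/* Bits 0..27 carry the bandwidth value. */
+	return (min_bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK) >>
+		HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT;
+}
+
+static inline int example_qcfg_min_bw_is_percent(uint32_t min_bw)
+{
+	/* The unit constants above are already shifted, so compare directly. */
+	return (min_bw & HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK) ==
+		HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100;
+}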
+
+/* hwrm_func_vlan_qcfg */
+/*
+ * Description: This command should be called by PF driver to get the current
+ * C-TAG, S-TAG and corresponding PCP and TPID values configured for the
+ * function.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_vlan_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. If set
+ * to 0xFF... (All Fs), then the configuration is for the
+ * requesting function.
*/
+ uint16_t unused_0[3];
+};
+
+/* Output (40 bytes) */
+struct hwrm_func_vlan_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+ uint16_t stag_vid;
+ /* S-TAG VLAN identifier configured for the function. */
+ uint8_t stag_pcp;
+ /* S-TAG PCP value configured for the function. */
uint8_t unused_4;
+ uint16_t stag_tpid;
+ /*
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t ctag_vid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint8_t ctag_pcp;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t unused_5;
+ uint16_t ctag_tpid;
+ /*
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint32_t rsvd2;
+ /* Future use. */
+ uint32_t rsvd3;
+ /* Future use. */
+ uint32_t unused_6;
+};
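+
+/*
+ * Illustrative sketch, not part of the generated header: the TPID fields in
+ * the response above are carried in network byte order, so a caller converts
+ * them before comparing with host-order constants. rte_be_to_cpu_16() comes
+ * from rte_byteorder.h; the helper name is an example only.
+ */
+static inline uint16_t
+example_vlan_qcfg_stag_tpid(const struct hwrm_func_vlan_qcfg_output *resp)
+{
+	return rte_be_to_cpu_16(resp->stag_tpid);
+}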
+
+/* hwrm_func_vlan_cfg */
+/*
+ * Description: This command allows PF driver to configure C-TAG, S-TAG and
+ * corresponding PCP and TPID values for a function.
+ */
+/* Input (48 bytes) */
+struct hwrm_func_vlan_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. If set
+ * to 0xFF... (All Fs), then the configuration is for the
+ * requesting function.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t enables;
+ /* This bit must be '1' for the stag_vid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
+ /* This bit must be '1' for the ctag_vid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
+ /* This bit must be '1' for the stag_pcp field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
+ /* This bit must be '1' for the ctag_pcp field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
+ /* This bit must be '1' for the stag_tpid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
+ /* This bit must be '1' for the ctag_tpid field to be configured. */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
+ uint16_t stag_vid;
+ /* S-TAG VLAN identifier configured for the function. */
+ uint8_t stag_pcp;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t unused_2;
+ uint16_t stag_tpid;
+ /*
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t ctag_vid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint8_t ctag_pcp;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t unused_3;
+ uint16_t ctag_tpid;
+ /*
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint32_t rsvd1;
+ /* Future use. */
+ uint32_t rsvd2;
+ /* Future use. */
+ uint32_t unused_4;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vlan_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+};
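+
+/*
+ * Illustrative sketch, not part of the generated header: populating a
+ * hwrm_func_vlan_cfg request that sets the S-TAG VID and TPID (0x88a8 is the
+ * 802.1ad TPID). rte_cpu_to_be_16() comes from rte_byteorder.h; the HWRM send
+ * path and the wire-format conversion of the other fields are omitted, and
+ * the helper name is an example only.
+ */
+static inline void
+example_fill_func_vlan_cfg(struct hwrm_func_vlan_cfg_input *req,
+			   uint16_t fid, uint16_t vid)
+{
+	req->fid = fid;
+	req->enables = HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID |
+		       HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID;
+	req->stag_vid = vid;
+	/* The field comment above notes the TPID is in network byte order. */
+	req->stag_tpid = rte_cpu_to_be_16(0x88a8);
+}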
+
+/* hwrm_func_cfg */
+/*
+ * Description: This command allows configuration of a PF by the corresponding
+ * PF driver. This command also allows configuration of a child VF by its parent
+ * PF driver. The input FID value is used to indicate what function is being
+ * configured. This allows a PF driver to configure the PF owned by itself or a
+ * virtual function that is a child of the PF. This command also allows a
+ * parent PF to reserve resources for a VF. To reverse the process, the command
+ * should be called with all enables flags cleared for resources. This will free
+ * allocated resources for the VF and return them to the resource pool. If this
+ * command is requested by a VF driver to configure or reserve resources, then
+ * the HWRM shall fail this command. If default MAC address and/or VLAN are
+ * provided in this command, then the HWRM shall set up appropriate MAC/VLAN
+ * filters for the function that is being configured. If source properties
+ * checks are enabled and default MAC address and/or IP address are provided in
+ * this command, then the HWRM shall set appropriate source property checks
+ * based on provided MAC and/or IP addresses. The parent PF driver should not
+ * set MTU/MRU for a VF using this command. This is to allow MTU/MRU setting by
+ * the VF driver. If the MTU or MRU for a VF is set by the PF driver, then the
+ * HWRM should ignore it. A function's MTU/MRU should be set prior to allocating
+ * RX VNICs or TX rings. A PF driver calls hwrm_func_cfg to allocate resources
+ * for itself or its children VFs. All function drivers shall call hwrm_func_cfg
+ * to reserve resources. A request to hwrm_func_cfg may not be fully granted;
+ * that is, a request for resources may be larger than what can be supported by
+ * the device and the HWRM will allocate the best set of resources available,
+ * but that may be less than requested. If all the amounts requested could not
+ * be fulfilled, the HWRM shall allocate what it could and return a status code
+ * of success. A function driver should call hwrm_func_qcfg immediately after
+ * hwrm_func_cfg to determine what resources were assigned to the configured
+ * function. A call by a PF driver to hwrm_func_cfg to allocate resources for
+ * itself shall only allocate resources for the PF driver to use, not for its
+ * children VFs. Likewise, a call to hwrm_func_qcfg shall return the resources
+ * available for the PF driver to use, not what is available to its children
+ * VFs.
+ */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. If set
+ * to 0xFF... (All Fs), then the configuration is for the
+ * requesting function.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function is disabled with source
+ * MAC address check. This is an anti-spoofing check. If this
+ * flag is set, then the function shall be configured to
+ * disallow transmission of frames with the source MAC address
+ * that is configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is enabled with source MAC
+ * address check. This is an anti-spoofing check. If this flag
+ * is set, then the function shall be configured to allow
+ * transmission of frames with the source MAC address that is
+ * configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
+ UINT32_C(0x2)
+ /* reserved */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK UINT32_C(0x1fc)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then standard TX ring mode is requested to be
+ * enabled on the function being configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
+ UINT32_C(0x200)
+ /*
+ * Standard TX Ring mode is used for the allocation of TX ring
+ * and underlying scheduling resources that allow bandwidth
+ * reservation and limit settings on the queried function. If
+ * set to 1, then the standard TX ring mode is requested to be
+ * disabled on the function being configured. In this extended
+ * TX ring resource mode, the minimum and maximum bandwidth
+ * settings are not supported to allow the allocation of TX
+ * rings to span multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
+ UINT32_C(0x400)
+ /*
+ * If this bit is set, virtual mac address configured in this
+ * command will be persistent over warm boot.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800)
+ /*
+ * This bit only applies to the VF. If this bit is set, the
+ * statistic context counters will not be cleared when the
+ * statistic context is freed or a function reset is called on
+ * VF. This bit will be cleared when the PF is unloaded or a
+ * function reset is called on the PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
+ UINT32_C(0x1000)
+ uint32_t enables;
+ /* This bit must be '1' for the mtu field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
+ /* This bit must be '1' for the mru field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS UINT32_C(0x8)
+ /* This bit must be '1' for the num_tx_rings field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x10)
+ /* This bit must be '1' for the num_rx_rings field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x20)
+ /* This bit must be '1' for the num_l2_ctxs field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x40)
+ /* This bit must be '1' for the num_vnics field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x200)
+ /* This bit must be '1' for the dflt_vlan field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN UINT32_C(0x400)
+ /* This bit must be '1' for the dflt_ip_addr field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR UINT32_C(0x800)
+ /* This bit must be '1' for the min_bw field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW UINT32_C(0x1000)
+ /* This bit must be '1' for the max_bw field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the vlan_antispoof_mode field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the allowed_vlan_pris field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS UINT32_C(0x10000)
+ /* This bit must be '1' for the evb_mode field to be configured. */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE UINT32_C(0x20000)
+ /*
+ * This bit must be '1' for the num_mcast_filters field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS UINT32_C(0x40000)
+ /*
+ * This bit must be '1' for the num_hw_ring_grps field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x80000)
+ uint16_t mtu;
+ /*
+ * The maximum transmission unit of the function. The HWRM
+ * should make sure that the mtu of the function does not exceed
+ * the mtu of the physical port that this function is associated
+ * with. In addition to configuring mtu per function, it is
+ * possible to configure mtu per transmit ring. By default, the
+ * mtu of each transmit ring associated with a function is equal
+ * to the mtu of the function. The HWRM should make sure that
+ * the mtu of each transmit ring that is assigned to a function
+ * has a valid mtu.
+ */
+ uint16_t mru;
+ /*
+ * The maximum receive unit of the function. The HWRM should
+ * make sure that the mru of the function does not exceed the
+ * mru of the physical port that this function is associated
+ * with. In addition to configuring mru per function, it is
+ * possible to configure mru per vnic. By default, the mru of
+ * each vnic associated with a function is equal to the mru of
+ * the function. The HWRM should make sure that the mru of each
+ * vnic that is assigned to a function has a valid mru.
+ */
+ uint16_t num_rsscos_ctxs;
+ /* The number of RSS/COS contexts requested for the function. */
+ uint16_t num_cmpl_rings;
+ /*
+ * The number of completion rings requested for the function.
+ * This does not include the rings allocated to any children
+ * functions if any.
+ */
+ uint16_t num_tx_rings;
+ /*
+ * The number of transmit rings requested for the function. This
+ * does not include the rings allocated to any children
+ * functions if any.
+ */
+ uint16_t num_rx_rings;
+ /*
+ * The number of receive rings requested for the function. This
+ * does not include the rings allocated to any children
+ * functions if any.
+ */
+ uint16_t num_l2_ctxs;
+ /* The requested number of L2 contexts for the function. */
+ uint16_t num_vnics;
+ /* The requested number of vnics for the function. */
+ uint16_t num_stat_ctxs;
+ /* The requested number of statistic contexts for the function. */
+ uint16_t num_hw_ring_grps;
+ /*
+ * The number of HW ring groups that should be reserved for this
+ * function.
+ */
+ uint8_t dflt_mac_addr[6];
+ /* The default MAC address for the function being configured. */
+ uint16_t dflt_vlan;
+ /*
+ * The default VLAN for the function being configured. This
+ * field's format is same as 802.1Q Tag's Tag Control
+ * Information (TCI) format that includes both Priority Code
+ * Point (PCP) and VLAN Identifier (VID).
+ */
+ uint32_t dflt_ip_addr[4];
+ /*
+ * The default IP address for the function being configured.
+ * This address is only used in enabling source property check.
+ */
+ uint32_t min_bw;
+ /*
+ * Minimum BW allocated for this function. The HWRM will
+ * translate this value into byte counter and time interval used
+ * for the scheduler inside the device.
+ */
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \
+ HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
+ uint32_t max_bw;
+ /*
+ * Maximum BW allocated for this function. The HWRM will
+ * translate this value into byte counter and time interval used
+ * for the scheduler inside the device.
+ */
+ /* The bandwidth value. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \
+ HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
+ uint16_t async_event_cr;
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the HWRM
+ * shall use the default completion ring of the function that is
+ * being configured as the target completion ring for providing
+ * any asynchronous event completions for that function. If this
+ * field is valid, then the HWRM shall use the completion ring
+ * identified by this ID as the target completion ring for
+ * providing any asynchronous event completions for the function
+ * that is being configured.
+ */
+ uint8_t vlan_antispoof_mode;
+ /* VLAN Anti-spoofing mode. */
+ /* No VLAN anti-spoofing checks are enabled */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK UINT32_C(0x0)
+ /* Validate VLAN against the configured VLAN(s) */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \
+ UINT32_C(0x1)
+ /* Insert VLAN if it does not exist, otherwise discard */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \
+ UINT32_C(0x2)
+ /*
+ * Insert VLAN if it does not exist, override
+ * VLAN if it exists
+ */
+ #define \
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
+ UINT32_C(0x3)
+ uint8_t allowed_vlan_pris;
+ /*
+ * This bit field defines VLAN PRIs that are allowed on this
+ * function. If nth bit is set, then VLAN PRI n is allowed on
+ * this function.
+ */
+ uint8_t evb_mode;
+ /*
+ * The HWRM shall allow a PF driver to change EVB mode for the
+ * partition it belongs to. The HWRM shall not allow a VF driver
+ * to change the EVB mode. The HWRM shall take into account the
+ * switching of EVB mode from one to another and reconfigure
+ * hardware resources appropriately. The switching from VEB
+ * to VEPA mode requires the disabling of the loopback traffic.
+ * Additionally, source knock outs are handled differently in
+ * VEB and VEPA modes.
+ */
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ uint8_t unused_2;
+ uint16_t num_mcast_filters;
+ /*
+ * The number of multicast filters that should be reserved for
+ * this function on the RX side.
+ */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
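+
+/*
+ * Illustrative sketch, not part of the generated header: a request asking for
+ * a 9000 byte MTU and a 10 Gb cap, encoded with the unit/scale macros above
+ * (10000 Mb, scale = bits, unit = mega). As the description notes, the HWRM
+ * may grant less than requested, so a driver would follow up with
+ * hwrm_func_qcfg. Wire-format byte ordering and the HWRM send path are
+ * omitted; the helper name is an example only.
+ */
+static inline void
+example_fill_func_cfg(struct hwrm_func_cfg_input *req, uint16_t fid)
+{
+	req->fid = fid;
+	req->enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+		       HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW;
+	req->mtu = 9000;
+	req->max_bw = (10000 << HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT) |
+		      HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS |
+		      HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA;
+}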
+
+/* hwrm_func_qstats */
+/*
+ * Description: This command returns statistics of a function. The input FID
+ * value is used to indicate what function is being queried. This allows a
+ * physical function driver to query virtual functions that are children of the
+ * physical function. The HWRM shall return any unsupported counter with a value
+ * of 0xFFFFFFFF for 32-bit counters and 0xFFFFFFFFFFFFFFFF for 64-bit counters.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being queried. 0xFF...
+ * (All Fs) if the query is for the requesting function.
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted unicast packets on the function. */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted multicast packets on the function. */
+ uint64_t tx_bcast_pkts;
+ /* Number of transmitted broadcast packets on the function. */
+ uint64_t tx_err_pkts;
+ /*
+ * Number of transmitted packets that were discarded due to
+ * internal NIC resource problems. For transmit, this can only
+ * happen if TMP is configured to allow dropping in HOL blocking
+ * conditions, which is not a normal configuration.
+ */
+ uint64_t tx_drop_pkts;
+ /*
+ * Number of dropped packets on transmit path on the function.
+ * These are packets that have been marked for drop by the TE
+ * CFA block or are packets that exceeded the transmit MTU limit
+ * for the function.
+ */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for unicast traffic on the function. */
+ uint64_t tx_mcast_bytes;
+ /*
+ * Number of transmitted bytes for multicast traffic on the
+ * function.
+ */
+ uint64_t tx_bcast_bytes;
+ /*
+ * Number of transmitted bytes for broadcast traffic on the
+ * function.
+ */
+ uint64_t rx_ucast_pkts;
+ /* Number of received unicast packets on the function. */
+ uint64_t rx_mcast_pkts;
+ /* Number of received multicast packets on the function. */
+ uint64_t rx_bcast_pkts;
+ /* Number of received broadcast packets on the function. */
+ uint64_t rx_err_pkts;
+ /*
+ * Number of received packets that were discarded on the
+ * function due to resource limitations. This can happen for 3
+ * reasons. # The BD used for the packet has a bad format. #
+ * There were no BDs available in the ring for the packet. #
+ * There were no BDs available on-chip for the packet.
+ */
+ uint64_t rx_drop_pkts;
+ /*
+ * Number of dropped packets on received path on the function.
+ * These are packets that have been marked for drop by the RE
+ * CFA.
+ */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for unicast traffic on the function. */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for multicast traffic on the function. */
+ uint64_t rx_bcast_bytes;
+ /* Number of received bytes for broadcast traffic on the function. */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast packets on the function. */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregated unicast bytes on the function. */
+ uint64_t rx_agg_events;
+ /* Number of aggregation events on the function. */
+ uint64_t rx_agg_aborts;
+ /* Number of aborted aggregations on the function. */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
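+
+/*
+ * Illustrative sketch, not part of the generated header: per the description
+ * above, unsupported 64-bit counters read back as all ones, so a caller would
+ * typically filter them out before accumulating statistics. The helper name
+ * is an example only.
+ */
+static inline uint64_t example_qstats_counter(uint64_t cnt)
+{
+	return cnt == UINT64_C(0xFFFFFFFFFFFFFFFF) ? 0 : cnt;
+}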
+
+/* hwrm_func_clr_stats */
+/*
+ * Description: This command clears statistics of a function. The input FID
+ * value is used to indicate what function's statistics is being cleared. This
+ * allows a physical function driver to clear statistics of virtual functions
+ * that are children of the physical function.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function. 0xFF... (All Fs) if the query is
+ * for the requesting function.
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Description: This command is used to query vf vnic ids. */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t vf_id;
+ /*
+ * This value is used to identify a Virtual Function (VF). The
+ * scope of VF ID is local within a PF.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t max_vnic_id_cnt;
+ /* Max number of vnic ids in vnic id table */
+ uint64_t vnic_id_tbl_addr;
+ /* This is the address for VF VNIC ID table */
+} __attribute__((packed));
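+
+/*
+ * Illustrative sketch, not part of the generated header: the table at
+ * vnic_id_tbl_addr holds 32-bit entries (see vnic_id_cnt in the output
+ * below), so the buffer is sized as max_vnic_id_cnt * sizeof(uint32_t).
+ * rte_zmalloc() comes from rte_malloc.h; translating the buffer to the bus
+ * address placed in vnic_id_tbl_addr and error handling are omitted, and the
+ * helper name is an example only.
+ */
+static inline uint32_t *example_alloc_vf_vnic_id_tbl(uint32_t max_vnic_id_cnt)
+{
+	return rte_zmalloc("vf_vnic_ids",
+			   max_vnic_id_cnt * sizeof(uint32_t), 0);
+}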
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t vnic_id_cnt;
+ /*
+ * Actual number of vnic ids. Each VNIC ID is written as a 32-bit
+ * number.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
uint8_t valid;
/*
* This field is used in Output records to indicate that the
@@ -2268,7 +3813,7 @@ struct hwrm_func_qcfg_output {
* function driver shall use this command during the driver initialization right
* after the HWRM version discovery and default ring resources allocation.
*/
-/* Input (80 bytes) */
+/* Input (80 bytes) */
struct hwrm_func_drv_rgtr_input {
uint16_t req_type;
/*
@@ -2324,9 +3869,12 @@ struct hwrm_func_drv_rgtr_input {
* This bit must be '1' for the async_event_fwd field to be
* configured.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
uint16_t os_type;
- /* This value indicates the type of OS. */
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
/* Unknown */
#define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
/* Other OS not listed below. */
@@ -2374,17 +3922,17 @@ struct hwrm_func_drv_rgtr_input {
uint32_t async_event_fwd[8];
/*
* This is a 256-bit bit mask provided by the function driver
- * (PF or VF driver) to indicate the list of asynchronous event
+ * (PF or VF driver) to indicate the list of asynchronous event
* completions to be forwarded. Nth bit refers to the Nth
* event_id. Setting Nth bit to 1 by the function driver shall
* result in the HWRM forwarding asynchronous event completion
- * with event_id equal to N. If all bits are set to 0 (value of
+ * with event_id equal to N. If all bits are set to 0 (value of
* 0), then the HWRM shall not forward any asynchronous event
* completion to this function driver.
*/
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_func_drv_rgtr_output {
uint16_t error_code;
/*
@@ -2422,7 +3970,7 @@ struct hwrm_func_drv_rgtr_output {
* the HWRM. A function driver shall implement this command. A function driver
* shall use this command during the driver unloading.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_func_drv_unrgtr_input {
uint16_t req_type;
/*
@@ -2455,11 +4003,12 @@ struct hwrm_func_drv_unrgtr_input {
* When this bit is '1', the function driver is notifying the
* HWRM to prepare for the shutdown.
*/
- #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
+ UINT32_C(0x1)
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_func_drv_unrgtr_output {
uint16_t error_code;
/*
@@ -2491,6 +4040,319 @@ struct hwrm_func_drv_unrgtr_output {
*/
} __attribute__((packed));
+/* hwrm_func_buf_rgtr */
+/*
+ * Description: This command is used by the PF driver to register buffers used
+ * in the PF-VF communication with the HWRM. The PF driver uses this command to
+ * register buffers for each PF-VF channel. A parent PF may issue this command
+ * per child VF. If VF ID is not valid, then this command is used to register
+ * buffers for all children VFs of the PF.
+ */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the vf_id field to be configured. */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ /* This bit must be '1' for the err_buf_addr field to be configured. */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
+ uint16_t vf_id;
+ /*
+ * This value is used to identify a Virtual Function (VF). The
+ * scope of VF ID is local within a PF.
+ */
+ uint16_t req_buf_num_pages;
+ /*
+ * This field represents the number of pages used for request
+ * buffer(s).
+ */
+ uint16_t req_buf_page_size;
+ /* This field represents the page size used for request buffer(s). */
+ /* 16 bytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_16B UINT32_C(0x4)
+ /* 4 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4K UINT32_C(0xc)
+ /* 8 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_8K UINT32_C(0xd)
+ /* 64 Kbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_64K UINT32_C(0x10)
+ /* 2 Mbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_2M UINT32_C(0x15)
+ /* 4 Mbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4M UINT32_C(0x16)
+ /* 1 Gbytes */
+ #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_1G UINT32_C(0x1e)
+ uint16_t req_buf_len;
+ /* The length of the request buffer per VF in bytes. */
+ uint16_t resp_buf_len;
+ /* The length of the response buffer in bytes. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint64_t req_buf_page_addr[10];
+ /* This field represents the page address of req buffer. */
+ uint64_t error_buf_addr;
+ /*
+ * This field is used to receive the error reporting from the
+ * chipset. Only applicable for PFs.
+ */
+ uint64_t resp_buf_addr;
+ /* This field is used to receive the response forwarded by the HWRM. */
+} __attribute__((packed));
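+
+/*
+ * Illustrative sketch, not part of the generated header: the page-size codes
+ * above are the base-2 logarithm of the page size (0x4 = 16 B, 0xc = 4 KB,
+ * 0x10 = 64 KB, ..., 0x1e = 1 GB), so the encoding can be derived from a byte
+ * count. The helper name is an example only.
+ */
+static inline uint16_t example_buf_page_size_code(uint32_t page_bytes)
+{
+	uint16_t code = 0;
+
+	while ((UINT32_C(1) << code) < page_bytes)
+		code++;
+	return code;	/* e.g. 4096 -> 0xc, the 4 KB encoding above */
+}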
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_func_buf_unrgtr */
+/*
+ * Description: This command is used by the PF driver to unregister buffers used
+ * in the PF-VF communication with the HWRM. The PF driver uses this command to
+ * unregister buffers for PF-VF communication. A parent PF may issue this
+ * command to unregister buffers for communication between the PF and a specific
+ * VF. If the VF ID is not valid, then this command is used to unregister
+ * buffers used for communications with all children VFs of the PF.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_buf_unrgtr_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the vf_id field to be configured. */
+ #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ uint16_t vf_id;
+ /*
+ * This value is used to identify a Virtual Function (VF). The
+ * scope of VF ID is local within a PF.
+ */
+ uint16_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_unrgtr_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_func_vf_cfg */
+/*
+ * Description: This command allows configuration of a VF by its driver. If this
+ * function is called by a PF driver, then the HWRM shall fail this command. If
+ * guest VLAN and/or MAC address are provided in this command, then the HWRM
+ * shall set up appropriate MAC/VLAN filters for the VF that is being
+ * configured. A VF driver should set VF MTU/MRU using this command prior to
+ * allocating RX VNICs or TX rings for the corresponding VF.
+ */
+/* Input (32 bytes) */
+
+struct hwrm_func_vf_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the mtu field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
+ /* This bit must be '1' for the guest_vlan field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the async_event_cr field to be configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4)
+ /* This bit must be '1' for the dflt_mac_addr field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
+ uint16_t mtu;
+ /*
+ * The maximum transmission unit requested on the function. The HWRM
+ * should make sure that the mtu of the function does not exceed the mtu
+ * of the physical port that this function is associated with. In
+ * addition to requesting mtu per function, it is possible to configure
+ * mtu per transmit ring. By default, the mtu of each transmit ring
+ * associated with a function is equal to the mtu of the function. The
+ * HWRM should make sure that the mtu of each transmit ring that is
+ * assigned to a function has a valid mtu.
+ */
+ uint16_t guest_vlan;
+ /*
+ * The guest VLAN for the function being configured. This field's format
+ * is same as 802.1Q Tag's Tag Control Information (TCI) format that
+ * includes both Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t async_event_cr;
+ /*
+ * ID of the target completion ring for receiving asynchronous event
+ * completions. If this field is not valid, then the HWRM shall use the
+ * default completion ring of the function that is being configured as
+ * the target completion ring for providing any asynchronous event
+ * completions for that function. If this field is valid, then the HWRM
+ * shall use the completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event completions for
+ * the function that is being configured.
+ */
+ uint8_t dflt_mac_addr[6];
+ /*
+ * This value is the current MAC address requested by the VF driver to
+ * be configured on this VF. A value of 00-00-00-00-00-00 indicates no
+ * MAC address configuration is requested by the VF driver. The parent
+ * PF driver may reject or overwrite this MAC address.
+ */
+} __attribute__((packed));
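+
+/*
+ * Illustrative sketch, not part of the generated header: a VF driver
+ * requesting an MTU and a default MAC address through hwrm_func_vf_cfg.
+ * memcpy() comes from <string.h>; wire-format byte ordering and the HWRM
+ * send path are omitted, and the helper name is an example only.
+ */
+static inline void
+example_fill_func_vf_cfg(struct hwrm_func_vf_cfg_input *req, uint16_t mtu,
+			 const uint8_t mac[6])
+{
+	req->enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU |
+		       HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR;
+	req->mtu = mtu;
+	memcpy(req->dflt_mac_addr, mac, 6);
+}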
+
+/* Output (16 bytes) */
+
+struct hwrm_func_vf_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
+
/* hwrm_port_phy_cfg */
/*
* Description: This command configures the PHY device for the port. It allows
@@ -2500,7 +4362,7 @@ struct hwrm_func_drv_unrgtr_output {
* configure PHY using this command. In a network partition mode, a PF driver
* shall not be allowed to configure PHY using this command.
*/
-/* Input (56 bytes) */
+/* Input (56 bytes) */
struct hwrm_port_phy_cfg_input {
uint16_t req_type;
/*
@@ -2540,19 +4402,8 @@ struct hwrm_port_phy_cfg_input {
* PHY configuration and settings specified in this command.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /*
- * When this bit is set to '1', the link shall be forced to be
- * taken down. # When this bit is set to '1", all other command
- * input settings related to the link speed shall be ignored.
- * Once the link state is forced down, it can be explicitly
- * cleared from that state by setting this flag to '0'. # If
- * this flag is set to '0', then the link shall be cleared from
- * forced down state if the link is in forced down state. There
- * may be conditions (e.g. out-of-band or sideband configuration
- * changes for the link) outside the scope of the HWRM
- * implementation that may clear forced down link state.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2)
+ /* Deprecated bit. Do not use. */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2)
/*
* When this bit is set to '1', the link shall be forced to the
* force_link_speed value. When this bit is set to '1', the HWRM
@@ -2570,14 +4421,14 @@ struct hwrm_port_phy_cfg_input {
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE)
* is requested to be enabled on this link. If EEE is not
* supported on this port, then this flag shall be ignored by
* the HWRM.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE)
* is requested to be disabled on this link. If EEE is not
* supported on this port, then this flag shall be ignored by
* the HWRM.
@@ -2598,49 +4449,67 @@ struct hwrm_port_phy_cfg_input {
* ignored by the HWRM. If EEE is disabled on this port, then
* this flag shall be ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
/*
* When set to 1, then the HWRM shall enable FEC
* autonegotiation on this port if supported. When set to 0,
* then this flag shall be ignored. If FEC autonegotiation is
* not supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
/*
* When set to 1, then the HWRM shall disable FEC
* autonegotiation on this port if supported. When set to 0,
* then this flag shall be ignored. If FEC autonegotiation is
* not supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
+ UINT32_C(0x200)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
* Code) on this port if supported. When set to 0, then this
* flag shall be ignored. If FEC CLAUSE 74 is not supported,
* then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE UINT32_C(0x400)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
+ UINT32_C(0x400)
/*
* When set to 1, then the HWRM shall disable FEC CLAUSE 74
- * (Fire Code) on this port if supported. When set to 0, then
+ * (Fire Code) on this port if supported. When set to 0, then
* this flag shall be ignored. If FEC CLAUSE 74 is not
* supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE UINT32_C(0x800)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
+ UINT32_C(0x800)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
* Solomon) on this port if supported. When set to 0, then this
* flag shall be ignored. If FEC CLAUSE 91 is not supported,
* then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE UINT32_C(0x1000)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
+ UINT32_C(0x1000)
/*
* When set to 1, then the HWRM shall disable FEC CLAUSE 91
- * (Reed Solomon) on this port if supported. When set to 0, then
+ * (Reed Solomon) on this port if supported. When set to 0, then
* this flag shall be ignored. If FEC CLAUSE 91 is not
* supported, then the HWRM shall ignore this flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE UINT32_C(0x2000)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
+ UINT32_C(0x2000)
+ /*
+ * When this bit is set to '1', the link shall be forced to be
+ * taken down. # When this bit is set to '1', all other command
+ * input settings related to the link speed shall be ignored.
+ * Once the link state is forced down, it can be explicitly
+ * cleared from that state by setting this flag to '0'. # If
+ * this flag is set to '0', then the link shall be cleared from
+ * forced down state if the link is in forced down state. There
+ * may be conditions (e.g. out-of-band or sideband configuration
+ * changes for the link) outside the scope of the HWRM
+ * implementation that may clear forced down link state.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN UINT32_C(0x4000)
uint32_t enables;
/* This bit must be '1' for the auto_mode field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
@@ -2657,7 +4526,8 @@ struct hwrm_port_phy_cfg_input {
* This bit must be '1' for the auto_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
+ UINT32_C(0x10)
/* This bit must be '1' for the wirespeed field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20)
/* This bit must be '1' for the lpbk field to be configured. */
@@ -2670,7 +4540,8 @@ struct hwrm_port_phy_cfg_input {
* This bit must be '1' for the eee_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+ UINT32_C(0x200)
/* This bit must be '1' for the tx_lpi_timer field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400)
uint16_t port_id;
@@ -2772,7 +4643,7 @@ struct hwrm_port_phy_cfg_input {
* set to 1, auto_pause bits should be ignored and should be set
* to 0.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
uint8_t unused_0;
uint16_t auto_link_speed;
/*
@@ -2808,34 +4679,46 @@ struct hwrm_port_phy_cfg_input {
* autoneg_mode is "mask". If unsupported speed is enabled an
* error will be generated.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
+ UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB UINT32_C(0x100)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
+ UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
+ UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB UINT32_C(0x400)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
+ UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB UINT32_C(0x2000)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
+ UINT32_C(0x2000)
uint8_t wirespeed;
/* This value controls the wirespeed feature. */
/* Wirespeed feature is disabled. */
@@ -2878,7 +4761,7 @@ struct hwrm_port_phy_cfg_input {
uint32_t preemphasis;
/*
* This value controls the pre-emphasis to be used for the link.
- * Driver should not set this value (use enable.preemphasis = 0)
+ * Driver should not set this value (use enable.preemphasis = 0)
* unless driver is sure of setting. Normally HWRM FW will
* determine proper pre-emphasis.
*/
@@ -2892,19 +4775,19 @@ struct hwrm_port_phy_cfg_input {
* speed shall be provided in this mask.
*/
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
uint8_t unused_2;
uint8_t unused_3;
uint32_t tx_lpi_timer;
@@ -2913,11 +4796,11 @@ struct hwrm_port_phy_cfg_input {
 * Requested setting of TX LPI timer in microseconds. This field
* is valid only when EEE is enabled and TX LPI is enabled.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
} __attribute__((packed));
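As a worked example of the flags/enables convention above, the sketch below fills a hwrm_port_phy_cfg_input so that only 10Gb and 25Gb are advertised and autonegotiation is restarted. The helper name and the choice of speeds are assumptions; the auto_mode value 0x4 ("speed mask") is spelled numerically because the input-side macro for it falls outside this excerpt, and req_type/seq_id/resp_addr are left for the send path to fill.

#include <string.h>
#include <stdint.h>

/* Assumes the HWRM definitions above are in scope. */
static void port_phy_advertise_10g_25g(struct hwrm_port_phy_cfg_input *req,
                                       uint16_t port_id)
{
        memset(req, 0, sizeof(*req));
        req->port_id = port_id;
        req->flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
        req->enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
                       HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
        /*
         * 0x4 = "speed mask" auto mode; the input-side macro for this
         * value lies outside this excerpt, so it is written numerically.
         */
        req->auto_mode = 0x4;
        req->auto_link_speed_mask =
                HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB |
                HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        /* req_type/seq_id/resp_addr are filled by the send path. */
}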
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_port_phy_cfg_output {
uint16_t error_code;
/*
@@ -2951,7 +4834,7 @@ struct hwrm_port_phy_cfg_output {
/* hwrm_port_phy_qcfg */
/* Description: This command queries the PHY configuration for the port. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_port_phy_qcfg_input {
uint16_t req_type;
/*
@@ -2984,7 +4867,7 @@ struct hwrm_port_phy_qcfg_input {
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (96 bytes) */
+/* Output (96 bytes) */
struct hwrm_port_phy_qcfg_output {
uint16_t error_code;
/*
@@ -3062,50 +4945,47 @@ struct hwrm_port_phy_qcfg_output {
 * each speed that is supported, the corresponding bit will be
* set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
- UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8)
/* 2Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000)
uint16_t force_link_speed;
/*
* Current setting of forced link speed. When the link speed is
* not being forced, this value shall be set to 0.
*/
/* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB \
- UINT32_C(0x19)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
 /* 20Gb link speed */
@@ -3113,14 +4993,13 @@ struct hwrm_port_phy_qcfg_output {
/* 25Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
- UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
uint8_t auto_mode;
/* Current setting of auto negotiation mode. */
/*
@@ -3142,8 +5021,7 @@ struct hwrm_port_phy_qcfg_output {
* DEPRECATED. An HWRM client should not use
* this mode.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW \
- UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
* Select the speeds based on the corresponding
* link speed mask value that is provided.
@@ -3177,8 +5055,7 @@ struct hwrm_port_phy_qcfg_output {
* set to 1, auto_pause bits should be ignored and should be set
* to 0.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
- UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
uint16_t auto_link_speed;
/*
* Current setting for auto_link_speed. This field is only valid
@@ -3203,9 +5080,9 @@ struct hwrm_port_phy_qcfg_output {
/* 50Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
uint16_t auto_link_speed_mask;
/*
* Current setting for auto_link_speed_mask that is used to
@@ -3214,23 +5091,22 @@ struct hwrm_port_phy_qcfg_output {
* in this field shall be a subset of supported speeds on this
* port.
*/
- /* 100Mb link speed (Half-duplex) */
+ /* 100Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
/* 2Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
@@ -3248,12 +5124,12 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
+ /* 10Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
+ /* 10Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
UINT32_C(0x2000)
uint8_t wirespeed;
@@ -3302,18 +5178,16 @@ struct hwrm_port_phy_qcfg_output {
/* Module is inserted and accepted */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0)
/* Module is rejected and transmit side Laser is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX UINT32_C(0x1)
/* Module mismatch warning. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
- UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG UINT32_C(0x2)
/* Module is rejected and powered down. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3)
/* Module is not inserted. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
UINT32_C(0x4)
/* Module status is not applicable. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
UINT32_C(0xff)
uint32_t preemphasis;
/* Current setting for preemphasis. */
@@ -3329,13 +5203,13 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0)
/* BASE-CR */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1)
- /* BASE-KR4 (Deprecated) */
+ /* BASE-KR4 (Deprecated) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2)
/* BASE-LR */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3)
/* BASE-SR */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4)
- /* BASE-KR2 (Deprecated) */
+ /* BASE-KR2 (Deprecated) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5)
/* BASE-KX */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6)
@@ -3347,6 +5221,35 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9)
/* SGMII connected external PHY */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa)
+ /* 25G_BASECR_CA_L */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L UINT32_C(0xb)
+ /* 25G_BASECR_CA_S */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S UINT32_C(0xc)
+ /* 25G_BASECR_CA_N */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N UINT32_C(0xd)
+ /* 25G_BASESR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR UINT32_C(0xe)
+ /* 100G_BASECR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 UINT32_C(0xf)
+ /* 100G_BASESR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 UINT32_C(0x10)
+ /* 100G_BASELR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 UINT32_C(0x11)
+ /* 100G_BASEER4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 UINT32_C(0x12)
+ /* 100G_BASESR10 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 UINT32_C(0x13)
+ /* 40G_BASECR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 UINT32_C(0x14)
+ /* 40G_BASESR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 UINT32_C(0x15)
+ /* 40G_BASELR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 UINT32_C(0x16)
+ /* 40G_BASEER4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 UINT32_C(0x17)
+ /* 40G_ACTIVE_CABLE */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \
+ UINT32_C(0x18)
uint8_t media_type;
/* This value represents a media type. */
/* Unknown */
@@ -3360,35 +5263,34 @@ struct hwrm_port_phy_qcfg_output {
uint8_t xcvr_pkg_type;
/* This value represents a transceiver type. */
/* PHY and MAC are in the same package */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
UINT32_C(0x1)
/* PHY and MAC are in different packages */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
UINT32_C(0x2)
uint8_t eee_config_phy_addr;
/*
* This field represents flags related to EEE configuration.
* These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
+ * auto_mode is not set to none (in other words autonegotiation
* is enabled).
*/
/* This field represents PHY address. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f)
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
/*
- * When set to 1, Energy Efficient Ethernet (EEE) mode is
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is
* enabled. Speeds for autoneg with EEE mode enabled are based
* on eee_link_speed_mask.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20)
/*
* This flag is valid only when eee_enabled is set to 1. # If
* eee_enabled is set to 0, then EEE mode is disabled and this
* flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
+ * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
* is enabled and in use. # If eee_enabled is set to 1 and this
- * flag is set to 0, then Energy Efficient Ethernet (EEE) mode
+ * flag is set to 0, then Energy Efficient Ethernet (EEE) mode
* is enabled but is currently not in use.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40)
@@ -3396,16 +5298,16 @@ struct hwrm_port_phy_qcfg_output {
* This flag is valid only when eee_enabled is set to 1. # If
* eee_enabled is set to 0, then EEE mode is disabled and this
* flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
+ * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
* is enabled and TX LPI is enabled. # If eee_enabled is set to
* 1 and this flag is set to 0, then Energy Efficient Ethernet
- * (EEE) mode is enabled but TX LPI is disabled.
+ * (EEE) mode is enabled but TX LPI is disabled.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80)
/*
* This field represents flags related to EEE configuration.
* These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
+ * auto_mode is not set to none (in other words autonegotiation
* is enabled).
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0)
@@ -3427,16 +5329,16 @@ struct hwrm_port_phy_qcfg_output {
* The advertised speeds for the port by the link partner. Each
* advertised speed will be set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
+ /* 100Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ /* 100Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
+ /* 1Gb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
UINT32_C(0x8)
/* 2Gb link speed */
@@ -3463,10 +5365,10 @@ struct hwrm_port_phy_qcfg_output {
/* 100Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
+ /* 10Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
+ /* 10Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
UINT32_C(0x2000)
uint8_t link_partner_adv_auto_mode;
@@ -3483,13 +5385,14 @@ struct hwrm_port_phy_qcfg_output {
/* Select all possible speeds for autoneg mode. */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
- UINT32_C(0x1)
+ UINT32_C(0x1)
/*
* Select only the auto_link_speed speed for
* autoneg mode. This mode has been DEPRECATED.
* An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
UINT32_C(0x2)
/*
* Select the auto_link_speed or any speed below
@@ -3499,14 +5402,14 @@ struct hwrm_port_phy_qcfg_output {
*/
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
- UINT32_C(0x3)
+ UINT32_C(0x3)
/*
* Select the speeds based on the corresponding
* link speed mask value that is provided.
*/
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
- UINT32_C(0x4)
+ UINT32_C(0x4)
uint8_t link_partner_adv_pause;
/* The advertised pause settings on the port by the link partner. */
/*
@@ -3532,13 +5435,13 @@ struct hwrm_port_phy_qcfg_output {
/* Reserved */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ /* 100Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
/* Reserved */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ /* 1Gb link speed (Full-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* Reserved */
@@ -3559,31 +5462,31 @@ struct hwrm_port_phy_qcfg_output {
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
- UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
- UINT32_C(0x2)
+ UINT32_C(0x2)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
- UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ UINT32_C(0x8)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
- UINT32_C(0x10)
+ UINT32_C(0x10)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
- UINT32_C(0x20)
+ UINT32_C(0x20)
/* 10Gb link speed */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
- UINT32_C(0x40)
+ UINT32_C(0x40)
uint32_t xcvr_identifier_type_tx_lpi_timer;
/* This value represents transceiver identifier type. */
/*
@@ -3591,32 +5494,31 @@ struct hwrm_port_phy_qcfg_output {
 * is valid only when the eee_enabled flag is set to 1 and
* tx_lpi_enabled is set to 1.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
- UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
#define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
/* This value represents transceiver identifier type. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
UINT32_C(0xff000000)
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
/* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
(UINT32_C(0x0) << 24)
/* SFP/SFP+/SFP28 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
(UINT32_C(0x3) << 24)
/* QSFP */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
(UINT32_C(0xc) << 24)
/* QSFP+ */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
(UINT32_C(0xd) << 24)
/* QSFP28 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
(UINT32_C(0x11) << 24)
uint16_t fec_cfg;
/*
* This value represents the current configuration of Forward
- * Error Correction (FEC) on the port.
+ * Error Correction (FEC) on the port.
*/
/*
* When set to 1, then FEC is not supported on this port. If
@@ -3627,7 +5529,7 @@ struct hwrm_port_phy_qcfg_output {
* then the HWRM shall set this flag to 1 when reporting FEC
* capability.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
UINT32_C(0x1)
/*
* When set to 1, then FEC autonegotiation is supported on this
@@ -3645,30 +5547,30 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
UINT32_C(0x4)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on
+ * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
* not supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
UINT32_C(0x8)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on
+ * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
* disabled if supported. This flag should be ignored if FEC
* CLAUSE 74 is not supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
UINT32_C(0x10)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported
+ * on this port. When set to 0, then FEC CLAUSE 91 (Reed
* Solomon) is not supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
UINT32_C(0x20)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled
+ * on this port. When set to 0, then FEC CLAUSE 91 (Reed
* Solomon) is disabled if supported. This flag should be
* ignored if FEC CLAUSE 91 is not supported on this port.
*/
@@ -3704,6 +5606,1132 @@ struct hwrm_port_phy_qcfg_output {
*/
} __attribute__((packed));
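To show how the support_speeds bitmask above is meant to be consumed, here is an illustrative decoder; the helper name, the printing and the support_speeds field name (inferred from the macro prefixes, since the field declaration falls outside this excerpt) are assumptions of the sketch.

#include <stdint.h>
#include <stdio.h>

/* Assumes the HWRM definitions above are in scope. */
static void print_supported_speeds(const struct hwrm_port_phy_qcfg_output *resp)
{
        static const struct { uint16_t bit; const char *name; } tbl[] = {
                { HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB, "100M" },
                { HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB,   "1G"   },
                { HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB,  "10G"  },
                { HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB,  "25G"  },
                { HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB,  "40G"  },
                { HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB,  "50G"  },
                { HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB, "100G" },
        };
        unsigned int i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (resp->support_speeds & tbl[i].bit)
                        printf("port supports %s\n", tbl[i].name);
}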
+/* hwrm_port_qstats */
+/* Description: This function returns per port Ethernet statistics. */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port that is being queried. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2[3];
+ uint8_t unused_3;
+ uint64_t tx_stat_host_addr;
+ /* This is the host address where Tx port statistics will be stored */
+ uint64_t rx_stat_host_addr;
+ /* This is the host address where Rx port statistics will be stored */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t tx_stat_size;
+ /* The size of TX port statistics block in bytes. */
+ uint16_t rx_stat_size;
+ /* The size of RX port statistics block in bytes. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
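Because hwrm_port_qstats DMAs the Tx and Rx statistics blocks into host memory, the driver has to hand firmware two DMA-able addresses in the request. The sketch below only prepares the request; how the buffers are allocated, how their IOVAs are obtained and how the message is posted to firmware are outside the sketch, and the helper name and parameters are assumptions.

#include <string.h>
#include <stdint.h>

/* Assumes the HWRM definitions above are in scope. */
static void port_qstats_prep(struct hwrm_port_qstats_input *req,
                             uint16_t port_id,
                             uint64_t tx_stats_iova, uint64_t rx_stats_iova)
{
        memset(req, 0, sizeof(*req));
        req->port_id = port_id;
        req->tx_stat_host_addr = tx_stats_iova; /* firmware DMA target (Tx) */
        req->rx_stat_host_addr = rx_stats_iova; /* firmware DMA target (Rx) */
        /*
         * On completion, tx_stat_size/rx_stat_size in the output tell the
         * driver how many bytes of each block firmware actually filled.
         */
}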
+/* hwrm_port_clr_stats */
+/*
+ * Description: This function clears per port statistics. The HWRM shall not
+ * allow a VF driver to clear port statistics. The HWRM shall not allow a PF
+ * driver to clear port statistics in a partitioning mode. The HWRM may allow a
+ * PF driver to clear port statistics in the non-partitioning mode.
+ */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port that is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
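Given the restriction above (no clearing from a VF, and no clearing from a PF in a partitioning mode), a driver would typically gate the request before building it. A small illustrative guard and request setup follows; the is_vf/is_partitioned flags are assumed driver state, not fields of this header.

#include <string.h>
#include <stdint.h>

/* Assumes the HWRM definitions above are in scope. */
static int port_clr_stats_prep(struct hwrm_port_clr_stats_input *req,
                               uint16_t port_id, int is_vf, int is_partitioned)
{
        /* Firmware will reject these cases anyway; skip the call early. */
        if (is_vf || is_partitioned)
                return -1;
        memset(req, 0, sizeof(*req));
        req->port_id = port_id;
        return 0;
}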
+/* hwrm_port_led_cfg */
+/*
+ * Description: This function is used to configure LEDs on a given port. Each
+ * port has individual set of LEDs associated with it. These LEDs are used for
+ * speed/link configuration as well as activity indicator configuration. Up to
+ * three LEDs can be configured, one for activity and two for speeds.
+ */
+/* Input (64 bytes) */
+struct hwrm_port_led_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the led0_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID UINT32_C(0x1)
+ /* This bit must be '1' for the led0_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE UINT32_C(0x2)
+ /* This bit must be '1' for the led0_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the led0_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the led0_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the led0_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID UINT32_C(0x20)
+ /* This bit must be '1' for the led1_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID UINT32_C(0x40)
+ /* This bit must be '1' for the led1_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE UINT32_C(0x80)
+ /* This bit must be '1' for the led1_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the led1_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the led1_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the led1_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID UINT32_C(0x800)
+ /* This bit must be '1' for the led2_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID UINT32_C(0x1000)
+ /* This bit must be '1' for the led2_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE UINT32_C(0x2000)
+ /* This bit must be '1' for the led2_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the led2_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the led2_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the led2_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID UINT32_C(0x20000)
+ /* This bit must be '1' for the led3_id field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID UINT32_C(0x40000)
+ /* This bit must be '1' for the led3_state field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE UINT32_C(0x80000)
+ /* This bit must be '1' for the led3_color field to be configured. */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the led3_blink_on field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON UINT32_C(0x200000)
+ /*
+ * This bit must be '1' for the led3_blink_off field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \
+ UINT32_C(0x400000)
+ /*
+ * This bit must be '1' for the led3_group_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID UINT32_C(0x800000)
+ uint16_t port_id;
+ /* Port ID of port whose LEDs are configured. */
+ uint8_t num_leds;
+ /*
+ * The number of LEDs that are being configured. Up to 4 LEDs
+ * can be configured with this command.
+ */
+ uint8_t rsvd;
+ /* Reserved field. */
+ uint8_t led0_id;
+ /* An identifier for the LED #0. */
+ uint8_t led0_state;
+ /* The requested state of the LED #0. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led0_color;
+ /* The requested color of LED #0. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_0;
+ uint16_t led0_blink_on;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led0_blink_off;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led0_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs to.
+ * If set to 0, then the LED #0 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #0 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd0;
+ /* Reserved field. */
+ uint8_t led1_id;
+ /* An identifier for the LED #1. */
+ uint8_t led1_state;
+ /* The requested state of the LED #1. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led1_color;
+ /* The requested color of LED #1. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_1;
+ uint16_t led1_blink_on;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led1_blink_off;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led1_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs to.
+ * If set to 0, then the LED #1 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #1 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd1;
+ /* Reserved field. */
+ uint8_t led2_id;
+ /* An identifier for the LED #2. */
+ uint8_t led2_state;
+ /* The requested state of the LED #2. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led2_color;
+ /* The requested color of LED #2. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_2;
+ uint16_t led2_blink_on;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led2_blink_off;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led2_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs to.
+ * If set to 0, then the LED #2 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #2 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd2;
+ /* Reserved field. */
+ uint8_t led3_id;
+ /* An identifier for the LED #3. */
+ uint8_t led3_state;
+ /* The requested state of the LED #3. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led3_color;
+ /* The requested color of LED #3. */
+ /* Default */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_3;
+ uint16_t led3_blink_on;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led3_blink_off;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led3_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs to.
+ * If set to 0, then the LED #3 shall not be grouped and shall
+ * be treated as an individual resource. For all other non-zero
+ * values of this field, LED #3 shall be grouped together with
+ * the LEDs with the same group ID value.
+ */
+ uint8_t rsvd3;
+ /* Reserved field. */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_port_led_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
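As an illustration of the enables/led0_* pairing above, the sketch below asks firmware to blink LED #0 amber with a 500 ms on / 500 ms off cycle. The helper name, the timings and the led0_id value of 0 are assumptions; the macros and fields come from the structure above.

#include <string.h>
#include <stdint.h>

/* Assumes the HWRM definitions above are in scope. */
static void port_led_blink_amber(struct hwrm_port_led_cfg_input *req,
                                 uint16_t port_id)
{
        memset(req, 0, sizeof(*req));
        req->port_id = port_id;
        req->num_leds = 1;
        req->enables = HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
                       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
                       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR |
                       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
                       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF;
        req->led0_id = 0;                       /* first LED on the port */
        req->led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
        req->led0_color = HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER;
        req->led0_blink_on = 500;               /* ms on per cycle */
        req->led0_blink_off = 500;              /* ms off per cycle */
}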
+/* hwrm_port_led_qcfg */
+/*
+ * Description: This function is used to query configuration of LEDs on a given
+ * port. Each port has an individual set of LEDs associated with it. These LEDs are
+ * used for speed/link configuration as well as activity indicator
+ * configuration. Up to three LEDs can be configured, one for activity and two
+ * for speeds.
+ */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port whose LED configuration is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (56 bytes) */
+struct hwrm_port_led_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint8_t num_leds;
+ /*
+ * The number of LEDs that are configured on this port. Up to 4
+ * LEDs can be returned in the response.
+ */
+ uint8_t led0_id;
+ /* An identifier for the LED #0. */
+ uint8_t led0_type;
+ /* The type of LED #0. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led0_state;
+ /* The current state of the LED #0. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led0_color;
+ /* The color of LED #0. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_0;
+ uint16_t led0_blink_on;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led0_blink_off;
+ /*
+ * If the LED #0 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led0_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs to.
+ * If set to 0, then the LED #0 is not grouped. For all other
+ * non-zero values of this field, LED #0 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t led1_id;
+ /* An identifier for the LED #1. */
+ uint8_t led1_type;
+ /* The type of LED #1. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led1_state;
+ /* The current state of the LED #1. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led1_color;
+ /* The color of LED #1. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_1;
+ uint16_t led1_blink_on;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led1_blink_off;
+ /*
+ * If the LED #1 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led1_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs to.
+ * If set to 0, then the LED #1 is not grouped. For all other
+ * non-zero values of this field, LED #1 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t led2_id;
+ /* An identifier for the LED #2. */
+ uint8_t led2_type;
+ /* The type of LED #2. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led2_state;
+ /* The current state of the LED #2. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led2_color;
+ /* The color of LED #2. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_2;
+ uint16_t led2_blink_on;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led2_blink_off;
+ /*
+ * If the LED #2 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led2_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs to.
+ * If set to 0, then the LED #2 is not grouped. For all other
+ * non-zero values of this field, LED #2 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t led3_id;
+ /* An identifier for the LED #3. */
+ uint8_t led3_type;
+ /* The type of LED #3. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led3_state;
+ /* The current state of the LED #3. */
+ /* Default state of the LED */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ /* Off */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1)
+ /* On */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2)
+ /* Blink */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ /* Blink Alternately */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ uint8_t led3_color;
+ /* The color of LED #3. */
+ /* Default */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ /* Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ /* Green */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ /* Green or Amber */
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ uint8_t unused_3;
+ uint16_t led3_blink_on;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED on
+ * between cycles.
+ */
+ uint16_t led3_blink_off;
+ /*
+ * If the LED #3 state is "blink" or "blinkalt", then this field
+ * represents the requested time in milliseconds to keep LED off
+ * between cycles.
+ */
+ uint8_t led3_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs to.
+ * If set to 0, then the LED #3 is not grouped. For all other
+ * non-zero values of this field, LED #3 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_4;
+ uint16_t unused_5;
+ uint8_t unused_6;
+ uint8_t unused_7;
+ uint8_t unused_8;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_port_led_qcaps */
+/*
+ * Description: This function is used to query capabilities of LEDs on a given
+ * port. Each port has individual set of LEDs associated with it. These LEDs are
+ * used for speed/link configuration as well as activity indicator
+ * configuration.
+ */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcaps_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t port_id;
+ /* Port ID of port whose LED configuration is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (48 bytes) */
+struct hwrm_port_led_qcaps_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint8_t num_leds;
+ /*
+ * The number of LEDs that are configured on this port. Up to 4
+ * LEDs can be returned in the response.
+ */
+ uint8_t unused_0[3];
+ /* Reserved for future use. */
+ uint8_t led0_id;
+ /* An identifier for the LED #0. */
+ uint8_t led0_type;
+ /* The type of LED #0. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led0_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs to.
+ * If set to 0, then the LED #0 cannot be grouped. For all other
+ * non-zero values of this field, LED #0 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_1;
+ uint16_t led0_state_caps;
+ /* The states supported by LED #0. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led0_color_caps;
+ /* The colors supported by LED #0. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t led1_id;
+ /* An identifier for the LED #1. */
+ uint8_t led1_type;
+ /* The type of LED #1. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led1_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs to.
+ * If set to 0, then the LED #1 cannot be grouped. For all other
+ * non-zero values of this field, LED #1 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_2;
+ uint16_t led1_state_caps;
+ /* The states supported by LED #1. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led1_color_caps;
+ /* The colors supported by LED #1. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t led2_id;
+ /* An identifier for the LED #2. */
+ uint8_t led2_type;
+ /* The type of LED #2. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led2_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs to.
+ * If set to 0, then the LED #2 cannot be grouped. For all other
+ * non-zero values of this field, LED #2 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_3;
+ uint16_t led2_state_caps;
+ /* The states supported by LED #2. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led2_color_caps;
+ /* The colors supported by LED #2. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t led3_id;
+ /* An identifier for the LED #3. */
+ uint8_t led3_type;
+ /* The type of LED #3. */
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ uint8_t led3_group_id;
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs to.
+ * If set to 0, then the LED #3 cannot be grouped. For all other
+ * non-zero values of this field, LED #3 is grouped together
+ * with the LEDs with the same group ID value.
+ */
+ uint8_t unused_4;
+ uint16_t led3_state_caps;
+ /* The states supported by LED #3. */
+ /*
+ * If set to 1, this LED is enabled. If set to 0, this LED is
+ * disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED. If set to 0,
+ * off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED. If set to 0,
+ * on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED. If set to
+ * 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED. If set
+ * to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ uint16_t led3_color_caps;
+ /* The colors supported by LED #3. */
+ /* reserved */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED. If set to
+ * 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED. If set to
+ * 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ uint8_t unused_5;
+ uint8_t unused_6;
+ uint8_t unused_7;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
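For illustration only (editorial sketch, not part of this patch): one way a driver might decode the LED #0 capability bits once the hwrm_port_led_qcaps response above has been completed. The helper name is invented for this example; little-endian to host conversion of the 16-bit capability words (normally done with rte_le_to_cpu_16()) is omitted, and the structures and #defines above are assumed to be in scope.

#include <stdbool.h>
#include <stdio.h>

/* Sketch: report what LED #0 on a port supports. */
static void
show_led0_caps(const struct hwrm_port_led_qcaps_output *resp)
{
	bool blink, amber;

	/* 'valid' reads as 1 only once the whole response is in memory. */
	if (resp->valid != 1 || resp->num_leds == 0)
		return;

	blink = resp->led0_state_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED;
	amber = resp->led0_color_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED;

	printf("LED0 id=%u type=0x%x blink=%d amber=%d\n",
	       (unsigned int)resp->led0_id, (unsigned int)resp->led0_type,
	       blink, amber);
}
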
/* hwrm_queue_qportcfg */
/*
* Description: This function is called by a driver to query queue configuration
@@ -3715,7 +6743,7 @@ struct hwrm_port_phy_qcfg_output {
* then the driver shall only use queues for which service profiles are pre-
* configured.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
uint16_t req_type;
/*
@@ -3754,7 +6782,7 @@ struct hwrm_queue_qportcfg_input {
#define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
#define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
uint16_t port_id;
/*
@@ -3764,7 +6792,7 @@ struct hwrm_queue_qportcfg_input {
uint16_t unused_0;
} __attribute__((packed));
-/* Output (32 bytes) */
+/* Output (32 bytes) */
struct hwrm_queue_qportcfg_output {
uint16_t error_code;
/*
@@ -3813,18 +6841,18 @@ struct hwrm_queue_qportcfg_output {
* TX side is the same as the corresponding queue configuration
* on the RX side.
*/
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
- UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG UINT32_C(0x1)
uint8_t queue_pfcenable_cfg_allowed;
/*
* Bitmask indicating which queues can be configured by the
* hwrm_queue_pfcenable_cfg command. Each bit represents a
- * specific queue where bit 0 represents queue 0 and bit 7
- * represents queue 7. # A value of 0 indicates that the queue
- * is not configurable by the hwrm_queue_pfcenable_cfg command.
- * # A value of 1 indicates that the queue is configurable. # A
- * hwrm_queue_pfcenable_cfg command shall return error when
- * trying to configure a queue that is not configurable.
+ * specific priority where bit 0 represents priority 0 and bit 7
+ * represents priority 7. # A value of 0 indicates that the
+ * priority is not configurable by the hwrm_queue_pfcenable_cfg
+ * command. # A value of 1 indicates that the priority is
+ * configurable. # A hwrm_queue_pfcenable_cfg command shall
+ * return error when trying to configure a priority that is not
+ * configurable.
*/
uint8_t queue_pri2cos_cfg_allowed;
/*
@@ -3860,14 +6888,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id0_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
@@ -3884,14 +6912,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id1_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
@@ -3908,14 +6936,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id2_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
@@ -3932,14 +6960,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id3_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
@@ -3956,14 +6984,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id4_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
@@ -3980,14 +7008,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id5_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
@@ -4004,14 +7032,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id6_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
@@ -4028,14 +7056,14 @@ struct hwrm_queue_qportcfg_output {
*/
uint8_t queue_id7_service_profile;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
/* Lossless */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
/*
- * Set to 0xFF... (All Fs) if there is no
+ * Set to 0xFF... (All Fs) if there is no
* service profile specified
*/
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
@@ -4068,7 +7096,7 @@ struct hwrm_queue_qportcfg_output {
* enabled, then the internal VNIC to SVIF mapping data structures shall be
* programmed at the time of VNIC allocation.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_vnic_alloc_input {
uint16_t req_type;
/*
@@ -4105,7 +7133,7 @@ struct hwrm_vnic_alloc_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_alloc_output {
uint16_t error_code;
/*
@@ -4144,7 +7172,7 @@ struct hwrm_vnic_alloc_output {
* VNIC as well as the VNIC. Reset and release all resources associated with the
* VNIC.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_vnic_free_input {
uint16_t req_type;
/*
@@ -4177,7 +7205,7 @@ struct hwrm_vnic_free_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_free_output {
uint16_t error_code;
/*
@@ -4211,7 +7239,7 @@ struct hwrm_vnic_free_output {
/* hwrm_vnic_cfg */
/* Description: Configure the RX VNIC structure. */
-/* Input (40 bytes) */
+/* Input (40 bytes) */
struct hwrm_vnic_cfg_input {
uint16_t req_type;
/*
@@ -4271,10 +7299,10 @@ struct hwrm_vnic_cfg_input {
* roce_dual_vnic_mode flag is set to '1', then the HWRM client
* shall not set this flag to '1'.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
/*
* When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where exact match is
+ * application (e.g. Receive Flow Steering) where exact match is
* used to direct packets to a VNIC with one destination ring
* group only, there is no need to configure RSS indirection
* table for that VNIC as only one destination ring group is
@@ -4310,17 +7338,17 @@ struct hwrm_vnic_cfg_input {
*/
uint16_t rss_rule;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
uint16_t cos_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
uint16_t lb_rule;
/*
- * RSS ID for load balancing rule/table structure. 0xFF... (All
+ * RSS ID for load balancing rule/table structure. 0xFF... (All
* Fs) if there is no LB rule.
*/
uint16_t mru;
@@ -4334,7 +7362,7 @@ struct hwrm_vnic_cfg_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_cfg_output {
uint16_t error_code;
/*
@@ -4366,9 +7394,153 @@ struct hwrm_vnic_cfg_output {
*/
} __attribute__((packed));
+/* hwrm_vnic_qcfg */
+/*
+ * Description: Query the RX VNIC structure. This function can be used by a PF
+ * driver to query its own VNIC resource or VNIC resource of its child VF. This
+ * function can also be used by a VF driver to query its own VNIC resource.
+ */
+/* Input (32 bytes) */
+struct hwrm_vnic_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the vf_id_valid field to be configured. */
+ #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
+ uint32_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t vf_id;
+ /* ID of Virtual Function whose VNIC resource is being queried. */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (32 bytes) */
+struct hwrm_vnic_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t dflt_ring_grp;
+ /* Default Completion ring for the VNIC. */
+ uint16_t rss_rule;
+ /*
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * there is no RSS rule.
+ */
+ uint16_t cos_rule;
+ /*
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * there is no COS rule.
+ */
+ uint16_t lb_rule;
+ /*
+ * RSS ID for load balancing rule/table structure. 0xFF... (All
+ * Fs) if there is no LB rule.
+ */
+ uint16_t mru;
+ /* The maximum receive unit of the vnic. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is the default VNIC for the
+ * function.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to strip VLAN in
+ * the RX path. If set to '0', then VLAN stripping is disabled
+ * on this VNIC.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to buffer
+ * receive packets in the hardware until the host posts new
+ * receive buffers. If set to '0', then bd_stall is disabled on
+ * this VNIC.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to receive both
+ * RoCE and non-RoCE traffic. If set to '0', then this VNIC is
+ * not configured to operate in dual VNIC mode.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ /*
+ * When this flag is set to '1', the VNIC is configured to
+ * receive only RoCE traffic. When this flag is set to '0', the
+ * VNIC is not configured to receive only RoCE traffic. If
+ * roce_dual_vnic_mode flag and this flag both are set to '1',
+ * then it is an invalid configuration of the VNIC. The HWRM
+ * should not allow that type of mis-configuration by HWRM
+ * clients.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ /*
+ * When a VNIC uses one destination ring group for certain
+ * application (e.g. Receive Flow Steering) where exact match is
+ * used to direct packets to a VNIC with one destination ring
+ * group only, there is no need to configure RSS indirection
+ * table for that VNIC as only one destination ring group is
+ * used. When this bit is set to '1', then the VNIC is enabled
+ * in a mode where RSS is enabled in the VNIC using a RSS
+ * context for computing RSS hash but the RSS indirection table
+ * is not configured.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
+ uint32_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t unused_5;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
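For illustration only (editorial sketch, not part of this patch): a minimal helper that summarises a completed hwrm_vnic_qcfg response. The function name is invented for the example; little-endian to host conversion of the multi-byte fields is assumed to be handled by the caller.

#include <stdint.h>
#include <stdio.h>

/* Sketch: print the key settings reported for a VNIC. */
static void
show_vnic_qcfg(const struct hwrm_vnic_qcfg_output *resp)
{
	uint32_t flags = resp->flags;

	printf("vnic: mru=%u rss_rule=0x%x default=%d vlan_strip=%d rss_dflt_cr=%d\n",
	       (unsigned int)resp->mru, (unsigned int)resp->rss_rule,
	       !!(flags & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT),
	       !!(flags & HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE),
	       !!(flags & HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE));
}
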
/* hwrm_vnic_rss_cfg */
/* Description: This function is used to enable RSS configuration. */
-/* Input (48 bytes) */
+/* Input (48 bytes) */
struct hwrm_vnic_rss_cfg_input {
uint16_t req_type;
/*
@@ -4441,7 +7613,7 @@ struct hwrm_vnic_rss_cfg_input {
uint16_t unused_1[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_rss_cfg_output {
uint16_t error_code;
/*
@@ -4473,9 +7645,295 @@ struct hwrm_vnic_rss_cfg_output {
*/
} __attribute__((packed));
+/* hwrm_vnic_plcmodes_cfg */
+/*
+ * Description: This function can be used to set placement mode configuration of
+ * the VNIC.
+ */
+/* Input (40 bytes) */
+
+struct hwrm_vnic_plcmodes_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to use regular
+ * placement algorithm. By default, the regular placement algorithm
+ * shall be enabled on the VNIC.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC shall be configured to use the jumbo
+ * placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for IPv4 packets according to the following rules: # If
+ * the packet is identified as TCP/IPv4, then the packet is split at the
+ * beginning of the TCP payload. # If the packet is identified as
+ * UDP/IPv4, then the packet is split at the beginning of UDP payload. #
+ * If the packet is identified as non-TCP and non-UDP IPv4 packet, then
+ * the packet is split at the beginning of the upper layer protocol
+ * header carried in the IPv4 packet.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for IPv6 packets according to the following rules: # If
+ * the packet is identified as TCP/IPv6, then the packet is split at the
+ * beginning of the TCP payload. # If the packet is identified as
+ * UDP/IPv6, then the packet is split at the beginning of UDP payload. #
+ * If the packet is identified as non-TCP and non-UDP IPv6 packet, then
+ * the packet is split at the beginning of the upper layer protocol
+ * header carried in the IPv6 packet.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for FCoE packets at the beginning of FC payload.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured to enable Header-
+ * Data split for RoCE packets at the beginning of RoCE payload (after
+ * BTH/GRH headers).
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the jumbo_thresh_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the hds_offset_valid field to be configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the hds_threshold_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \
+ UINT32_C(0x4)
+ uint32_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t jumbo_thresh;
+ /*
+ * When jumbo placement algorithm is enabled, this value is used to
+ * determine the threshold for jumbo placement. Packets with length
+ * larger than this value will be placed according to the jumbo
+ * placement algorithm.
+ */
+ uint16_t hds_offset;
+ /*
+ * This value is used to determine the offset into packet buffer where
+ * the split data (payload) will be placed according to one of the HDS
+ * placement algorithms. The lengths of packet buffers provided for split
+ * data shall be larger than this value.
+ */
+ uint16_t hds_threshold;
+ /*
+ * When one of the HDS placement algorithms is enabled, this value is
+ * used to determine the threshold for HDS placement. Packets with
+ * length larger than this value will be placed according to the HDS
+ * placement algorithm. This value shall be a multiple of 4 bytes.
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+
+struct hwrm_vnic_plcmodes_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
+
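For illustration only (editorial sketch, not part of this patch): filling in a hwrm_vnic_plcmodes_cfg request that asks for jumbo placement plus IPv4/IPv6 header-data split. The helper name and the two threshold values are invented for the example; the common HWRM header fields (req_type, seq_id, resp_addr) and the CPU-to-little-endian conversion are assumed to be applied by the driver's generic HWRM send path before the message is posted.

#include <stdint.h>
#include <string.h>

/* Sketch: build a placement-mode config request for one VNIC. */
static void
build_plcmodes_cfg(struct hwrm_vnic_plcmodes_cfg_input *req, uint32_t vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = vnic_id;
	req->flags = HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT |
		     HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 |
		     HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6;
	req->enables = HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID |
		       HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID;
	req->jumbo_thresh = 0x2000;	/* example threshold, in bytes */
	req->hds_threshold = 256;	/* example value; must be a multiple of 4 */
}
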
+/* hwrm_vnic_plcmodes_qcfg */
+/*
+ * Description: This function can be used to query placement mode configuration
+ * of the VNIC.
+ */
+/* Input (24 bytes) */
+
+struct hwrm_vnic_plcmodes_qcfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t vnic_id;
+ /* Logical vnic ID */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (24 bytes) */
+
+struct hwrm_vnic_plcmodes_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is configured to use regular placement
+ * algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to use the jumbo
+ * placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for IPv4 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for IPv6 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for FCoE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC is configured to enable Header-Data
+ * split for RoCE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is configured to be the default VNIC
+ * of the requesting function.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC UINT32_C(0x40)
+ uint16_t jumbo_thresh;
+ /*
+ * When jumbo placement algorithm is enabled, this value is used to
+ * determine the threshold for jumbo placement. Packets with length
+ * larger than this value will be placed according to the jumbo
+ * placement algorithm.
+ */
+ uint16_t hds_offset;
+ /*
+ * This value is used to determine the offset into packet buffer where
+ * the split data (payload) will be placed according to one of the HDS
+ * placement algorithms. The lengths of packet buffers provided for split
+ * data shall be larger than this value.
+ */
+ uint16_t hds_threshold;
+ /*
+ * When one of the HDS placement algorithms is enabled, this value is
+ * used to determine the threshold for HDS placement. Packets with
+ * length larger than this value will be placed according to the HDS
+ * placement algorithm. This value shall be a multiple of 4 bytes.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
+
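For illustration only (editorial sketch, not part of this patch): checking, from a completed hwrm_vnic_plcmodes_qcfg response, whether the header-data split modes requested in the previous sketch are actually active. The helper name is invented for the example.

#include <stdbool.h>

/* Sketch: true if HDS is enabled for IPv4 or IPv6 on this VNIC. */
static bool
vnic_hds_enabled(const struct hwrm_vnic_plcmodes_qcfg_output *resp)
{
	return (resp->flags & (HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 |
			       HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6)) != 0;
}
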
/* hwrm_vnic_rss_cos_lb_ctx_alloc */
/* Description: This function is used to allocate COS/Load Balance context. */
-/* Input (16 bytes) */
+/* Input (16 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
uint16_t req_type;
/*
@@ -4505,7 +7963,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
*/
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
uint16_t error_code;
/*
@@ -4542,7 +8000,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
/* hwrm_vnic_rss_cos_lb_ctx_free */
/* Description: This function can be used to free COS/Load Balance context. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
uint16_t req_type;
/*
@@ -4575,7 +8033,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_input {
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
uint16_t error_code;
/*
@@ -4607,11 +8065,187 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
*/
} __attribute__((packed));
+/* hwrm_vnic_tpa_cfg */
+/* Description: This function is used to enable/configure TPA on the VNIC. */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of non-tunneled TCP
+ * packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Windows
+ * Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Linux
+ * Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for TCP packets with IP
+ * ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for GRE tunneled TCP
+ * packets only if all packets have the same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP/IPv4 packets with consecutively increasing
+ * IPIDs. In other words, the last packet that is being
+ * aggregated to an already existing aggregation context shall
+ * have IPID 1 more than the IPID of the last packet that was
+ * aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit
+ * (IPv6) value.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80)
+ uint32_t enables;
+ /* This bit must be '1' for the max_agg_segs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ /* This bit must be '1' for the max_aggs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the max_agg_timer field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /* This bit must be '1' for the min_agg_len field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
+ uint16_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t max_agg_segs;
+ /*
+ * This is the maximum number of TCP segments that can be
+ * aggregated (unit is Log2). Max value is 31.
+ */
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ uint16_t max_aggs;
+ /*
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7.
+ */
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t max_agg_timer;
+ /*
+ * This is the maximum amount of time allowed for an aggregation
+ * context to complete after it was initiated.
+ */
+ uint32_t min_agg_len;
+ /*
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
+ */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
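For illustration only (editorial sketch, not part of this patch): filling in a hwrm_vnic_tpa_cfg request that enables plain and tunneled TPA with GRO-style aggregation, capped at 8 segments per aggregation and 16 concurrent aggregation contexts. The helper name is invented for the example; the common HWRM header fields and endianness conversion are assumed to be handled by the driver's generic HWRM send path.

#include <stdint.h>
#include <string.h>

/* Sketch: build a TPA config request for one VNIC. */
static void
build_tpa_cfg(struct hwrm_vnic_tpa_cfg_input *req, uint16_t vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = vnic_id;
	req->flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
		     HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
		     HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
	req->enables = HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
		       HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS;
	req->max_agg_segs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8;	/* up to 8 segments (log2 encoded) */
	req->max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16;		/* up to 16 contexts (log2 encoded) */
}
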
/* hwrm_ring_alloc */
/*
* Description: This command allocates and does basic preparation for a ring.
*/
-/* Input (80 bytes) */
+/* Input (80 bytes) */
struct hwrm_ring_alloc_input {
uint16_t req_type;
/*
@@ -4657,12 +8291,14 @@ struct hwrm_ring_alloc_input {
#define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20)
uint8_t ring_type;
/* Ring Type. */
- /* Completion Ring (CR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
#define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
+ /* RX Ring (RR) */
#define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
uint8_t unused_0;
uint16_t unused_1;
uint64_t page_tbl_addr;
@@ -4725,23 +8361,22 @@ struct hwrm_ring_alloc_input {
* a TX ring.
*/
/* Arbitration policy used for the ring. */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \
- UINT32_C(0xf)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK UINT32_C(0xf)
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
/*
* Use strict priority for the TX ring. Priority
* value is specified in arb_policy_param
*/
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
(UINT32_C(0x1) << 0)
/*
* Use weighted fair queue arbitration for the
* TX ring. Weight is specified in
* arb_policy_param
*/
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
(UINT32_C(0x2) << 0)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
/* Reserved field. */
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0)
@@ -4758,7 +8393,7 @@ struct hwrm_ring_alloc_input {
*/
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
UINT32_C(0xff00)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
uint8_t unused_6;
uint8_t unused_7;
uint32_t reserved3;
@@ -4778,26 +8413,40 @@ struct hwrm_ring_alloc_input {
* translate this value into byte counter and time interval used
* for this ring inside the device.
*/
- /* Bandwidth value */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
+ /* The bandwidth value. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
- /* Reserved */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_RSVD UINT32_C(0x10000000)
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \
+ RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mbps */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MBPS \
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
(UINT32_C(0x1) << 29)
/* Invalid unit */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
uint8_t int_mode;
/*
@@ -4817,7 +8466,7 @@ struct hwrm_ring_alloc_input {
uint8_t unused_8[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_alloc_output {
uint16_t error_code;
/*
@@ -4858,8 +8507,16 @@ struct hwrm_ring_alloc_output {
/* hwrm_ring_free */
/*
* Description: This command is used to free a ring and associated resources.
+ * With QoS and DCBx agents, it is possible the traffic classes will be moved
+ * from one CoS queue to another. When this occurs, the driver shall call
+ * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc'
+ * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free
+ * shall be called on a ring only after it has been idle for 500ms or more and
+ * no frames have been posted to the ring during this time. All frames queued
+ * for transmission shall be completed and at least 500ms time elapsed from the
+ * last completion before calling this command.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ring_free_input {
uint16_t req_type;
/*
@@ -4889,19 +8546,21 @@ struct hwrm_ring_free_input {
*/
uint8_t ring_type;
/* Ring Type. */
- /* Completion Ring (CR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
#define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
+ /* RX Ring (RR) */
#define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
uint8_t unused_0;
uint16_t ring_id;
/* Physical number of ring allocated. */
uint32_t unused_1;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_free_output {
uint16_t error_code;
/*
@@ -4937,7 +8596,7 @@ struct hwrm_ring_free_output {
/*
* Description: This API allocates and does basic preparation for a ring group.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ring_grp_alloc_input {
uint16_t req_type;
/*
@@ -4972,7 +8631,7 @@ struct hwrm_ring_grp_alloc_input {
uint16_t ar;
/*
* This value identifies the aggregation RR associated with the
- * ring group. If this value is 0xFF... (All Fs), then no
+ * ring group. If this value is 0xFF... (All Fs), then no
* Aggregation ring will be set.
*/
uint16_t sc;
@@ -4982,7 +8641,7 @@ struct hwrm_ring_grp_alloc_input {
*/
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_grp_alloc_output {
uint16_t error_code;
/*
@@ -5028,7 +8687,7 @@ struct hwrm_ring_grp_alloc_output {
* a part of executing this command, the HWRM shall reset all associated ring
* group resources.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_ring_grp_free_input {
uint16_t req_type;
/*
@@ -5061,7 +8720,7 @@ struct hwrm_ring_grp_free_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_ring_grp_free_output {
uint16_t error_code;
/*
@@ -5095,12 +8754,61 @@ struct hwrm_ring_grp_free_output {
/* hwrm_cfa_l2_filter_alloc */
/*
- * A filter is used to identify traffic that contains a matching set of
- * parameters like unicast or broadcast MAC address or a VLAN tag amongst
- * other things which then allows the ASIC to direct the incoming traffic
- * to an appropriate VNIC or Rx ring.
+ * Description: An L2 filter is a filter resource that is used to identify a
+ * vnic or ring for a packet based on layer 2 fields. Layer 2 fields for
+ * encapsulated packets include both outer L2 header and/or inner l2 header of
+ * encapsulated packet. The L2 filter resource covers the following OS specific
+ * L2 filters. Linux/FreeBSD (per function): # Broadcast enable/disable # List
+ * of individual multicast filters # All multicast enable/disable filter #
+ * Unicast filters # Promiscuous mode VMware: # Broadcast enable/disable (per
+ * physical function) # All multicast enable/disable (per function) # Unicast
+ * filters per ring or vnic # Promiscuous mode per PF Windows: # Broadcast
+ * enable/disable (per physical function) # List of individual multicast filters
+ * (Driver needs to advertise the maximum number of filters supported) # All
+ * multicast enable/disable per physical function # Unicast filters per vnic #
+ * Promiscuous mode per PF Implementation notes on the use of VNIC in this
+ * command: # By default, these filters belong to default vnic for the function.
+ * # Once these filters are set up, only destination VNIC can be modified. # If
+ * the destination VNIC is not specified in this command, then the HWRM shall
+ * only create an l2 context id. HWRM Implementation notes for multicast
+ * filters: # The hwrm_filter_alloc command can be used to set up multicast
+ * filters (perfect match or partial match). Each individual function driver can
+ * set up multicast filters independently. # The HWRM needs to keep track of
+ * multicast filters set up by function drivers and maintain multicast group
+ * replication records to enable a subset of functions to receive traffic for a
+ * specific multicast address. # When a specific multicast filter cannot be set,
+ * the HWRM shall return an error. In this error case, the driver should fall
+ * back to using one general filter (rather than specific) for all multicast
+ * traffic. # When the SR-IOV is enabled, the HWRM needs to additionally track
+ * source knockout per multicast group record. Examples of setting unicast
+ * filters: For a unicast MAC based filter, one can use a combination of the
+ * fields and masks provided in this command to set up the filter. Below are
+ * some examples: # MAC + no VLAN filter: This filter is used to identify
+ * traffic that does not contain any VLAN tags and matches destination (or
+ * source) MAC address. This filter can be set up by setting only l2_addr field
+ * to be a valid field. All other fields are not valid. The following value is
+ * set for l2_addr. l2_addr = MAC # MAC + Any VLAN filter: This filter is used
+ * to identify traffic that carries single VLAN tag and matches (destination or
+ * source) MAC address. This filter can be set up by setting only l2_addr and
+ * l2_ovlan_mask fields to be valid fields. All other fields are not valid. The
+ * following values are set for those two valid fields. l2_addr = MAC,
+ * l2_ovlan_mask = 0xFFFF # MAC + no VLAN or VLAN ID=0: This filter is used to
+ * identify untagged traffic that does not contain any VLAN tags or a VLAN tag
+ * with VLAN ID = 0 and matches destination (or source) MAC address. This filter
+ * can be set up by setting only l2_addr and l2_ovlan fields to be valid fields.
+ * All other fields are not valid. The following values are set for l2_addr
+ * and l2_ovlan. l2_addr = MAC, l2_ovlan = 0x0 # MAC + no VLAN or any VLAN:
+ * This filter is used to identify traffic that contains zero or 1 VLAN tag
+ * and matches destination (or source) MAC address. This filter can be set up
+ * by setting only l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid
+ * fields. All other fields are not valid. The following values are set for
+ * l2_addr, l2_ovlan, and l2_ovlan_mask fields. l2_addr = MAC, l2_ovlan = 0x0,
+ * l2_ovlan_mask =
+ * 0xFFFF # MAC + VLAN ID filter: This filter can be set up by setting only
+ * l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid fields. All other
+ * fields are not valid. The following values are set for those three valid
+ * fields. l2_addr = MAC, l2_ovlan = VLAN ID, l2_ovlan_mask = 0xF000
*/
-/* Input (96 bytes) */
+/* Input (96 bytes) */
struct hwrm_cfa_l2_filter_alloc_input {
uint16_t req_type;
/*
@@ -5136,10 +8844,12 @@ struct hwrm_cfa_l2_filter_alloc_input {
*/
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
/*
* Setting of this flag indicates the applicability to the
@@ -5156,60 +8866,70 @@ struct hwrm_cfa_l2_filter_alloc_input {
* should not be specified. If this flag is set, then l2_*
* fields refer to fields of outermost L2 header.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
uint32_t enables;
/* This bit must be '1' for the l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
/* This bit must be '1' for the l2_addr_mask field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
+ UINT32_C(0x2)
/* This bit must be '1' for the l2_ovlan field to be configured. */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4)
/*
* This bit must be '1' for the l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK UINT32_C(0x8)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
+ UINT32_C(0x8)
/* This bit must be '1' for the l2_ivlan field to be configured. */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10)
/*
* This bit must be '1' for the l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK UINT32_C(0x20)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
+ UINT32_C(0x20)
/* This bit must be '1' for the t_l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
/*
* This bit must be '1' for the t_l2_addr_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK UINT32_C(0x80)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
+ UINT32_C(0x80)
/* This bit must be '1' for the t_l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN UINT32_C(0x100)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the t_l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK UINT32_C(0x200)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
+ UINT32_C(0x200)
/* This bit must be '1' for the t_l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN UINT32_C(0x400)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
+ UINT32_C(0x400)
/*
* This bit must be '1' for the t_l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK UINT32_C(0x800)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
+ UINT32_C(0x800)
/* This bit must be '1' for the src_type field to be configured. */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000)
/* This bit must be '1' for the src_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
/* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x4000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x4000)
/* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID UINT32_C(0x10000)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x10000)
uint8_t l2_addr[6];
/*
* This value sets the match value for the L2 MAC address.
@@ -5297,34 +9017,38 @@ struct hwrm_cfa_l2_filter_alloc_input {
uint8_t tunnel_type;
/* Tunnel Type. */
/* Non-tunnel */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
/*
* Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
+ * Encapsulation (NVGRE)
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
/*
- * Generic Routing Encapsulation (GRE) inside
+ * Generic Routing Encapsulation (GRE) inside
* Ethernet payload
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
/* IP in IP */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
+ /* Stateless Transport Tunnel (STT) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
/*
- * Generic Routing Encapsulation (GRE) inside IP
+ * Generic Routing Encapsulation (GRE) inside IP
* datagram payload
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
/* Any tunneled traffic */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL UINT32_C(0xff)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
uint8_t unused_7;
uint16_t dst_id;
/*
@@ -5340,11 +9064,14 @@ struct hwrm_cfa_l2_filter_alloc_input {
* filter table.
*/
/* No preference */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER UINT32_C(0x0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
+ UINT32_C(0x0)
/* Above the given filter */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \
+ UINT32_C(0x1)
/* Below the given filter */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
+ UINT32_C(0x2)
/* As high as possible */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3)
/* As low as possible */
@@ -5359,7 +9086,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
*/
} __attribute__((packed));
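To make the "MAC + VLAN ID filter" example from the description above concrete, here is an editorial sketch of how the allocation request could be populated (not part of the patch). The l2_ovlan and l2_ovlan_mask members are the fields named in the description; their declarations fall outside this hunk, and the mask value 0xF000 is taken verbatim from the text. Assumes <string.h> is included.

/* Sketch: RX-path perfect-match filter on destination MAC + outer VLAN ID. */
static void example_fill_mac_vlan_filter(struct hwrm_cfa_l2_filter_alloc_input *req,
					 const uint8_t mac[6], uint16_t vlan_id,
					 uint16_t dst_vnic_id)
{
	memset(req, 0, sizeof(*req));

	/* Filter applies to the RX path and steers matches to dst_id. */
	req->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	req->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
		       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
		       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK |
		       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;

	memcpy(req->l2_addr, mac, 6);
	req->l2_ovlan = vlan_id;	/* outer VLAN ID to match */
	req->l2_ovlan_mask = 0xF000;	/* mask value given in the description */
	req->dst_id = dst_vnic_id;	/* VNIC receiving the matching traffic */
}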
-/* Output (24 bytes) */
+/* Output (24 bytes) */
struct hwrm_cfa_l2_filter_alloc_output {
uint16_t error_code;
/*
@@ -5407,7 +9134,7 @@ struct hwrm_cfa_l2_filter_alloc_output {
* Description: Free a L2 filter. The HWRM shall free all associated filter
* resources with the L2 filter.
*/
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_cfa_l2_filter_free_input {
uint16_t req_type;
/*
@@ -5442,7 +9169,7 @@ struct hwrm_cfa_l2_filter_free_input {
*/
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_cfa_l2_filter_free_output {
uint16_t error_code;
/*
@@ -5476,7 +9203,7 @@ struct hwrm_cfa_l2_filter_free_output {
/* hwrm_cfa_l2_filter_cfg */
/* Description: Change the configuration of an existing L2 filter */
-/* Input (40 bytes) */
+/* Input (40 bytes) */
struct hwrm_cfa_l2_filter_cfg_input {
uint16_t req_type;
/*
@@ -5512,10 +9239,12 @@ struct hwrm_cfa_l2_filter_cfg_input {
*/
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
/*
* Setting of this flag indicates drop action. If this flag is
@@ -5529,7 +9258,8 @@ struct hwrm_cfa_l2_filter_cfg_input {
* This bit must be '1' for the new_mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ UINT32_C(0x2)
uint64_t l2_filter_id;
/*
* This value identifies a set of CFA data structures used for
@@ -5545,7 +9275,7 @@ struct hwrm_cfa_l2_filter_cfg_input {
/* New Logical VNIC ID of the VNIC where traffic is mirrored. */
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_cfa_l2_filter_cfg_output {
uint16_t error_code;
/*
@@ -5579,7 +9309,7 @@ struct hwrm_cfa_l2_filter_cfg_output {
/* hwrm_cfa_l2_set_rx_mask */
/* Description: This command will set rx mask of the function. */
-/* Input (56 bytes) */
+/* Input (56 bytes) */
struct hwrm_cfa_l2_set_rx_mask_input {
uint16_t req_type;
/*
@@ -5632,14 +9362,14 @@ struct hwrm_cfa_l2_set_rx_mask_input {
* the promiscuous mode. The HWRM should accept any function to
* set up promiscuous mode. The HWRM shall follow the semantics
* below for the promiscuous mode support. # When partitioning
- * is not enabled on a port (i.e. single PF on the port), then
+ * is not enabled on a port (i.e. single PF on the port), then
* the PF shall be allowed to be in the promiscuous mode. When
* the PF is in the promiscuous mode, then it shall receive all
* host bound traffic on that port. # When partitioning is
- * enabled on a port (i.e. multiple PFs per port) and a PF on
+ * enabled on a port (i.e. multiple PFs per port) and a PF on
* that port is in the promiscuous mode, then the PF receives
* all traffic within that partition as identified by a unique
- * identifier for the PF (e.g. S-Tag). If a unique outer VLAN
+ * identifier for the PF (e.g. S-Tag). If a unique outer VLAN
* for the PF is specified, then the setting of promiscuous mode
* on that PF shall result in the PF receiving all host bound
* traffic with matching outer VLAN. # A VF shall can be set in
@@ -5652,7 +9382,7 @@ struct hwrm_cfa_l2_set_rx_mask_input {
* mode on a function independently from the promiscuous mode
* settings on other functions.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
/*
* If this flag is set, the corresponding RX filters shall be
* set up to cover multicast/broadcast filters for the outermost
@@ -5685,7 +9415,8 @@ struct hwrm_cfa_l2_set_rx_mask_input {
* set at most one flag out of vlanonly, vlan_nonvlan, and
* anyvlan_nonvlan.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN UINT32_C(0x100)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
+ UINT32_C(0x100)
uint64_t mc_tbl_addr;
/* This is the address for mcast address tbl. */
uint32_t num_mc_entries;
@@ -5708,7 +9439,7 @@ struct hwrm_cfa_l2_set_rx_mask_input {
uint32_t unused_1;
} __attribute__((packed));
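A brief editorial sketch of composing the RX mask (not part of the patch). Only the two mask bits and the multicast-table fields visible in this hunk are used; the `mask` member name is an assumption of this sketch, since its declaration sits outside the hunk. Assumes <stdbool.h> is included.

/* Sketch: accept tagged and untagged unicast plus a multicast list. */
static void example_fill_rx_mask(struct hwrm_cfa_l2_set_rx_mask_input *req,
				 uint64_t mc_tbl_dma, uint32_t mc_count,
				 bool promiscuous)
{
	req->mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
	if (promiscuous)
		req->mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

	req->mc_tbl_addr = mc_tbl_dma;	/* DMA address of the mcast MAC table */
	req->num_mc_entries = mc_count;	/* entries in that table */
}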
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_cfa_l2_set_rx_mask_output {
uint16_t error_code;
/*
@@ -5740,12 +9471,364 @@ struct hwrm_cfa_l2_set_rx_mask_output {
*/
} __attribute__((packed));
+/* hwrm_cfa_vlan_antispoof_cfg */
+/* Description: Configures vlan anti-spoof filters for VF. */
+/* Input (32 bytes) */
+struct hwrm_cfa_vlan_antispoof_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being configured. Only valid for
+ * a VF FID configured by the PF.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t num_vlan_entries;
+ /* Number of VLAN entries in the vlan_tag_mask_tbl. */
+ uint64_t vlan_tag_mask_tbl_addr;
+ /*
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN antispoof
+ * table. Each table entry contains the 16-bit TPID (0x8100 or 0x88a8
+ * only), 16-bit VLAN ID, and a 16-bit mask, all in network order to
+ * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, the mask
+ * value should be 0xfff for the 12-bit VLAN ID.
+ */
+};
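The table entry format described for vlan_tag_mask_tbl_addr can be illustrated as below (editorial sketch, not part of the patch). The entry struct and field names are ours; the layout follows the comment above: 16-bit TPID, VLAN ID, and mask, all in network order. rte_cpu_to_be_16() is DPDK's byte-order helper from rte_byteorder.h.

/* Sketch: one anti-spoof table entry as described above. */
struct example_vlan_antispoof_entry {
	uint16_t tpid;	/* 0x8100 or 0x88a8 only, network order */
	uint16_t vid;	/* VLAN ID, network order */
	uint16_t mask;	/* 0xfff for an individual 12-bit VLAN ID, network order */
};

static void example_fill_antispoof_entry(struct example_vlan_antispoof_entry *e,
					 uint16_t vlan_id)
{
	e->tpid = rte_cpu_to_be_16(0x8100);
	e->vid = rte_cpu_to_be_16(vlan_id);
	e->mask = rte_cpu_to_be_16(0x0fff);
}

/*
 * The request then carries the DMA address of an array of such entries in
 * vlan_tag_mask_tbl_addr and the entry count in num_vlan_entries.
 */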
+
+/* Output (16 bytes) */
+struct hwrm_cfa_vlan_antispoof_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+};
+
+/* hwrm_tunnel_dst_port_query */
+/*
+ * Description: This function is called by a driver to query tunnel type
+ * specific destination port configuration.
+ */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t tunnel_dst_port_id;
+ /*
+ * This field represents the identifier of L4 destination port
+ * used for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP) transports
+ * for tunneling.
+ */
+ uint16_t tunnel_dst_port_val;
+ /*
+ * This field represents the value of L4 destination port
+ * identified by tunnel_dst_port_id. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP) transports
+ * for tunneling. This field is in network byte order. A value
+ * of 0 means that the destination port is not configured.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_tunnel_dst_port_alloc */
+/*
+ * Description: This function is called by a driver to allocate l4 destination
+ * port for a specific tunnel type. The destination port value is provided in
+ * the input. If the HWRM supports only one global destination port for a tunnel
+ * type, then the HWRM shall keep track of its usage as described below. # The
+ * first caller that allocates a destination port shall always succeed and the
+ * HWRM shall save the destination port configuration for that tunnel type and
+ * increment the usage count to 1. # Subsequent callers allocating the same
+ * destination port for that tunnel type shall succeed and the HWRM shall
+ * increment the usage count for that port for each subsequent caller that
+ * succeeds. # Any subsequent caller trying to allocate a different destination
+ * port for that tunnel type shall fail until the usage count for the original
+ * destination port goes to zero. # A caller that frees a port will cause the
+ * usage count for that port to decrement.
+ */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ uint8_t unused_0;
+ uint16_t tunnel_dst_port_val;
+ /*
+ * This field represents the value of L4 destination port used
+ * for the given tunnel type. This field is valid for specific
+ * tunnel types that use layer 4 (e.g. UDP) transports for
+ * tunneling. This field is in network byte order. A value of 0
+ * shall fail the command.
+ */
+ uint32_t unused_1;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t tunnel_dst_port_id;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only
+	 * applies to tunnel types that have l4 destination port
+ * parameters.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
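An editorial usage sketch for the allocation flow above (not part of the patch): register the IANA-assigned VXLAN UDP port and keep the returned identifier so it can later be passed to hwrm_tunnel_dst_port_free. The transport helper is hypothetical; assumes <string.h> and rte_byteorder.h are included.

extern int example_send_hwrm2(void *req, size_t req_len,
			      void *resp, size_t resp_len);	/* hypothetical */

static int example_alloc_vxlan_port(uint16_t *dst_port_id)
{
	struct hwrm_tunnel_dst_port_alloc_input req;
	struct hwrm_tunnel_dst_port_alloc_output resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req.tunnel_dst_port_val = rte_cpu_to_be_16(4789);	/* network order, non-zero */

	if (example_send_hwrm2(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;

	*dst_port_id = resp.tunnel_dst_port_id;	/* needed for the later free */
	return 0;
}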
+
+/* hwrm_tunnel_dst_port_free */
+/*
+ * Description: This function is called by a driver to free l4 destination port
+ * for a specific tunnel type.
+ */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ uint8_t unused_0;
+ uint16_t tunnel_dst_port_id;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only
+	 * applies to tunnel types that have l4 destination port
+ * parameters.
+ */
+ uint32_t unused_1;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
/* hwrm_stat_ctx_alloc */
/*
* Description: This command allocates and does basic preparation for a stat
* context.
*/
-/* Input (32 bytes) */
+/* Input (32 bytes) */
struct hwrm_stat_ctx_alloc_input {
uint16_t req_type;
/*
@@ -5783,10 +9866,24 @@ struct hwrm_stat_ctx_alloc_input {
* used. In this case, the stat block can only be read by
* hwrm_stat_ctx_query command.
*/
- uint32_t unused_0;
+ uint8_t stat_ctx_flags;
+ /*
+ * This field is used to specify statistics context specific
+ * configuration flags.
+ */
+ /*
+ * When this bit is set to '1', the statistics context shall be
+ * allocated for RoCE traffic only. In this case, traffic other
+ * than offloaded RoCE traffic shall not be included in this
+ * statistic context. When this bit is set to '0', the
+ * statistics context shall be used for the network traffic
+ * other than offloaded RoCE traffic.
+ */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ uint8_t unused_0[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_stat_ctx_alloc_output {
uint16_t error_code;
/*
@@ -5821,7 +9918,7 @@ struct hwrm_stat_ctx_alloc_output {
/* hwrm_stat_ctx_free */
/* Description: This command is used to free a stat context. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_stat_ctx_free_input {
uint16_t req_type;
/*
@@ -5854,7 +9951,7 @@ struct hwrm_stat_ctx_free_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_stat_ctx_free_output {
uint16_t error_code;
/*
@@ -5889,7 +9986,7 @@ struct hwrm_stat_ctx_free_output {
/* hwrm_stat_ctx_clr_stats */
/* Description: This command clears statistics of a context. */
-/* Input (24 bytes) */
+/* Input (24 bytes) */
struct hwrm_stat_ctx_clr_stats_input {
uint16_t req_type;
/*
@@ -5922,7 +10019,7 @@ struct hwrm_stat_ctx_clr_stats_input {
uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_stat_ctx_clr_stats_output {
uint16_t error_code;
/*
@@ -5954,6 +10051,113 @@ struct hwrm_stat_ctx_clr_stats_output {
*/
} __attribute__((packed));
+/* hwrm_stat_ctx_query */
+/* Description: This command returns statistics of a context. */
+/* Input (24 bytes) */
+
+struct hwrm_stat_ctx_query_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t stat_ctx_id;
+ /* ID of the statistics context that is being queried. */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (176 bytes) */
+
+struct hwrm_stat_ctx_query_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in parameters,
+ * and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_err_pkts;
+ /* Number of transmitted packets with error */
+ uint64_t tx_drop_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t rx_ucast_pkts;
+ /* Number of received unicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_err_pkts;
+ /* Number of received packets with error */
+ uint64_t rx_drop_pkts;
+	/* Number of dropped packets on receive path */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast packets */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregated unicast bytes */
+ uint64_t rx_agg_events;
+ /* Number of aggregation events */
+ uint64_t rx_agg_aborts;
+ /* Number of aborted aggregations */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
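Small editorial helpers (not part of the patch) showing how the per-context counters above can be combined once the response's valid byte has been observed as '1':

static uint64_t example_total_rx_pkts(const struct hwrm_stat_ctx_query_output *o)
{
	/* Unicast + multicast + broadcast received packets. */
	return o->rx_ucast_pkts + o->rx_mcast_pkts + o->rx_bcast_pkts;
}

static uint64_t example_total_tx_bytes(const struct hwrm_stat_ctx_query_output *o)
{
	/* Unicast + multicast + broadcast transmitted bytes. */
	return o->tx_ucast_bytes + o->tx_mcast_bytes + o->tx_bcast_bytes;
}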
+
/* hwrm_exec_fwd_resp */
/*
* Description: This command is used to send an encapsulated request to the
@@ -5964,7 +10168,7 @@ struct hwrm_stat_ctx_clr_stats_output {
* acknowledge the receipt of the encapsulated request and forwarding of the
* response.
*/
-/* Input (128 bytes) */
+/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
uint16_t req_type;
/*
@@ -6008,7 +10212,7 @@ struct hwrm_exec_fwd_resp_input {
uint16_t unused_0[3];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* Output (16 bytes) */
struct hwrm_exec_fwd_resp_output {
uint16_t error_code;
/*
@@ -6040,4 +10244,631 @@ struct hwrm_exec_fwd_resp_output {
*/
} __attribute__((packed));
-#endif
+/* hwrm_reject_fwd_resp */
+/*
+ * Description: This command is used to send an encapsulated request to the
+ * HWRM. This command instructs the HWRM to reject the request and forward the
+ * error response of the encapsulated request to the location specified in the
+ * original request that is encapsulated. The target id of this command shall be
+ * set to 0xFFFF (HWRM). The response location in this command shall be used to
+ * acknowledge the receipt of the encapsulated request and forwarding of the
+ * response.
+ */
+/* Input (128 bytes) */
+struct hwrm_reject_fwd_resp_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This is an encapsulated request. This request should be
+ * rejected by the HWRM and the error response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint16_t encap_resp_target_id;
+ /*
+ * This value indicates the target id of the response to the
+ * encapsulated request. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
+ * HWRM
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* Hardware Resource Manager Specification */
+/* Description: This structure is used to specify port description. */
+/*
+ * Note: The Hardware Resource Manager (HWRM) manages various hardware resources
+ * inside the chip. The HWRM is implemented in firmware, and runs on embedded
+ * processors inside the chip. This firmware service is vital part of the chip.
+ * The chip can not be used by a driver or HWRM client without the HWRM.
+ */
+/* Input (16 bytes) */
+struct input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+} __attribute__((packed));
+
+/* Output (8 bytes) */
+struct output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+} __attribute__((packed));
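An editorial sketch (not part of the patch) of how the common header above is filled before a command is issued; sequence-number generation and the response DMA buffer are left to the caller.

static void example_prep_request(struct input *hdr, uint16_t req_type,
				 uint16_t seq_id, uint64_t resp_dma_addr)
{
	hdr->req_type = req_type;
	hdr->cmpl_ring = (uint16_t)-1;	/* -1: no completion-ring completion */
	hdr->seq_id = seq_id;		/* command sequence number */
	hdr->target_id = 0xFFFF;	/* 0xFFFF addresses the HWRM itself */
	/*
	 * resp_dma_addr must reference a 16B-aligned buffer that was cleared
	 * to zero before the request is made, per the field comment above.
	 */
	hdr->resp_addr = resp_dma_addr;
}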
+
+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+ uint16_t req_type;
+ uint16_t signature;
+ #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD (UINT32_C(0x4321))
+ uint16_t unused_0;
+ uint16_t size;
+ uint64_t req_addr;
+} __attribute__((packed));
+
+#define HWRM_GET_HWRM_ERROR_CODE(arg) \
+	({ \
+		typeof(arg) x = (arg); \
+		((x) == 0xf ? "HWRM_ERROR" : \
+		((x) == 0xffff ? "CMD_NOT_SUPPORTED" : \
+		((x) == 0xfffe ? "UNKNOWN_ERR" : \
+		((x) == 0x4 ? "RESOURCE_ALLOC_ERROR" : \
+		((x) == 0x5 ? "INVALID_FLAGS" : \
+		((x) == 0x6 ? "INVALID_ENABLES" : \
+		((x) == 0x0 ? "SUCCESS" : \
+		((x) == 0x1 ? "FAIL" : \
+		((x) == 0x2 ? "INVALID_PARAMS" : \
+		((x) == 0x3 ? "RESOURCE_ACCESS_DENIED" : \
+		"Unknown error_code")))))))))); \
+	})
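A one-line usage illustration (editorial, not part of the patch; it assumes the statement-expression form of the macro above and a response pointer named resp):

	RTE_LOG(ERR, PMD, "HWRM command failed: %s\n",
		HWRM_GET_HWRM_ERROR_CODE(resp->error_code));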
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ uint16_t error_code;
+ /* These are numbers assigned to return/error codes. */
+ /* Request was successfully executed by the HWRM. */
+ #define HWRM_ERR_CODE_SUCCESS (UINT32_C(0x0))
+	/* The HWRM failed to execute the request. */
+ #define HWRM_ERR_CODE_FAIL (UINT32_C(0x1))
+ /*
+ * The request contains invalid argument(s) or
+ * input parameters.
+ */
+ #define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
+ /*
+ * The requester is not allowed to access the
+ * requested resource. This error code shall be
+ * provided in a response to a request to query
+ * or modify an existing resource that is not
+ * accessible by the requester.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
+ /*
+ * The HWRM is unable to allocate the requested
+ * resource. This code only applies to requests
+ * for HWRM resource allocations.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (UINT32_C(0x4))
+ /* Invalid combination of flags is specified in the request. */
+ #define HWRM_ERR_CODE_INVALID_FLAGS (UINT32_C(0x5))
+ /*
+ * Invalid combination of enables fields is
+ * specified in the request.
+ */
+ #define HWRM_ERR_CODE_INVALID_ENABLES (UINT32_C(0x6))
+ /*
+ * Generic HWRM execution error that represents
+ * an internal error.
+ */
+ #define HWRM_ERR_CODE_HWRM_ERROR (UINT32_C(0xf))
+ /* Unknown error */
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (UINT32_C(0xfffe))
+ /* Unsupported or invalid command */
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (UINT32_C(0xffff))
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t opaque_0;
+ /* debug info for this error response. */
+ uint16_t opaque_1;
+ /* debug info for this error response. */
+ uint8_t cmd_err;
+ /*
+ * In the case of an error response, command specific error code
+ * is returned in this field.
+ */
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ uint64_t tx_64b_frames;
+ /* Total Number of 64 Bytes frames transmitted */
+ uint64_t tx_65b_127b_frames;
+ /* Total Number of 65-127 Bytes frames transmitted */
+ uint64_t tx_128b_255b_frames;
+ /* Total Number of 128-255 Bytes frames transmitted */
+ uint64_t tx_256b_511b_frames;
+ /* Total Number of 256-511 Bytes frames transmitted */
+ uint64_t tx_512b_1023b_frames;
+ /* Total Number of 512-1023 Bytes frames transmitted */
+ uint64_t tx_1024b_1518_frames;
+ /* Total Number of 1024-1518 Bytes frames transmitted */
+ uint64_t tx_good_vlan_frames;
+ /*
+	 * Total Number of each good VLAN (excludes FCS errors) frame
+ * transmitted which is 1519 to 1522 bytes in length inclusive
+ * (excluding framing bits but including FCS bytes).
+ */
+ uint64_t tx_1519b_2047_frames;
+ /* Total Number of 1519-2047 Bytes frames transmitted */
+ uint64_t tx_2048b_4095b_frames;
+ /* Total Number of 2048-4095 Bytes frames transmitted */
+ uint64_t tx_4096b_9216b_frames;
+ /* Total Number of 4096-9216 Bytes frames transmitted */
+ uint64_t tx_9217b_16383b_frames;
+ /* Total Number of 9217-16383 Bytes frames transmitted */
+ uint64_t tx_good_frames;
+ /* Total Number of good frames transmitted */
+ uint64_t tx_total_frames;
+ /* Total Number of frames transmitted */
+ uint64_t tx_ucast_frames;
+ /* Total number of unicast frames transmitted */
+ uint64_t tx_mcast_frames;
+ /* Total number of multicast frames transmitted */
+ uint64_t tx_bcast_frames;
+ /* Total number of broadcast frames transmitted */
+ uint64_t tx_pause_frames;
+ /* Total number of PAUSE control frames transmitted */
+ uint64_t tx_pfc_frames;
+ /* Total number of PFC/per-priority PAUSE control frames transmitted */
+ uint64_t tx_jabber_frames;
+ /* Total number of jabber frames transmitted */
+ uint64_t tx_fcs_err_frames;
+ /* Total number of frames transmitted with FCS error */
+ uint64_t tx_control_frames;
+ /* Total number of control frames transmitted */
+ uint64_t tx_oversz_frames;
+ /* Total number of over-sized frames transmitted */
+ uint64_t tx_single_dfrl_frames;
+ /* Total number of frames with single deferral */
+ uint64_t tx_multi_dfrl_frames;
+ /* Total number of frames with multiple deferrals */
+ uint64_t tx_single_coll_frames;
+ /* Total number of frames with single collision */
+ uint64_t tx_multi_coll_frames;
+ /* Total number of frames with multiple collisions */
+ uint64_t tx_late_coll_frames;
+ /* Total number of frames with late collisions */
+ uint64_t tx_excessive_coll_frames;
+ /* Total number of frames with excessive collisions */
+ uint64_t tx_frag_frames;
+ /* Total number of fragmented frames transmitted */
+ uint64_t tx_err;
+ /* Total number of transmit errors */
+ uint64_t tx_tagged_frames;
+ /* Total number of single VLAN tagged frames transmitted */
+ uint64_t tx_dbl_tagged_frames;
+ /* Total number of double VLAN tagged frames transmitted */
+ uint64_t tx_runt_frames;
+ /* Total number of runt frames transmitted */
+ uint64_t tx_fifo_underruns;
+	/* Total number of TX FIFO underruns */
+ uint64_t tx_pfc_ena_frames_pri0;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 0
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri1;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 1
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri2;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 2
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri3;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 3
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri4;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 4
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri5;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 5
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri6;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 6
+ * transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri7;
+ /*
+ * Total number of PFC frames with PFC enabled bit for Pri 7
+ * transmitted
+ */
+ uint64_t tx_eee_lpi_events;
+ /* Total number of EEE LPI Events on TX */
+ uint64_t tx_eee_lpi_duration;
+ /* EEE LPI Duration Counter on TX */
+ uint64_t tx_llfc_logical_msgs;
+ /*
+ * Total number of Link Level Flow Control (LLFC) messages
+ * transmitted
+ */
+ uint64_t tx_hcfc_msgs;
+ /* Total number of HCFC messages transmitted */
+ uint64_t tx_total_collisions;
+ /* Total number of TX collisions */
+ uint64_t tx_bytes;
+ /* Total number of transmitted bytes */
+ uint64_t tx_xthol_frames;
+ /* Total number of end-to-end HOL frames */
+ uint64_t tx_stat_discard;
+ /* Total Tx Drops per Port reported by STATS block */
+ uint64_t tx_stat_error;
+ /* Total Tx Error Drops per Port reported by STATS block */
+} __attribute__((packed));
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ uint64_t rx_64b_frames;
+ /* Total Number of 64 Bytes frames received */
+ uint64_t rx_65b_127b_frames;
+ /* Total Number of 65-127 Bytes frames received */
+ uint64_t rx_128b_255b_frames;
+ /* Total Number of 128-255 Bytes frames received */
+ uint64_t rx_256b_511b_frames;
+ /* Total Number of 256-511 Bytes frames received */
+ uint64_t rx_512b_1023b_frames;
+ /* Total Number of 512-1023 Bytes frames received */
+ uint64_t rx_1024b_1518_frames;
+ /* Total Number of 1024-1518 Bytes frames received */
+ uint64_t rx_good_vlan_frames;
+ /*
+	 * Total Number of each good VLAN (excludes FCS errors) frame
+ * received which is 1519 to 1522 bytes in length inclusive
+ * (excluding framing bits but including FCS bytes).
+ */
+ uint64_t rx_1519b_2047b_frames;
+ /* Total Number of 1519-2047 Bytes frames received */
+ uint64_t rx_2048b_4095b_frames;
+ /* Total Number of 2048-4095 Bytes frames received */
+ uint64_t rx_4096b_9216b_frames;
+ /* Total Number of 4096-9216 Bytes frames received */
+ uint64_t rx_9217b_16383b_frames;
+ /* Total Number of 9217-16383 Bytes frames received */
+ uint64_t rx_total_frames;
+ /* Total number of frames received */
+ uint64_t rx_ucast_frames;
+ /* Total number of unicast frames received */
+ uint64_t rx_mcast_frames;
+ /* Total number of multicast frames received */
+ uint64_t rx_bcast_frames;
+ /* Total number of broadcast frames received */
+ uint64_t rx_fcs_err_frames;
+ /* Total number of received frames with FCS error */
+ uint64_t rx_ctrl_frames;
+ /* Total number of control frames received */
+ uint64_t rx_pause_frames;
+ /* Total number of PAUSE frames received */
+ uint64_t rx_pfc_frames;
+ /* Total number of PFC frames received */
+ uint64_t rx_unsupported_opcode_frames;
+ /* Total number of frames received with an unsupported opcode */
+ uint64_t rx_unsupported_da_pausepfc_frames;
+ /*
+ * Total number of frames received with an unsupported DA for
+ * pause and PFC
+ */
+ uint64_t rx_wrong_sa_frames;
+ /* Total number of frames received with an unsupported SA */
+ uint64_t rx_align_err_frames;
+ /* Total number of received packets with alignment error */
+ uint64_t rx_oor_len_frames;
+ /* Total number of received frames with out-of-range length */
+ uint64_t rx_code_err_frames;
+ /* Total number of received frames with error termination */
+ uint64_t rx_false_carrier_frames;
+ /*
+	 * Total number of received frames for which a false carrier was
+	 * detected during idle, as defined by RX_ER samples active and
+ * RXD is 0xE. The event is reported along with the statistics
+ * generated on the next received frame. Only one false carrier
+ * condition can be detected and logged between frames. Carrier
+ * event, valid for 10M/100M speed modes only.
+ */
+ uint64_t rx_ovrsz_frames;
+ /* Total number of over-sized frames received */
+ uint64_t rx_jbr_frames;
+ /* Total number of jabber packets received */
+ uint64_t rx_mtu_err_frames;
+ /* Total number of received frames with MTU error */
+ uint64_t rx_match_crc_frames;
+ /* Total number of received frames with CRC match */
+ uint64_t rx_promiscuous_frames;
+ /* Total number of frames received promiscuously */
+ uint64_t rx_tagged_frames;
+ /* Total number of received frames with one or two VLAN tags */
+ uint64_t rx_double_tagged_frames;
+ /* Total number of received frames with two VLAN tags */
+ uint64_t rx_trunc_frames;
+ /* Total number of truncated frames received */
+ uint64_t rx_good_frames;
+ /* Total number of good frames (without errors) received */
+ uint64_t rx_pfc_xon2xoff_frames_pri0;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 0
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri1;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 1
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri2;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 2
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri3;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 3
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri4;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 4
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri5;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 5
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri6;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 6
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri7;
+ /*
+ * Total number of received PFC frames with transition from XON
+ * to XOFF on Pri 7
+ */
+ uint64_t rx_pfc_ena_frames_pri0;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 0
+ */
+ uint64_t rx_pfc_ena_frames_pri1;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 1
+ */
+ uint64_t rx_pfc_ena_frames_pri2;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 2
+ */
+ uint64_t rx_pfc_ena_frames_pri3;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 3
+ */
+ uint64_t rx_pfc_ena_frames_pri4;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 4
+ */
+ uint64_t rx_pfc_ena_frames_pri5;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 5
+ */
+ uint64_t rx_pfc_ena_frames_pri6;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 6
+ */
+ uint64_t rx_pfc_ena_frames_pri7;
+ /*
+ * Total number of received PFC frames with PFC enabled bit for
+ * Pri 7
+ */
+ uint64_t rx_sch_crc_err_frames;
+ /* Total Number of frames received with SCH CRC error */
+ uint64_t rx_undrsz_frames;
+ /* Total Number of under-sized frames received */
+ uint64_t rx_frag_frames;
+ /* Total Number of fragmented frames received */
+ uint64_t rx_eee_lpi_events;
+ /* Total number of RX EEE LPI Events */
+ uint64_t rx_eee_lpi_duration;
+ /* EEE LPI Duration Counter on RX */
+ uint64_t rx_llfc_physical_msgs;
+ /*
+ * Total number of physical type Link Level Flow Control (LLFC)
+ * messages received
+ */
+ uint64_t rx_llfc_logical_msgs;
+ /*
+ * Total number of logical type Link Level Flow Control (LLFC)
+ * messages received
+ */
+ uint64_t rx_llfc_msgs_with_crc_err;
+ /*
+ * Total number of logical type Link Level Flow Control (LLFC)
+ * messages received with CRC error
+ */
+ uint64_t rx_hcfc_msgs;
+ /* Total number of HCFC messages received */
+ uint64_t rx_hcfc_msgs_with_crc_err;
+ /* Total number of HCFC messages received with CRC error */
+ uint64_t rx_bytes;
+ /* Total number of received bytes */
+ uint64_t rx_runt_bytes;
+ /* Total number of bytes received in runt frames */
+ uint64_t rx_runt_frames;
+ /* Total number of runt frames received */
+ uint64_t rx_stat_discard;
+ /* Total Rx Discards per Port reported by STATS block */
+ uint64_t rx_stat_err;
+ /* Total Rx Error Drops per Port reported by STATS block */
+} __attribute__((packed));
+
+/* Periodic Statistics Context DMA to host (160 bytes) */
+/*
+ * per-context HW statistics -- chip view
+ */
+
+struct ctx_hw_stats64 {
+ uint64_t rx_ucast_pkts;
+ uint64_t rx_mcast_pkts;
+ uint64_t rx_bcast_pkts;
+ uint64_t rx_drop_pkts;
+ uint64_t rx_discard_pkts;
+ uint64_t rx_ucast_bytes;
+ uint64_t rx_mcast_bytes;
+ uint64_t rx_bcast_bytes;
+
+ uint64_t tx_ucast_pkts;
+ uint64_t tx_mcast_pkts;
+ uint64_t tx_bcast_pkts;
+ uint64_t tx_drop_pkts;
+ uint64_t tx_discard_pkts;
+ uint64_t tx_ucast_bytes;
+ uint64_t tx_mcast_bytes;
+ uint64_t tx_bcast_bytes;
+
+ uint64_t tpa_pkts;
+ uint64_t tpa_bytes;
+ uint64_t tpa_events;
+ uint64_t tpa_aborts;
+} __attribute__((packed));
+
+#endif /* _HSI_STRUCT_DEF_DPDK_ */
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
new file mode 100644
index 00000000..c343d903
--- /dev/null
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -0,0 +1,843 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_byteorder.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "rte_pmd_bnxt.h"
+#include "hsi_struct_def_dpdk.h"
+
+int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
+{
+ struct rte_pmd_bnxt_mb_event_param ret_param;
+
+ ret_param.retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;
+ ret_param.vf_id = vf_id;
+ ret_param.msg = msg;
+
+ _rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
+
+ /* Default to approve */
+ if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
+ ret_param.retval = RTE_PMD_BNXT_MB_EVENT_NOOP_ACK;
+
+ return ret_param.retval == RTE_PMD_BNXT_MB_EVENT_NOOP_ACK ?
+ true : false;
+}
+
+int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set Tx loopback on non-PF port %d!\n",
+ port);
+ return -ENOTSUP;
+ }
+
+ if (on)
+ bp->pf.evb_mode = BNXT_EVB_MODE_VEB;
+ else
+ bp->pf.evb_mode = BNXT_EVB_MODE_VEPA;
+
+ rc = bnxt_hwrm_pf_evb_mode(bp);
+
+ return rc;
+}
+
+static void
+rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+ uint8_t *on = onptr;
+ vnic->bd_stall = !(*on);
+}
+
+int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+ uint32_t i;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set all queues drop on non-PF port!\n");
+ return -ENOTSUP;
+ }
+
+ if (bp->vnic_info == NULL)
+ return -ENODEV;
+
+ /* Stall PF */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ bp->vnic_info[i].bd_stall = !on;
+ rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+ return rc;
+ }
+ }
+
+ /* Stall all active VFs */
+ for (i = 0; i < bp->pf.active_vfs; i++) {
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i,
+ rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
+ bnxt_hwrm_vnic_cfg);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs || mac_addr == NULL)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VF %d mac address on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_func_vf_mac(bp, vf, (uint8_t *)mac_addr);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ uint16_t tot_rate = 0;
+ uint64_t idx;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ eth_dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(eth_dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (!bp->pf.active_vfs)
+ return -EINVAL;
+
+ if (vf >= bp->pf.max_vfs)
+ return -EINVAL;
+
+ /* Add up the per queue BW and configure MAX BW of the VF */
+ for (idx = 0; idx < 64; idx++) {
+ if ((1ULL << idx) & q_msk)
+ tot_rate += tx_rate;
+ }
+
+ /* Requested BW can't be greater than link speed */
+ if (tot_rate > eth_dev->data->dev_link.link_speed) {
+ RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
+ return -EINVAL;
+ }
+
+ /* Requested BW already configured */
+ if (tot_rate == bp->pf.vf_info[vf].max_tx_rate)
+ return 0;
+
+ rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate,
+ HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
+
+ if (!rc)
+ bp->pf.vf_info[vf].max_tx_rate = tot_rate;
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ uint32_t func_flags;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set mac spoof on non-PF port %d!\n", port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ /* Prev setting same as new setting. */
+ if (on == bp->pf.vf_info[vf].mac_spoof_en)
+ return 0;
+
+ func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+ func_flags &= ~(HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE |
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
+
+ if (on)
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+ else
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+ if (!rc) {
+ bp->pf.vf_info[vf].mac_spoof_en = on;
+ bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VLAN spoof on non-PF port %d!\n", port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on == bp->pf.vf_info[vf].vlan_spoof_en)
+ return 0;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
+ if (!rc) {
+ bp->pf.vf_info[vf].vlan_spoof_en = on;
+ if (on) {
+ if (bnxt_hwrm_cfa_vlan_antispoof_cfg(bp,
+ bp->pf.first_vf_id + vf,
+ bp->pf.vf_info[vf].vlan_count,
+ bp->pf.vf_info[vf].vlan_as_table))
+ rc = -1;
+ }
+ } else {
+ RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ }
+
+ return rc;
+}
+
+static void
+rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr)
+{
+ uint8_t *on = onptr;
+ vnic->vlan_strip = *on;
+}
+
+int
+rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VF %d stripq on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+ rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
+ bnxt_hwrm_vnic_cfg);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint16_t flag = 0;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!bp->pf.vf_info)
+ return -EINVAL;
+
+ if (vf >= bp->pdev->max_vfs)
+ return -EINVAL;
+
+ if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
+ RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ return -ENOTSUP;
+ }
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
+ RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
+ return -ENOTSUP;
+ }
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ flag |= BNXT_VNIC_INFO_BCAST;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ flag |= BNXT_VNIC_INFO_ALLMULTI;
+
+ if (on)
+ bp->pf.vf_info[vf].l2_rx_mask |= flag;
+ else
+ bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+
+ rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
+ vf_vnic_set_rxmask_cb,
+ &bp->pf.vf_info[vf].l2_rx_mask,
+ bnxt_set_rx_mask_no_vlan);
+ if (rc)
+ RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+
+ return rc;
+}
+
+static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
+{
+ int rc = 0;
+ int dflt_vnic;
+ struct bnxt_vnic_info vnic;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VLAN table on non-PF port!\n");
+ return -EINVAL;
+ }
+
+ if (vf >= bp->pdev->max_vfs)
+ return -EINVAL;
+
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+		/* This simply indicates there is no driver loaded on the VF;
+		 * it is logged but not treated as an error.
+		 */
+ RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+ } else {
+ memset(&vnic, 0, sizeof(vnic));
+ vnic.fw_vnic_id = dflt_vnic;
+ if (bnxt_hwrm_vnic_qcfg(bp, &vnic,
+ bp->pf.first_vf_id + vf) == 0) {
+ if (bnxt_hwrm_cfa_l2_set_rx_mask(bp, &vnic,
+ bp->pf.vf_info[vf].vlan_count,
+ bp->pf.vf_info[vf].vlan_table))
+ rc = -1;
+ } else {
+ rc = -1;
+ }
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct bnxt_vlan_table_entry *ve;
+ struct bnxt_vlan_antispoof_table_entry *vase;
+ struct rte_eth_dev *dev;
+ struct bnxt *bp;
+ uint16_t cnt;
+ int rc = 0;
+ int i, j;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ bp = (struct bnxt *)dev->data->dev_private;
+ if (!bp->pf.vf_info)
+ return -EINVAL;
+
+ for (i = 0; vf_mask; i++, vf_mask >>= 1) {
+ cnt = bp->pf.vf_info[i].vlan_count;
+ if ((vf_mask & 1) == 0)
+ continue;
+
+ if (bp->pf.vf_info[i].vlan_table == NULL) {
+ rc = -1;
+ continue;
+ }
+ if (bp->pf.vf_info[i].vlan_as_table == NULL) {
+ rc = -1;
+ continue;
+ }
+ if (vlan_on) {
+ /* First, search for a duplicate... */
+ for (j = 0; j < cnt; j++) {
+ if (rte_be_to_cpu_16(
+ bp->pf.vf_info[i].vlan_table[j].vid) == vlan)
+ break;
+ }
+ if (j == cnt) {
+ /* Now check that there's space */
+ if (cnt == getpagesize() / sizeof(struct
+ bnxt_vlan_antispoof_table_entry)) {
+ RTE_LOG(ERR, PMD,
+ "VLAN anti-spoof table is full\n");
+ RTE_LOG(ERR, PMD,
+ "VF %d cannot add VLAN %u\n",
+ i, vlan);
+ rc = -1;
+ continue;
+ }
+
+ /* cnt is one less than vlan_count */
+ cnt = bp->pf.vf_info[i].vlan_count++;
+ /*
+ * And finally, add to the
+ * end of the table
+ */
+ vase = &bp->pf.vf_info[i].vlan_as_table[cnt];
+				/* TODO: Hardcoded TPID */
+ vase->tpid = rte_cpu_to_be_16(0x8100);
+ vase->vid = rte_cpu_to_be_16(vlan);
+ vase->mask = rte_cpu_to_be_16(0xfff);
+ ve = &bp->pf.vf_info[i].vlan_table[cnt];
+ /* TODO: Hardcoded TPID */
+ ve->tpid = rte_cpu_to_be_16(0x8100);
+ ve->vid = rte_cpu_to_be_16(vlan);
+ }
+ } else {
+ for (j = 0; j < cnt; j++) {
+ if (rte_be_to_cpu_16(
+ bp->pf.vf_info[i].vlan_table[j].vid) != vlan)
+ continue;
+ memmove(&bp->pf.vf_info[i].vlan_table[j],
+ &bp->pf.vf_info[i].vlan_table[j + 1],
+ getpagesize() - ((j + 1) *
+ sizeof(struct bnxt_vlan_table_entry)));
+ memmove(&bp->pf.vf_info[i].vlan_as_table[j],
+ &bp->pf.vf_info[i].vlan_as_table[j + 1],
+ getpagesize() - ((j + 1) * sizeof(struct
+ bnxt_vlan_antispoof_table_entry)));
+ j--;
+ cnt = --bp->pf.vf_info[i].vlan_count;
+ }
+ }
+ bnxt_set_vf_table(bp, i);
+ }
+
+ return rc;
+}
+
+int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to get VF %d stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats);
+}
+
+int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to reset VF %d stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to query VF %d RX stats on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_vf_vnic_count(bp, vf_id);
+}
+
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+ uint64_t *count)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to query VF %d TX drops on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf.first_vf_id + vf_id,
+ count);
+}
+
+int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
+ uint32_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info vnic;
+ struct ether_addr dflt_mac;
+ int rc;
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf_id >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to config VF %d MAC on non-PF port %d!\n",
+ vf_id, port);
+ return -ENOTSUP;
+ }
+
+ /* If the VF currently uses a random MAC, update default to this one */
+ if (bp->pf.vf_info[vf_id].random_mac) {
+ if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0)
+ rc = bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
+ }
+
+ /* query the default VNIC id used by the function */
+ rc = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf_id);
+ if (rc < 0)
+ goto exit;
+
+ memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
+ vnic.fw_vnic_id = rte_le_to_cpu_16(rc);
+ rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf_id);
+ if (rc < 0)
+ goto exit;
+
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[vf_id].filter, next) {
+ if (filter->flags ==
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX &&
+ filter->enables ==
+ (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
+ memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
+ bnxt_hwrm_clear_filter(bp, filter);
+ break;
+ }
+ }
+
+ if (filter == NULL)
+ filter = bnxt_alloc_vf_filter(bp, vf_id);
+
+ filter->fw_l2_filter_id = UINT64_MAX;
+ filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+
+ /* Do not add a filter for the default MAC */
+ if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
+ memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
+ rc = bnxt_hwrm_set_filter(bp, vnic.fw_vnic_id, filter);
+
+exit:
+ return rc;
+}
+
+int
+rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_bnxt_supported(dev))
+ return -ENOTSUP;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set VF %d vlan insert on non-PF port %d!\n",
+ vf, port);
+ return -ENOTSUP;
+ }
+
+ bp->pf.vf_info[vf].dflt_vlan = vlan_id;
+ if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) ==
+ bp->pf.vf_info[vf].dflt_vlan)
+ return 0;
+
+ rc = bnxt_hwrm_set_vf_vlan(bp, vf);
+
+ return rc;
+}
+
+int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ uint32_t func_flags;
+ struct bnxt *bp;
+ int rc;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+ bp = (struct bnxt *)dev->data->dev_private;
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to set persist stats on non-PF port %d!\n",
+ port);
+ return -EINVAL;
+ }
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ /* Prev setting same as new setting. */
+ if (on == bp->pf.vf_info[vf].persist_stats)
+ return 0;
+
+ func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+
+ if (on)
+ func_flags |=
+ HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+ else
+ func_flags &=
+ ~HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC;
+
+ rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
+ if (!rc) {
+ bp->pf.vf_info[vf].persist_stats = on;
+ bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+ }
+
+ return rc;
+}
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
new file mode 100644
index 00000000..c4c4770e
--- /dev/null
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -0,0 +1,354 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PMD_BNXT_H_
+#define _PMD_BNXT_H_
+
+#include <rte_ethdev.h>
+
+/*
+ * Response sent back to the caller after callback
+ */
+enum rte_pmd_bnxt_mb_event_rsp {
+ RTE_PMD_BNXT_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_BNXT_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_BNXT_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_BNXT_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/* mailbox message types */
+#define BNXT_VF_RESET 0x01 /* VF requests reset */
+#define BNXT_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define BNXT_VF_SET_VLAN 0x03 /* VF requests PF to set VLAN */
+#define BNXT_VF_SET_MTU 0x04 /* VF requests PF to set MTU */
+#define BNXT_VF_SET_MRU 0x05 /* VF requests PF to set MRU */
+
+/*
+ * Data sent to the caller when the callback is executed.
+ */
+struct rte_pmd_bnxt_mb_event_param {
+ uint16_t vf_id; /* Virtual Function number */
+ int retval; /* return value */
+ void *msg; /* pointer to message */
+};
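
This structure is what the application's RTE_ETH_EVENT_VF_MBOX callback receives and answers: the PMD fills it in and the handler decides by writing retval before returning. A minimal sketch, assuming the DPDK 17.08 rte_eth_dev_cb_fn prototype (int return value, trailing ret_param argument) and a purely hypothetical VF-id policy:

#include <rte_common.h>
#include <rte_ethdev.h>
#include "rte_pmd_bnxt.h"

/* Application-side VF mailbox handler. The PMD passes a
 * rte_pmd_bnxt_mb_event_param through ret_param; the handler answers
 * by writing retval. The VF-id policy below is purely hypothetical.
 */
static int
vf_mbox_cb(uint8_t port_id __rte_unused, enum rte_eth_event_type event,
	   void *cb_arg __rte_unused, void *ret_param)
{
	struct rte_pmd_bnxt_mb_event_param *p = ret_param;

	if (event != RTE_ETH_EVENT_VF_MBOX)
		return 0;

	if (p->vf_id > 7)
		p->retval = RTE_PMD_BNXT_MB_EVENT_NOOP_NACK; /* refuse request */
	else
		p->retval = RTE_PMD_BNXT_MB_EVENT_PROCEED;   /* let PMD handle it */

	return 0;
}

The handler would be attached once on the PF port, e.g. with rte_eth_dev_callback_register(pf_port, RTE_ETH_EVENT_VF_MBOX, vf_mbox_cb, NULL).
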
+
+/**
+ * Enable/Disable VF MAC anti spoof
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Enable VF MAC anti spoof.
+ * 0 - Disable VF MAC anti spoof.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+ * received VLAN packets tagged with a given VLAN Tag Identifier.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan
+ * The VLAN Tag Identifier whose filtering must be enabled or disabled.
+ * @param vf_mask
+ * Bitmap listing which VFs participate in the VLAN filtering.
+ * @param vlan_on
+ * 1 - Enable VFs VLAN filtering.
+ * 0 - Disable VFs VLAN filtering.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on);
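
A typical use combines this call with the VLAN anti-spoof control declared further down: first whitelist the tag for the chosen VFs, then turn enforcement on. A rough sketch; the port id, VLAN and VF mask are illustrative only:

#include "rte_pmd_bnxt.h"

/* Illustrative only: allow VLAN 100 on VFs 0 and 1 of the PF port and
 * then enable VLAN anti-spoof so the VFs cannot use other tags. */
static int
setup_vf_vlan_100(uint8_t pf_port)
{
	int rc = rte_pmd_bnxt_set_vf_vlan_filter(pf_port, 100, 0x3, 1);

	if (rc == 0)
		rc = rte_pmd_bnxt_set_vf_vlan_anti_spoof(pf_port, 0, 1);
	if (rc == 0)
		rc = rte_pmd_bnxt_set_vf_vlan_anti_spoof(pf_port, 1, 1);
	return rc;
}
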
+
+/**
+ * Enable/Disable tx loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable tx loopback.
+ * 0 - Disable tx loopback.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on);
+
+/**
+ * Set the queue drop enable bit for all pools
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - set the queue drop enable bit for all pools.
+ * 0 - reset the queue drop enable bit for all pools.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on);
+
+/**
+ * Set the VF rate limit.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ *    Tx rate for the VF, applied once per queue selected in *q_msk*.
+ * @param q_msk
+ *    Bitmask of the VF Tx queues to which *tx_rate* applies.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if *vf*, *tx_rate* or *q_msk* is invalid.
+ */
+int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
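
As the implementation above adds tx_rate once per bit set in q_msk before programming the VF's maximum bandwidth, a single-bit mask caps the VF at exactly tx_rate. A small sketch; the port and VF numbers are illustrative:

#include "rte_pmd_bnxt.h"

/* Illustrative only: cap VF 2 with a one-bit queue mask, so the VF's
 * maximum bandwidth is exactly the requested rate. */
static int
cap_vf2(uint8_t pf_port, uint16_t rate)
{
	return rte_pmd_bnxt_set_vf_rate_limit(pf_port, 2, rate, 1ULL << 0);
}
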
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *    The VF whose statistics are to be retrieved.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ *   the values of the supported device statistics counters.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
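
The implementation above is a thin wrapper around the per-function HWRM statistics query, so consuming it looks like any other rte_eth_stats read. A minimal sketch with illustrative port and VF ids:

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>
#include "rte_pmd_bnxt.h"

/* Illustrative only: print a VF's basic packet counters from the PF. */
static void
dump_vf_counters(uint8_t pf_port, uint16_t vf_id)
{
	struct rte_eth_stats st;

	if (rte_pmd_bnxt_get_vf_stats(pf_port, vf_id, &st) == 0)
		printf("VF %u: ipackets=%" PRIu64 " opackets=%" PRIu64 "\n",
		       vf_id, st.ipackets, st.opackets);
}
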
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *    The VF whose statistics are to be cleared.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+ uint16_t vf_id);
+
+/**
+ * Enable/Disable VF VLAN anti spoof
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Enable VF VLAN anti spoof.
+ * 0 - Disable VF VLAN anti spoof.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Set RX L2 Filtering mode of a VF of an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param rx_mask
+ *    The RX mode mask (ETH_VMDQ_ACCEPT_* flags).
+ * @param on
+ * 1 - Enable a VF RX mode.
+ * 0 - Disable a VF RX mode.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on);
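
For this PMD only the broadcast and all-multicast bits are actually programmable (the implementation rejects the untagged/hash bits), so a call usually looks like the sketch below; the port and VF ids are illustrative:

#include <rte_ethdev.h>
#include "rte_pmd_bnxt.h"

/* Illustrative only: let VF 1 accept broadcast and all-multicast
 * traffic; the untagged/hash bits are rejected by this PMD. */
static int
open_vf1_rx(uint8_t pf_port)
{
	uint16_t mask = ETH_VMDQ_ACCEPT_BROADCAST | ETH_VMDQ_ACCEPT_MULTICAST;

	return rte_pmd_bnxt_set_vf_rxmode(pf_port, 1, mask, 1);
}
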
+
+/**
+ * Returns the number of default RX queues on a VF
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - Non-negative value - Number of default RX queues
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if on a function without VFs
+ * - (-ENOMEM) on an allocation failure
+ * - (-1) firmware interface error
+ */
+int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
+
+/**
+ * Queries the TX drop counter for the function
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ *    The VF whose Tx drop counter is to be queried.
+ * @param count
+ * Pointer to a uint64_t that will be populated with the counter value.
+ * @return
+ * - Positive Non-zero value - Error code from HWRM
+ * - (-EINVAL) invalid vf_id specified.
+ * - (-ENOTSUP) Ethernet device is not a PF
+ */
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+ uint64_t *count);
+
+/**
+ * Programs the MAC address for the function specified
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac_addr
+ * The MAC address to be programmed in the filter.
+ * @param vf_id
+ *    The VF on which to program the MAC address.
+ * @return
+ * - Positive Non-zero value - Error code from HWRM
+ * - (-EINVAL) invalid vf_id specified.
+ * - (-ENOTSUP) Ethernet device is not a PF
+ * - (-ENOMEM) on an allocation failure
+ */
+int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+ uint32_t vf_id);
+
+/**
+ * Enable/Disable VF statistics retention
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param on
+ * 1 - Prevent VF statistics from automatically resetting
+ * 0 - Allow VF statistics to automatically reset
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on);
+#endif /* _PMD_BNXT_H_ */
diff --git a/drivers/net/bnxt/rte_pmd_bnxt_version.map b/drivers/net/bnxt/rte_pmd_bnxt_version.map
index 349c6e1c..4750d40a 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt_version.map
+++ b/drivers/net/bnxt/rte_pmd_bnxt_version.map
@@ -1,4 +1,22 @@
-DPDK_16.04 {
+DPDK_17.08 {
+ global:
+
+ rte_pmd_bnxt_get_vf_rx_status;
+ rte_pmd_bnxt_get_vf_stats;
+ rte_pmd_bnxt_get_vf_tx_drop_count;
+ rte_pmd_bnxt_mac_addr_add;
+ rte_pmd_bnxt_reset_vf_stats;
+ rte_pmd_bnxt_set_all_queues_drop_en;
+ rte_pmd_bnxt_set_tx_loopback;
+ rte_pmd_bnxt_set_vf_mac_addr;
+ rte_pmd_bnxt_set_vf_mac_anti_spoof;
+ rte_pmd_bnxt_set_vf_rate_limit;
+ rte_pmd_bnxt_set_vf_rxmode;
+ rte_pmd_bnxt_set_vf_vlan_anti_spoof;
+ rte_pmd_bnxt_set_vf_vlan_filter;
+ rte_pmd_bnxt_set_vf_vlan_insert;
+ rte_pmd_bnxt_set_vf_vlan_stripq;
+ rte_pmd_bnxt_set_vf_persist_stats;
local: *;
};
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 7b863d6e..20b5a896 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -44,7 +44,6 @@
#include "rte_eth_bond_private.h"
static void bond_mode_8023ad_ext_periodic_cb(void *arg);
-
#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
bond_dbg_get_time_diff_ms(), slave_id, \
@@ -435,7 +434,7 @@ periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
* In other case (was fast and now it is slow) just switch
* timeout to slow without forcing send of LACP (because standard
* say so)*/
- if (!is_partner_fast)
+ if (is_partner_fast)
SM_FLAG_SET(port, NTT);
} else
return; /* Nothing changed */
@@ -632,21 +631,53 @@ tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
lacpdu->terminator_length = 0;
- if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
- /* If TX ring full, drop packet and free message. Retransmission
- * will happen in next function call. */
- rte_pktmbuf_free(lacp_pkt);
- set_warning_flags(port, WRN_TX_QUEUE_FULL);
- return;
+ MODE4_DEBUG("Sending LACP frame\n");
+ BOND_PRINT_LACP(lacpdu);
+
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
+ if (retval != 0) {
+ /* If TX ring full, drop packet and free message.
+ Retransmission will happen in next function call. */
+ rte_pktmbuf_free(lacp_pkt);
+ set_warning_flags(port, WRN_TX_QUEUE_FULL);
+ return;
+ }
+ } else {
+ uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &lacp_pkt, 1);
+ if (pkts_sent != 1) {
+ rte_pktmbuf_free(lacp_pkt);
+ set_warning_flags(port, WRN_TX_QUEUE_FULL);
+ return;
+ }
}
- MODE4_DEBUG("sending LACP frame\n");
- BOND_PRINT_LACP(lacpdu);
timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
SM_FLAG_CLR(port, NTT);
}
+static uint8_t
+max_index(uint64_t *a, int n)
+{
+ if (n <= 0)
+ return -1;
+
+ int i, max_i = 0;
+ uint64_t max = a[0];
+
+ for (i = 1; i < n; ++i) {
+ if (a[i] > max) {
+ max = a[i];
+ max_i = i;
+ }
+ }
+
+ return max_i;
+}
+
/**
* Function assigns port to aggregator.
*
@@ -657,8 +688,13 @@ static void
selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
{
struct port *agg, *port;
- uint8_t slaves_count, new_agg_id, i;
+ uint8_t slaves_count, new_agg_id, i, j = 0;
uint8_t *slaves;
+ uint64_t agg_bandwidth[8] = {0};
+ uint64_t agg_count[8] = {0};
+ uint8_t default_slave = 0;
+ uint8_t mode_count_id, mode_band_id;
+ struct rte_eth_link link_info;
slaves = internals->active_slaves;
slaves_count = internals->active_slave_count;
@@ -671,6 +707,10 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
if (agg->aggregator_port_id != slaves[i])
continue;
+ agg_count[agg->aggregator_port_id] += 1;
+ rte_eth_link_get_nowait(slaves[i], &link_info);
+ agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;
+
/* Actors system ID is not checked since all slave device have the same
* ID (MAC address). */
if ((agg->actor.key == port->actor.key &&
@@ -681,15 +721,36 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
(agg->actor.key &
rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
- break;
+ if (j == 0)
+ default_slave = i;
+ j++;
}
}
- /* By default, port uses it self as agregator */
- if (i == slaves_count)
- new_agg_id = slave_id;
- else
- new_agg_id = slaves[i];
+ switch (internals->mode4.agg_selection) {
+ case AGG_COUNT:
+ mode_count_id = max_index(
+ (uint64_t *)agg_count, slaves_count);
+ new_agg_id = mode_count_id;
+ break;
+ case AGG_BANDWIDTH:
+ mode_band_id = max_index(
+ (uint64_t *)agg_bandwidth, slaves_count);
+ new_agg_id = mode_band_id;
+ break;
+ case AGG_STABLE:
+ if (default_slave == slaves_count)
+ new_agg_id = slave_id;
+ else
+ new_agg_id = slaves[default_slave];
+ break;
+ default:
+ if (default_slave == slaves_count)
+ new_agg_id = slave_id;
+ else
+ new_agg_id = slaves[default_slave];
+ break;
+ }
if (new_agg_id != port->aggregator_port_id) {
port->aggregator_port_id = new_agg_id;
@@ -741,6 +802,22 @@ link_speed_key(uint16_t speed) {
}
static void
+rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
+ struct rte_mbuf *lacp_pkt) {
+ struct lacpdu_header *lacp;
+
+ if (lacp_pkt != NULL) {
+ lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+ RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+ /* This is LACP frame so pass it to rx_machine */
+ rx_machine(internals, slave_id, &lacp->lacpdu);
+ rte_pktmbuf_free(lacp_pkt);
+ } else
+ rx_machine(internals, slave_id, NULL);
+}
+
+static void
bond_mode_8023ad_periodic_cb(void *arg)
{
struct rte_eth_dev *bond_dev = arg;
@@ -748,8 +825,8 @@ bond_mode_8023ad_periodic_cb(void *arg)
struct port *port;
struct rte_eth_link link_info;
struct ether_addr slave_addr;
+ struct rte_mbuf *lacp_pkt = NULL;
- void *pkt = NULL;
uint8_t i, slave_id;
@@ -758,7 +835,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
uint16_t key;
slave_id = internals->active_slaves[i];
- rte_eth_link_get(slave_id, &link_info);
+ rte_eth_link_get_nowait(slave_id, &link_info);
rte_eth_macaddr_get(slave_id, &slave_addr);
if (link_info.link_status != 0) {
@@ -809,20 +886,28 @@ bond_mode_8023ad_periodic_cb(void *arg)
SM_FLAG_SET(port, LACP_ENABLED);
- /* Find LACP packet to this port. Do not check subtype, it is done in
- * function that queued packet */
- if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
- struct rte_mbuf *lacp_pkt = pkt;
- struct lacpdu_header *lacp;
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ /* Find LACP packet to this port. Do not check subtype,
+ * it is done in function that queued packet
+ */
+ int retval = rte_ring_dequeue(port->rx_ring,
+ (void **)&lacp_pkt);
- lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
- RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+ if (retval != 0)
+ lacp_pkt = NULL;
- /* This is LACP frame so pass it to rx_machine */
- rx_machine(internals, slave_id, &lacp->lacpdu);
- rte_pktmbuf_free(lacp_pkt);
- } else
- rx_machine(internals, slave_id, NULL);
+ rx_machine_update(internals, slave_id, lacp_pkt);
+ } else {
+ uint16_t rx_count = rte_eth_rx_burst(slave_id,
+ internals->mode4.dedicated_queues.rx_qid,
+ &lacp_pkt, 1);
+
+ if (rx_count == 1)
+ bond_mode_8023ad_handle_slow_pkt(internals,
+ slave_id, lacp_pkt);
+ else
+ rx_machine_update(internals, slave_id, NULL);
+ }
periodic_machine(internals, slave_id);
mux_machine(internals, slave_id);
@@ -872,7 +957,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
/* default states */
port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
- port->partner_state = STATE_LACP_ACTIVE;
+ port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
port->sm_flags = SM_FLAGS_BEGIN;
/* use this port as agregator */
@@ -886,7 +971,10 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
RTE_ASSERT(port->rx_ring == NULL);
RTE_ASSERT(port->tx_ring == NULL);
- socket_id = rte_eth_devices[slave_id].data->numa_node;
+
+ socket_id = rte_eth_dev_socket_id(slave_id);
+ if (socket_id == (int)LCORE_ID_ANY)
+ socket_id = rte_socket_id();
element_size = sizeof(struct slow_protocol_frame) +
RTE_PKTMBUF_HEADROOM;
@@ -905,7 +993,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
0, element_size, socket_id);
- /* Any memory allocation failure in initalization is critical because
+ /* Any memory allocation failure in initialization is critical because
* resources can't be free, so reinitialization is impossible. */
if (port->mbuf_pool == NULL) {
rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
@@ -1037,6 +1125,18 @@ bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev,
}
static void
+bond_mode_8023ad_conf_get_v1708(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ bond_mode_8023ad_conf_get(dev, conf);
+ conf->slowrx_cb = mode4->slowrx_cb;
+ conf->agg_selection = mode4->agg_selection;
+}
+
+static void
bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
{
conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
@@ -1048,6 +1148,7 @@ bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
conf->slowrx_cb = NULL;
+ conf->agg_selection = AGG_STABLE;
}
static void
@@ -1064,6 +1165,10 @@ bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
mode4->update_timeout_us = conf->update_timeout_ms * 1000;
+
+ mode4->dedicated_queues.enabled = 0;
+ mode4->dedicated_queues.rx_qid = UINT16_MAX;
+ mode4->dedicated_queues.tx_qid = UINT16_MAX;
}
static void
@@ -1102,7 +1207,29 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev,
bond_mode_8023ad_stop(dev);
bond_mode_8023ad_conf_assign(mode4, conf);
+
+
+ if (dev->data->dev_started)
+ bond_mode_8023ad_start(dev);
+}
+
+static void
+bond_mode_8023ad_setup_v1708(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_bond_8023ad_conf def_conf;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ if (conf == NULL) {
+ conf = &def_conf;
+ bond_mode_8023ad_conf_get_default(conf);
+ }
+
+ bond_mode_8023ad_stop(dev);
+ bond_mode_8023ad_conf_assign(mode4, conf);
mode4->slowrx_cb = conf->slowrx_cb;
+ mode4->agg_selection = AGG_STABLE;
if (dev->data->dev_started)
bond_mode_8023ad_start(dev);
@@ -1188,18 +1315,36 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
- if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) {
- /* reset timer */
- port->rx_marker_timer = 0;
- wrn = WRN_TX_QUEUE_FULL;
- goto free_out;
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->tx_ring, pkt);
+ if (retval != 0) {
+ /* reset timer */
+ port->rx_marker_timer = 0;
+ wrn = WRN_TX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else {
+ /* Send packet directly to the slow queue */
+ uint16_t tx_count = rte_eth_tx_burst(slave_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ &pkt, 1);
+ if (tx_count != 1) {
+ /* reset timer */
+ port->rx_marker_timer = 0;
+ wrn = WRN_TX_QUEUE_FULL;
+ goto free_out;
+ }
}
} else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
- if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) {
- /* If RX fing full free lacpdu message and drop packet */
- wrn = WRN_RX_QUEUE_FULL;
- goto free_out;
- }
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ int retval = rte_ring_enqueue(port->rx_ring, pkt);
+ if (retval != 0) {
+				/* If RX ring full, free lacpdu message and drop packet */
+ wrn = WRN_RX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else
+ rx_machine_update(internals, slave_id, pkt);
} else {
wrn = WRN_UNKNOWN_SLOW_TYPE;
goto free_out;
@@ -1246,10 +1391,71 @@ rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
bond_mode_8023ad_conf_get_v1607(bond_dev, conf);
return 0;
}
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
+VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
+
+int
+rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+
+ if (conf == NULL)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_conf_get_v1708(bond_dev, conf);
+ return 0;
+}
MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf),
- rte_eth_bond_8023ad_conf_get_v1607);
+ rte_eth_bond_8023ad_conf_get_v1708);
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1708, 17.08);
+
+int
+rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+ enum rte_bond_8023ad_agg_selection agg_selection)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ bond_dev = &rte_eth_devices[port_id];
+ internals = bond_dev->data->dev_private;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+ if (internals->mode != 4)
+ return -EINVAL;
+
+ mode4 = &internals->mode4;
+ if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
+ || agg_selection == AGG_STABLE)
+ mode4->agg_selection = agg_selection;
+ return 0;
+}
+
+int rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ bond_dev = &rte_eth_devices[port_id];
+ internals = bond_dev->data->dev_private;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+ if (internals->mode != 4)
+ return -EINVAL;
+ mode4 = &internals->mode4;
+
+ return mode4->agg_selection;
+}
+
static int
bond_8023ad_setup_validate(uint8_t port_id,
@@ -1310,10 +1516,34 @@ rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
return 0;
}
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
+VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
+
+
+int
+rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+ int err;
+
+ err = bond_8023ad_setup_validate(port_id, conf);
+ if (err != 0)
+ return err;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_setup_v1708(bond_dev, conf);
+
+ return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1708, 17.08);
MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf),
- rte_eth_bond_8023ad_setup_v1607);
+ rte_eth_bond_8023ad_setup_v1708);
+
int
rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
@@ -1504,3 +1734,49 @@ bond_mode_8023ad_ext_periodic_cb(void *arg)
rte_eal_alarm_set(internals->mode4.update_timeout_us,
bond_mode_8023ad_ext_periodic_cb, arg);
}
+
+int
+rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port)
+{
+ int retval = 0;
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ dev->data->dev_private;
+
+ if (check_for_bonded_ethdev(dev) != 0)
+ return -1;
+
+ if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
+ return -1;
+
+ /* Device must be stopped to set up slow queue */
+ if (dev->data->dev_started)
+ return -1;
+
+ internals->mode4.dedicated_queues.enabled = 1;
+
+ bond_ethdev_mode_set(dev, internals->mode);
+ return retval;
+}
+
+int
+rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port)
+{
+ int retval = 0;
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ dev->data->dev_private;
+
+ if (check_for_bonded_ethdev(dev) != 0)
+ return -1;
+
+ /* Device must be stopped to set up slow queue */
+ if (dev->data->dev_started)
+ return -1;
+
+ internals->mode4.dedicated_queues.enabled = 0;
+
+ bond_ethdev_mode_set(dev, internals->mode);
+
+ return retval;
+}
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h
index 6b8ff575..1d353c73 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -73,6 +73,12 @@ enum rte_bond_8023ad_selection {
SELECTED
};
+enum rte_bond_8023ad_agg_selection {
+ AGG_BANDWIDTH,
+ AGG_COUNT,
+ AGG_STABLE
+};
+
/** Generic slow protocol structure */
struct slow_protocol {
uint8_t subtype;
@@ -161,6 +167,7 @@ struct rte_eth_bond_8023ad_conf {
uint32_t rx_marker_period_ms;
uint32_t update_timeout_ms;
rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
+ enum rte_bond_8023ad_agg_selection agg_selection;
};
struct rte_eth_bond_8023ad_slave_info {
@@ -193,6 +200,9 @@ rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id,
int
rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
/**
* @internal
@@ -214,6 +224,9 @@ rte_eth_bond_8023ad_setup_v20(uint8_t port_id,
int
rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
/**
* @internal
@@ -302,4 +315,65 @@ int
rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
struct rte_mbuf *lacp_pkt);
+/**
+ * Enable dedicated hw queues for 802.3ad control plane traffic on slaves
+ *
+ * This function creates an additional tx and rx queue on each slave for
+ * dedicated 802.3ad control plane traffic. A flow filtering rule is
+ * programmed on each slave to redirect all LACP slow packets to that rx
+ * queue for processing in the LACP state machine; this removes the need
+ * to filter these packets in the bonded device's data path. The additional
+ * tx queue is used to enable the LACP state machine to enqueue LACP packets
+ * directly to slave hw independently of the bonded device's data path.
+ *
+ * To use this feature, all slaves must support the programming of the flow
+ * filter rule required for rx and have enough queues that one rx and one tx
+ * queue can be reserved for the LACP state machine's control packets.
+ *
+ * Bonding port must be stopped to change this configuration.
+ *
+ * @param port_id Bonding device id
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port_id);
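
Since the bonding port must be stopped before the queue layout can change, the call is normally wrapped in a stop/start sequence. A rough sketch with an illustrative port id:

#include <rte_ethdev.h>
#include <rte_eth_bond_8023ad.h>

/* Illustrative only: move a bonded port to dedicated 802.3ad
 * control-plane queues. The port (and thus its slaves) is stopped
 * first, as required above. */
static int
enable_lacp_hw_queues(uint8_t bond_port)
{
	rte_eth_dev_stop(bond_port);

	if (rte_eth_bond_8023ad_dedicated_queues_enable(bond_port) != 0)
		return -1;

	return rte_eth_dev_start(bond_port);
}
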
+
+/**
+ * Disable slow queue on slaves
+ *
+ * This function disables the hardware slow packet filter on the slaves.
+ *
+ * Bonding port must be stopped to change this configuration.
+ *
+ * @see rte_eth_bond_8023ad_dedicated_queues_enable
+ *
+ * @param port_id Bonding device id
+ * @return
+ * 0 on success, negative value otherwise.
+ *
+ */
+int
+rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port_id);
+
+/**
+ * Get aggregator mode for 8023ad
+ * @param port_id Bonding device id
+ *
+ * @return
+ *   aggregator mode on success, negative value otherwise
+ */
+int
+rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id);
+
+/**
+ * Set aggregator mode for 8023ad
+ * @param port_id Bonding device id
+ * @param agg_selection Aggregator mode: AGG_BANDWIDTH, AGG_COUNT or AGG_STABLE
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+ enum rte_bond_8023ad_agg_selection agg_selection);
#endif /* RTE_ETH_BOND_8023AD_H_ */
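
A short sketch of the two aggregator-mode calls together, on a hypothetical bonded port already configured in mode 4:

#include <rte_eth_bond_8023ad.h>

/* Illustrative only: prefer the highest-bandwidth aggregator and read
 * the setting back to confirm it took effect. */
static int
use_bandwidth_aggregator(uint8_t bond_port)
{
	if (rte_eth_bond_8023ad_agg_selection_set(bond_port, AGG_BANDWIDTH) != 0)
		return -1;

	return rte_eth_bond_8023ad_agg_selection_get(bond_port) == AGG_BANDWIDTH ?
			0 : -1;
}
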
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index ca8858be..d46e44a8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -39,6 +39,7 @@
#include <rte_ether.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
+#include <rte_flow.h>
#include "rte_eth_bond_8023ad.h"
@@ -162,6 +163,9 @@ struct port {
uint64_t warning_timer;
volatile uint16_t warnings_to_show;
+
+ /** Memory pool used to allocate slow queues */
+ struct rte_mempool *slow_pool;
};
struct mode8023ad_private {
@@ -175,6 +179,23 @@ struct mode8023ad_private {
uint64_t update_timeout_us;
rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
uint8_t external_sm;
+
+ struct rte_eth_link slave_link;
+	/**< slave link properties */
+
+ /**
+ * Configuration of dedicated hardware queues for control plane
+ * traffic
+ */
+ struct {
+ uint8_t enabled;
+
+ struct rte_flow *flow[RTE_MAX_ETHPORTS];
+
+ uint16_t rx_qid;
+ uint16_t tx_qid;
+ } dedicated_queues;
+ enum rte_bond_8023ad_agg_selection agg_selection;
};
/**
@@ -295,4 +316,14 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint8_t slave_pos);
void
bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev);
+int
+bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
+ uint8_t slave_port);
+
+int
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port);
+
+int
+bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id);
+
#endif /* RTE_ETH_BOND_8023AD_H_ */
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index 38f5c4d4..d9d37495 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -80,7 +80,8 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
* The value is chosen to be cache aligned.
*/
data_size = 256 + RTE_PKTMBUF_HEADROOM;
- snprintf(mem_name, sizeof(mem_name), "%s_MODE6", bond_dev->data->name);
+ snprintf(mem_name, sizeof(mem_name), "%s_ALB",
+ bond_dev->device->name);
internals->mode6.mempool = rte_pktmbuf_pool_create(mem_name,
512 * RTE_MAX_ETHPORTS,
RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
@@ -89,7 +90,7 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
if (internals->mode6.mempool == NULL) {
RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n",
- bond_dev->data->name);
+ bond_dev->device->name);
goto mempool_alloc_error;
}
}
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 36ec65d6..de1d9e0d 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,11 +48,11 @@ int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
{
/* Check valid pointer */
- if (eth_dev->data->drv_name == NULL)
+ if (eth_dev->device->driver->name == NULL)
return -1;
/* return 0 if driver name matches */
- return eth_dev->data->drv_name != pmd_bond_drv.driver.name;
+ return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
}
int
@@ -63,13 +63,18 @@ valid_bonded_port_id(uint8_t port_id)
}
int
-valid_slave_port_id(uint8_t port_id)
+valid_slave_port_id(uint8_t port_id, uint8_t mode)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
/* Verify that port_id refers to a non bonded port */
- if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0)
+ if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0 &&
+ mode == BONDING_MODE_8023AD) {
+ RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad"
+ " mode as slave is also a bonded device, only "
+			"physical devices can be supported in this mode.");
return -1;
+ }
return 0;
}
@@ -143,22 +148,6 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
}
}
-uint8_t
-number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
-
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
-
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
@@ -251,12 +240,12 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
struct rte_eth_link link_props;
struct rte_eth_dev_info dev_info;
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
-
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
internals = bonded_eth_dev->data->dev_private;
+ if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
+ return -1;
+
slave_eth_dev = &rte_eth_devices[slave_port_id];
if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
@@ -313,6 +302,9 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
internals->tx_offload_capa &= dev_info.tx_offload_capa;
internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
+ link_properties_valid(bonded_eth_dev,
+ &slave_eth_dev->data->dev_link);
+
/* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
* the power of 2, the lower one is GCD
*/
@@ -402,12 +394,12 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
struct rte_eth_dev *slave_eth_dev;
int i, slave_idx;
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
-
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
internals = bonded_eth_dev->data->dev_private;
+ if (valid_slave_port_id(slave_port_id, internals->mode) < 0)
+ return -1;
+
/* first remove from active slave list */
slave_idx = find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_port_id);
@@ -455,9 +447,6 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
}
if (internals->active_slave_count < 1) {
- /* reset device link properties as no slaves are active */
- link_properties_reset(&rte_eth_devices[bonded_port_id]);
-
/* if no slaves are any longer attached to bonded device and MAC is not
* user defined then clear MAC of bonded device as it will be reset
* when a new slave is added */
@@ -528,10 +517,10 @@ rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
if (valid_bonded_port_id(bonded_port_id) != 0)
return -1;
- if (valid_slave_port_id(slave_port_id) != 0)
- return -1;
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
- internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
+ return -1;
internals->user_defined_primary_port = 1;
internals->primary_port = slave_port_id;
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index e3bdad9d..bb634c62 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -32,6 +32,7 @@
*/
#include <rte_devargs.h>
+#include <rte_pci.h>
#include <rte_kvargs.h>
#include <cmdline_parse.h>
@@ -47,6 +48,7 @@ const char *pmd_bond_init_valid_arguments[] = {
PMD_BOND_XMIT_POLICY_KVARG,
PMD_BOND_SOCKET_ID_KVARG,
PMD_BOND_MAC_ADDR_KVARG,
+ PMD_BOND_AGG_MODE_KVARG,
"driver",
NULL
};
@@ -69,7 +71,7 @@ find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
rte_eth_devices[i].data->kdrv == RTE_KDRV_NONE)
continue;
- pci_dev = RTE_DEV_TO_PCI(rte_eth_devices[i].device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]);
eth_pci_addr = &pci_dev->addr;
if (pci_addr->bus == eth_pci_addr->bus &&
@@ -90,7 +92,7 @@ find_port_id_by_dev_name(const char *name)
if (rte_eth_devices[i].data == NULL)
continue;
- if (strcmp(rte_eth_devices[i].data->name, name) == 0)
+ if (strcmp(rte_eth_devices[i].device->name, name) == 0)
return i;
}
return -1;
@@ -134,7 +136,7 @@ parse_port_id(const char *port_str)
}
int
-bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_slave_port_kvarg(const char *key,
const char *value, void *extra_args)
{
struct bond_ethdev_slave_ports *slave_ports;
@@ -190,6 +192,38 @@ bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
}
int
+bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint8_t *agg_mode;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ agg_mode = extra_args;
+
+ errno = 0;
+ if (strncmp(value, "stable", 6) == 0)
+ *agg_mode = AGG_STABLE;
+
+ if (strncmp(value, "bandwidth", 9) == 0)
+ *agg_mode = AGG_BANDWIDTH;
+
+ if (strncmp(value, "count", 5) == 0)
+ *agg_mode = AGG_COUNT;
+
+ switch (*agg_mode) {
+ case AGG_STABLE:
+ case AGG_BANDWIDTH:
+ case AGG_COUNT:
+ return 0;
+ default:
+ RTE_BOND_LOG(ERR, "Invalid agg mode value stable/bandwidth/count");
+ return -1;
+ }
+}
+
+int
bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
const char *value, void *extra_args)
{
@@ -204,8 +238,8 @@ bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
if (*endptr != 0 || errno != 0)
return -1;
- /* validate mode value */
- if (socket_id >= 0 && socket_id < number_of_sockets()) {
+ /* validate socket id value */
+ if (socket_id >= 0) {
*(uint8_t *)extra_args = (uint8_t)socket_id;
return 0;
}
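
For reference, the new agg_mode devarg parsed above is handled as an ordinary rte_kvargs key/value. A minimal sketch of creating a mode-4 bond that exercises it could look like the following (a sketch only, assuming rte_vdev_init() from rte_vdev.h and hypothetical slave PCI addresses):

#include <rte_vdev.h>

/* Sketch only: create a LACP (mode 4) bonded vdev from a devargs string.
 * The slave PCI addresses are hypothetical placeholders; call this after
 * rte_eal_init() has probed the slave ports. */
static int
create_bonded_vdev(void)
{
	return rte_vdev_init("net_bonding0",
			"mode=4,"
			"slave=0000:01:00.0,slave=0000:01:00.1,"
			"agg_mode=stable,socket_id=0");
}

The same argument string can equally be supplied on the EAL command line through --vdev.
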
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 82959abc..3ee70baa 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -133,6 +133,254 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
+/*****************************************************************************
+ * Flow director's setup for mode 4 optimization
+ */
+
+static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
+ .dst.addr_bytes = { 0 },
+ .src.addr_bytes = { 0 },
+ .type = RTE_BE16(ETHER_TYPE_SLOW),
+};
+
+static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
+ .dst.addr_bytes = { 0 },
+ .src.addr_bytes = { 0 },
+ .type = 0xFFFF,
+};
+
+static struct rte_flow_item flow_item_8023ad[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &flow_item_eth_type_8023ad,
+ .last = NULL,
+ .mask = &flow_item_eth_mask_type_8023ad,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ .spec = NULL,
+ .last = NULL,
+ .mask = NULL,
+ }
+};
+
+const struct rte_flow_attr flow_attr_8023ad = {
+ .group = 0,
+ .priority = 0,
+ .ingress = 1,
+ .egress = 0,
+ .reserved = 0,
+};
+
+int
+bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
+ uint8_t slave_port) {
+ struct rte_flow_error error;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+
+ struct rte_flow_action_queue lacp_queue_conf = {
+ .index = internals->mode4.dedicated_queues.rx_qid,
+ };
+
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &lacp_queue_conf
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
+ flow_item_8023ad, actions, &error);
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+int
+bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id) {
+ struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+ struct rte_eth_dev_info bond_info, slave_info;
+ uint8_t idx;
+
+ /* Verify that all slaves in the bond support flow director */
+ if (internals->slave_count > 0) {
+ rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
+
+ internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
+ internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
+
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ rte_eth_dev_info_get(internals->slaves[idx].port_id,
+ &slave_info);
+
+ if (bond_ethdev_8023ad_flow_verify(bond_dev,
+ internals->slaves[idx].port_id) != 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port) {
+
+ struct rte_flow_error error;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ (bond_dev->data->dev_private);
+
+ struct rte_flow_action_queue lacp_queue_conf = {
+ .index = internals->mode4.dedicated_queues.rx_qid,
+ };
+
+ const struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &lacp_queue_conf
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
+ &flow_attr_8023ad, flow_item_8023ad, actions, &error);
+ if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
+ RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
+ "(slave_port=%d queue_id=%d)",
+ error.message, slave_port,
+ internals->mode4.dedicated_queues.rx_qid);
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
+ uint16_t num_rx_total = 0; /* Total number of received packets */
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint8_t slave_count;
+
+ uint8_t i, idx;
+
+ /* Copy slave list to protect against slave up/down changes during rx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * slave_count);
+
+ for (i = 0, idx = internals->active_slave;
+ i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
+ idx = idx % slave_count;
+
+ /* Read packets from this slave */
+ num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
+ &bufs[num_rx_total], nb_pkts - num_rx_total);
+ }
+
+ internals->active_slave = idx;
+
+ return num_rx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ uint8_t num_of_slaves;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ /* positions in slaves, not ID */
+ uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
+ uint8_t distributing_count;
+
+ uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
+ uint16_t i, op_slave_idx;
+
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
+
+ /* Number of packets in slave_bufs queued for each slave */
+ uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) *
+ num_of_slaves);
+
+ distributing_count = 0;
+ for (i = 0; i < num_of_slaves; i++) {
+ struct port *port = &mode_8023ad_ports[slaves[i]];
+ if (ACTOR_STATE(port, DISTRIBUTING))
+ distributing_offsets[distributing_count++] = i;
+ }
+
+ if (likely(distributing_count > 0)) {
+ /* Populate slaves mbuf with the packets which are to be sent */
+ for (i = 0; i < nb_pkts; i++) {
+ /* Select output slave using hash based on xmit policy */
+ op_slave_idx = internals->xmit_hash(bufs[i],
+ distributing_count);
+
+ /* Populate slave mbuf arrays with mbufs for that slave.
+ * Use only slaves that are currently distributing.
+ */
+ uint8_t slave_offset =
+ distributing_offsets[op_slave_idx];
+ slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] =
+ bufs[i];
+ slave_nb_pkts[slave_offset]++;
+ }
+ }
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < num_of_slaves; i++) {
+ if (slave_nb_pkts[i] == 0)
+ continue;
+
+ num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ slave_bufs[i], slave_nb_pkts[i]);
+
+ num_tx_total += num_tx_slave;
+ num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+ uint16_t j = nb_pkts - num_tx_fail_total;
+ for ( ; num_tx_slave < slave_nb_pkts[i]; j++,
+ num_tx_slave++)
+ bufs[j] = slave_bufs[i][num_tx_slave];
+ }
+ }
+
+ return num_tx_total;
+}
+
+
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
@@ -180,6 +428,13 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* Handle slow protocol packets. */
while (j < num_rx_total) {
+
+ /* If the packet type is known and not pure L2 Ethernet, it cannot be a slow frame; skip it */
+ if ((bufs[j]->packet_type & ~RTE_PTYPE_L2_ETHER) != 0) {
+ j++;
+ continue;
+ }
+
if (j + 3 < num_rx_total)
rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
@@ -187,7 +442,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
/* Remove packet from array if it is slow packet or slave is not
- * in collecting state or bondign interface is not in promiscus
+ * in collecting state or bonding interface is not in promiscuous
* mode and packet address does not match. */
if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
!collecting || (!promisc &&
@@ -654,7 +909,7 @@ bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
{
struct rte_eth_link link_status;
- rte_eth_link_get(port_id, &link_status);
+ rte_eth_link_get_nowait(port_id, &link_status);
uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
if (link_bwg == 0)
return;
@@ -793,8 +1048,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
/*
- * We create separate transmit buffers for update packets as they wont be
- * counted in num_tx_total.
+ * We create separate transmit buffers for update packets as they won't
+ * be counted in num_tx_total.
*/
struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
@@ -1131,39 +1386,44 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
}
void
-link_properties_set(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_link *slave_dev_link)
+link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
- struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
- if (slave_dev_link->link_status &&
- bonded_eth_dev->data->dev_started) {
- bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
- bonded_dev_link->link_speed = slave_dev_link->link_speed;
+ if (bond_ctx->mode == BONDING_MODE_8023AD) {
+ /**
+ * If in mode 4 then save the link properties of the first
+ * slave, all subsequent slaves must match these properties
+ */
+ struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
- internals->link_props_set = 1;
+ bond_link->link_autoneg = slave_link->link_autoneg;
+ bond_link->link_duplex = slave_link->link_duplex;
+ bond_link->link_speed = slave_link->link_speed;
+ } else {
+ /**
+ * In any other mode the link properties are set to default
+ * values of AUTONEG/DUPLEX
+ */
+ ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
+ ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
}
}
-void
-link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
+int
+link_properties_valid(struct rte_eth_dev *ethdev,
+ struct rte_eth_link *slave_link)
{
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
- memset(&(bonded_eth_dev->data->dev_link), 0,
- sizeof(bonded_eth_dev->data->dev_link));
+ if (bond_ctx->mode == BONDING_MODE_8023AD) {
+ struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
- internals->link_props_set = 0;
-}
-
-int
-link_properties_valid(struct rte_eth_link *bonded_dev_link,
- struct rte_eth_link *slave_dev_link)
-{
- if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
- bonded_dev_link->link_speed != slave_dev_link->link_speed)
- return -1;
+ if (bond_link->link_duplex != slave_link->link_duplex ||
+ bond_link->link_autoneg != slave_link->link_autoneg ||
+ bond_link->link_speed != slave_link->link_speed)
+ return -1;
+ }
return 0;
}
@@ -1295,11 +1555,19 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
if (bond_mode_8023ad_enable(eth_dev) != 0)
return -1;
- eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
- eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
- RTE_LOG(WARNING, PMD,
- "Using mode 4, it is necessary to do TX burst and RX burst "
- "at least every 100ms.\n");
+ if (internals->mode4.dedicated_queues.enabled == 0) {
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
+ RTE_LOG(WARNING, PMD,
+ "Using mode 4, it is necessary to do TX burst "
+ "and RX burst at least every 100ms.\n");
+ } else {
+ /* Use flow director's optimization */
+ eth_dev->rx_pkt_burst =
+ bond_ethdev_rx_burst_8023ad_fast_queue;
+ eth_dev->tx_pkt_burst =
+ bond_ethdev_tx_burst_8023ad_fast_queue;
+ }
break;
case BONDING_MODE_TLB:
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
@@ -1321,15 +1589,81 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
return 0;
}
+
+static int
+slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ int errval = 0;
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ bonded_eth_dev->data->dev_private;
+ struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+
+ if (port->slow_pool == NULL) {
+ char mem_name[256];
+ int slave_id = slave_eth_dev->data->port_id;
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
+ slave_id);
+ port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
+ 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ slave_eth_dev->data->numa_node);
+
+ /* Any memory allocation failure in initialization is critical because
+ * resources can't be freed, so reinitialization is impossible. */
+ if (port->slow_pool == NULL) {
+ rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
+ }
+ }
+
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ /* Configure slow Rx queue */
+
+ errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.rx_qid, 128,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ NULL, port->slow_pool);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.rx_qid,
+ errval);
+ return errval;
+ }
+
+ errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.tx_qid, 512,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ NULL);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.tx_qid,
+ errval);
+ return errval;
+ }
+ }
+ return 0;
+}
+
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_dev *slave_eth_dev)
{
struct bond_rx_queue *bd_rx_q;
struct bond_tx_queue *bd_tx_q;
+ uint16_t nb_rx_queues;
+ uint16_t nb_tx_queues;
int errval;
uint16_t q_id;
+ struct rte_flow_error flow_error;
+
+ struct bond_dev_private *internals = (struct bond_dev_private *)
+ bonded_eth_dev->data->dev_private;
/* Stop slave */
rte_eth_dev_stop(slave_eth_dev->data->port_id);
@@ -1359,10 +1693,19 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
+ nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
+ nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ nb_rx_queues++;
+ nb_tx_queues++;
+ }
+ }
+
/* Configure device */
errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
- bonded_eth_dev->data->nb_rx_queues,
- bonded_eth_dev->data->nb_tx_queues,
+ nb_rx_queues, nb_tx_queues,
&(slave_eth_dev->data->dev_conf));
if (errval != 0) {
RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
@@ -1396,12 +1739,35 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
&bd_tx_q->tx_conf);
if (errval != 0) {
RTE_BOND_LOG(ERR,
- "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
- slave_eth_dev->data->port_id, q_id, errval);
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
return errval;
}
}
+ if (internals->mode == BONDING_MODE_8023AD &&
+ internals->mode4.dedicated_queues.enabled == 1) {
+ if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev)
+ != 0)
+ return errval;
+
+ if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
+ slave_eth_dev->data->port_id) != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
+ return -1;
+ }
+
+ if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
+ rte_flow_destroy(slave_eth_dev->data->port_id,
+ internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
+ &flow_error);
+
+ bond_ethdev_8023ad_flow_set(bonded_eth_dev,
+ slave_eth_dev->data->port_id);
+ }
+
/* Start device */
errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
if (errval != 0) {
@@ -1438,7 +1804,8 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
- RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+ RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
+ NULL);
}
return 0;
@@ -1559,13 +1926,25 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
if (internals->promiscuous_en)
bond_ethdev_promiscuous_enable(eth_dev);
+ if (internals->mode == BONDING_MODE_8023AD) {
+ if (internals->mode4.dedicated_queues.enabled == 1) {
+ internals->mode4.dedicated_queues.rx_qid =
+ eth_dev->data->nb_rx_queues;
+ internals->mode4.dedicated_queues.tx_qid =
+ eth_dev->data->nb_tx_queues;
+ }
+ }
+
+
/* Reconfigure each slave device if starting bonded device */
for (i = 0; i < internals->slave_count; i++) {
- if (slave_configure(eth_dev,
- &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
+ struct rte_eth_dev *slave_ethdev =
+ &(rte_eth_devices[internals->slaves[i].port_id]);
+ if (slave_configure(eth_dev, slave_ethdev) != 0) {
RTE_BOND_LOG(ERR,
- "bonded port (%d) failed to reconfigure slave device (%d)",
- eth_dev->data->port_id, internals->slaves[i].port_id);
+ "bonded port (%d) failed to reconfigure slave device (%d)",
+ eth_dev->data->port_id,
+ internals->slaves[i].port_id);
return -1;
}
/* We will need to poll for link status if any slave doesn't
@@ -1666,7 +2045,7 @@ bond_ethdev_close(struct rte_eth_dev *dev)
uint8_t bond_port_id = internals->port_id;
int skipped = 0;
- RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->data->name);
+ RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name);
while (internals->slave_count != skipped) {
uint8_t port_id = internals->slaves[skipped].port_id;
@@ -1675,7 +2054,7 @@ bond_ethdev_close(struct rte_eth_dev *dev)
if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
RTE_LOG(ERR, EAL,
"Failed to remove port %d from bonded device "
- "%s\n", port_id, dev->data->name);
+ "%s\n", port_id, dev->device->name);
skipped++;
}
}
@@ -1691,14 +2070,47 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ uint16_t max_nb_rx_queues = UINT16_MAX;
+ uint16_t max_nb_tx_queues = UINT16_MAX;
+
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
- ? internals->candidate_max_rx_pktlen
- : ETHER_MAX_JUMBO_FRAME_LEN;
+ dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
+ internals->candidate_max_rx_pktlen :
+ ETHER_MAX_JUMBO_FRAME_LEN;
+
+ /* The max number of tx/rx queues that the bonded device can support is
+ * the minimum of the bonded slaves' values, as all slaves must be
+ * capable of supporting the same number of tx/rx queues.
+ */
+ if (internals->slave_count > 0) {
+ struct rte_eth_dev_info slave_info;
+ uint8_t idx;
- dev_info->max_rx_queues = (uint16_t)128;
- dev_info->max_tx_queues = (uint16_t)512;
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ rte_eth_dev_info_get(internals->slaves[idx].port_id,
+ &slave_info);
+
+ if (slave_info.max_rx_queues < max_nb_rx_queues)
+ max_nb_rx_queues = slave_info.max_rx_queues;
+
+ if (slave_info.max_tx_queues < max_nb_tx_queues)
+ max_nb_tx_queues = slave_info.max_tx_queues;
+ }
+ }
+
+ dev_info->max_rx_queues = max_nb_rx_queues;
+ dev_info->max_tx_queues = max_nb_tx_queues;
+
+ /**
+ * If dedicated hw queues are enabled for the bonded device in LACP mode,
+ * then we need to reduce the maximum number of data path queues by 1.
+ */
+ if (internals->mode == BONDING_MODE_8023AD &&
+ internals->mode4.dedicated_queues.enabled == 1) {
+ dev_info->max_rx_queues--;
+ dev_info->max_tx_queues--;
+ }
dev_info->min_rx_bufsize = 0;
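
As an illustration of the limits computed above: if one slave reports 16/16 max rx/tx queues and another reports 8/12, the bonded device now advertises 8 rx and 12 tx queues; with the dedicated LACP queues enabled in mode 4, that drops to 7 rx and 11 tx, since one queue pair on each slave is reserved for slow-protocol frames.
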
@@ -1849,7 +2261,8 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
RTE_ETH_EVENT_INTR_LSC,
- &bonded_ethdev->data->port_id);
+ &bonded_ethdev->data->port_id,
+ NULL);
}
}
rte_spinlock_unlock(&internals->lock);
@@ -1862,36 +2275,90 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
}
static int
-bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
- int wait_to_complete)
+bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
- struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ void (*link_update)(uint8_t port_id, struct rte_eth_link *eth_link);
- if (!bonded_eth_dev->data->dev_started ||
- internals->active_slave_count == 0) {
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ struct bond_dev_private *bond_ctx;
+ struct rte_eth_link slave_link;
+
+ uint32_t idx;
+
+ bond_ctx = ethdev->data->dev_private;
+
+ ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+
+ if (ethdev->data->dev_started == 0 ||
+ bond_ctx->active_slave_count == 0) {
+ ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
return 0;
- } else {
- struct rte_eth_dev *slave_eth_dev;
- int i, link_up = 0;
+ }
- for (i = 0; i < internals->active_slave_count; i++) {
- slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
+ ethdev->data->dev_link.link_status = ETH_LINK_UP;
- (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
- wait_to_complete);
- if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
- link_up = 1;
- break;
- }
+ if (wait_to_complete)
+ link_update = rte_eth_link_get;
+ else
+ link_update = rte_eth_link_get_nowait;
+
+ switch (bond_ctx->mode) {
+ case BONDING_MODE_BROADCAST:
+ /**
+ * Setting link speed to UINT32_MAX to ensure we pick up the
+ * value of the first active slave
+ */
+ ethdev->data->dev_link.link_speed = UINT32_MAX;
+
+ /**
+ * Link speed is the minimum of all the slaves' link speeds, as
+ * packet loss will occur on a slave if transmission at a rate
+ * greater than its link speed is attempted
+ */
+ for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
+ link_update(bond_ctx->active_slaves[idx], &slave_link);
+
+ if (slave_link.link_speed <
+ ethdev->data->dev_link.link_speed)
+ ethdev->data->dev_link.link_speed =
+ slave_link.link_speed;
}
+ break;
+ case BONDING_MODE_ACTIVE_BACKUP:
+ /* Current primary slave */
+ link_update(bond_ctx->current_primary_port, &slave_link);
- bonded_eth_dev->data->dev_link.link_status = link_up;
+ ethdev->data->dev_link.link_speed = slave_link.link_speed;
+ break;
+ case BONDING_MODE_8023AD:
+ ethdev->data->dev_link.link_autoneg =
+ bond_ctx->mode4.slave_link.link_autoneg;
+ ethdev->data->dev_link.link_duplex =
+ bond_ctx->mode4.slave_link.link_duplex;
+ /* fall through to update link speed */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ /**
+ * In these modes the maximum theoretical link speed is the sum
+ * of all the slaves' link speeds
+ */
+ ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+
+ for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
+ link_update(bond_ctx->active_slaves[idx], &slave_link);
+
+ ethdev->data->dev_link.link_speed +=
+ slave_link.link_speed;
+ }
}
+
return 0;
}
+
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
@@ -1995,35 +2462,35 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
return;
_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}
-void
+int
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
- void *param)
+ void *param, void *ret_param __rte_unused)
{
- struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
+ struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
struct rte_eth_link link;
+ int rc = -1;
int i, valid_slave = 0;
uint8_t active_pos;
uint8_t lsc_flag = 0;
if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
- return;
+ return rc;
bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
- slave_eth_dev = &rte_eth_devices[port_id];
if (check_for_bonded_ethdev(bonded_eth_dev))
- return;
+ return rc;
internals = bonded_eth_dev->data->dev_private;
/* If the device isn't started don't handle interrupts */
if (!bonded_eth_dev->data->dev_started)
- return;
+ return rc;
/* verify that port_id is a valid slave of bonded port */
for (i = 0; i < internals->slave_count; i++) {
@@ -2034,7 +2501,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
}
if (!valid_slave)
- return;
+ return rc;
/* Search for port in active port list */
active_pos = find_slave_by_id(internals->active_slaves,
@@ -2043,7 +2510,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
if (active_pos < internals->active_slave_count)
- return;
+ return rc;
/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {
@@ -2053,20 +2520,6 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
lsc_flag = 1;
mac_address_slaves_update(bonded_eth_dev);
-
- /* Inherit eth dev link properties from first active slave */
- link_properties_set(bonded_eth_dev,
- &(slave_eth_dev->data->dev_link));
- } else {
- if (link_properties_valid(
- &bonded_eth_dev->data->dev_link, &link) != 0) {
- slave_eth_dev->data->dev_flags &=
- (~RTE_ETH_DEV_BONDED_SLAVE);
- RTE_LOG(ERR, PMD,
- "port %u invalid speed/duplex\n",
- port_id);
- return;
- }
}
activate_slave(bonded_eth_dev, port_id);
@@ -2077,19 +2530,13 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
bond_ethdev_primary_set(internals, port_id);
} else {
if (active_pos == internals->active_slave_count)
- return;
+ return rc;
/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);
- /* No active slaves, change link status to down and reset other
- * link properties */
- if (internals->active_slave_count < 1) {
+ if (internals->active_slave_count < 1)
lsc_flag = 1;
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-
- link_properties_reset(bonded_eth_dev);
- }
/* Update primary id, take first active slave from list or if none
* available set to -1 */
@@ -2102,6 +2549,12 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
}
}
+ /**
+ * Update bonded device link properties after any change to active
+ * slaves
+ */
+ bond_ethdev_link_update(bonded_eth_dev, 0);
+
if (lsc_flag) {
/* Cancel any possible outstanding interrupts if delays are enabled */
if (internals->link_up_delay_ms > 0 ||
@@ -2116,7 +2569,8 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
(void *)bonded_eth_dev);
else
_rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
} else {
if (internals->link_down_delay_ms > 0)
@@ -2125,9 +2579,11 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
(void *)bonded_eth_dev);
else
_rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
+ return 0;
}
static int
@@ -2272,12 +2728,6 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
* and internal (private) data
*/
- if (socket_id >= number_of_sockets()) {
- RTE_BOND_LOG(ERR,
- "Invalid socket id specified to create bonded device on.");
- goto err;
- }
-
/* reserve an ethdev entry */
eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
if (eth_dev == NULL) {
@@ -2308,7 +2758,6 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
internals->xmit_hash = xmit_l2_hash;
internals->user_defined_mac = 0;
- internals->link_props_set = 0;
internals->link_status_polling_enabled = 0;
@@ -2376,7 +2825,7 @@ bond_probe(struct rte_vdev_device *dev)
const char *name;
struct bond_dev_private *internals;
struct rte_kvargs *kvlist;
- uint8_t bonding_mode, socket_id;
+ uint8_t bonding_mode, socket_id/*, agg_mode*/;
int arg_count, port_id;
if (!dev)
@@ -2498,11 +2947,12 @@ bond_remove(struct rte_vdev_device *dev)
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
- char *name = dev->data->name;
+ const char *name = dev->device->name;
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_kvargs *kvlist = internals->kvlist;
int arg_count;
uint8_t port_id = dev - rte_eth_devices;
+ uint8_t agg_mode;
static const uint8_t default_rss_key[40] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
@@ -2590,6 +3040,21 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
return -1;
}
+ if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_AGG_MODE_KVARG,
+ &bond_ethdev_parse_slave_agg_mode_kvarg,
+ &agg_mode) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to parse agg selection mode for bonded device %s\n",
+ name);
+ }
+ if (internals->mode == BONDING_MODE_8023AD)
+ if (agg_mode != 0)
+ rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
+ }
+
/* Parse/add slave ports to bonded device */
if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
struct bond_ethdev_slave_ports slave_ports;
@@ -2753,6 +3218,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
"primary=<ifc> "
"mode=[0-6] "
"xmit_policy=[l2 | l23 | l34] "
+ "agg_mode=[count | stable | bandwidth] "
"socket_id=<int> "
"mac=<mac addr> "
"lsc_poll_period_ms=<int> "
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index c8db0900..1fe6ff88 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -45,6 +45,7 @@
#define PMD_BOND_SLAVE_PORT_KVARG ("slave")
#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary")
#define PMD_BOND_MODE_KVARG ("mode")
+#define PMD_BOND_AGG_MODE_KVARG ("agg_mode")
#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy")
#define PMD_BOND_SOCKET_ID_KVARG ("socket_id")
#define PMD_BOND_MAC_ADDR_KVARG ("mac")
@@ -132,8 +133,7 @@ struct bond_dev_private {
/**< Flag for whether MAC address is user defined or not */
uint8_t promiscuous_en;
/**< Enabled/disable promiscuous mode on bonding device */
- uint8_t link_props_set;
- /**< flag to denote if the link properties are set */
+
uint8_t link_status_polling_enabled;
uint32_t link_status_polling_interval_ms;
@@ -184,7 +184,7 @@ extern const struct eth_dev_ops default_dev_ops;
int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
-/* Search given slave array to find possition of given id.
+/* Search given slave array to find position of given id.
* Return slave pos or slaves_count if not found. */
static inline uint8_t
find_slave_by_id(uint8_t *slaves, uint8_t slaves_count, uint8_t slave_id) {
@@ -205,7 +205,7 @@ int
valid_bonded_port_id(uint8_t port_id);
int
-valid_slave_port_id(uint8_t port_id);
+valid_slave_port_id(uint8_t port_id, uint8_t mode);
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
@@ -216,11 +216,8 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_link *slave_dev_link);
-void
-link_properties_reset(struct rte_eth_dev *bonded_eth_dev);
-
int
-link_properties_valid(struct rte_eth_link *bonded_dev_link,
+link_properties_valid(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_link *slave_dev_link);
int
@@ -232,9 +229,6 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr);
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
-uint8_t
-number_of_sockets(void);
-
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
@@ -263,36 +257,40 @@ void
bond_ethdev_primary_set(struct bond_dev_private *internals,
uint8_t slave_port_id);
-void
+int
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
- void *param);
+ void *param, void *ret_param);
+
+int
+bond_ethdev_parse_slave_port_kvarg(const char *key,
+ const char *value, void *extra_args);
int
-bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_slave_mode_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
const char *value, void *extra_args);
int
-bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_socket_id_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_bond_mac_addr_kvarg(const char *key,
const char *value, void *extra_args);
int
-bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused,
+bond_ethdev_parse_time_ms_kvarg(const char *key,
const char *value, void *extra_args);
void
diff --git a/drivers/net/bonding/rte_eth_bond_version.map b/drivers/net/bonding/rte_eth_bond_version.map
index 2de0a7d3..0f4e847d 100644
--- a/drivers/net/bonding/rte_eth_bond_version.map
+++ b/drivers/net/bonding/rte_eth_bond_version.map
@@ -43,3 +43,16 @@ DPDK_16.07 {
rte_eth_bond_8023ad_setup;
} DPDK_16.04;
+
+DPDK_17.08 {
+ global:
+
+ rte_eth_bond_8023ad_dedicated_queues_enable;
+ rte_eth_bond_8023ad_dedicated_queues_disable;
+ rte_eth_bond_8023ad_agg_selection_get;
+ rte_eth_bond_8023ad_agg_selection_set;
+ rte_eth_bond_8023ad_conf_get;
+ rte_eth_bond_8023ad_setup;
+
+
+} DPDK_16.07;
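
A minimal sketch of driving the symbols exported above from an application, rather than through the agg_mode devarg shown earlier (a sketch only, assuming the new prototypes in rte_eth_bond_8023ad.h take a port id, and using hypothetical slave port ids 0 and 1):

#include <rte_eth_bond.h>
#include <rte_eth_bond_8023ad.h>

/* Sketch only: hypothetical slave port ids 0 and 1, error handling elided. */
static int
setup_lacp_bond(void)
{
	int bond_port = rte_eth_bond_create("net_bonding0",
					    BONDING_MODE_8023AD, 0 /* socket */);
	if (bond_port < 0)
		return bond_port;

	rte_eth_bond_slave_add(bond_port, 0);
	rte_eth_bond_slave_add(bond_port, 1);

	/* Steer LACP control frames to a dedicated queue pair on each slave;
	 * must be requested before the bonded port is started. */
	rte_eth_bond_8023ad_dedicated_queues_enable(bond_port);

	/* Select the aggregator election policy added in this release. */
	rte_eth_bond_8023ad_agg_selection_set(bond_port, AGG_STABLE);

	/* rte_eth_dev_configure()/queue setup/rte_eth_dev_start() follow as
	 * for any other ethdev. */
	return bond_port;
}
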
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 26807900..5e5f221e 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -148,6 +148,7 @@ struct sge_rspq { /* state for an SGE response queue */
void __iomem *bar2_addr; /* address of BAR2 Queue registers */
unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+ struct sge_qstat *stat;
unsigned int cidx; /* consumer index */
unsigned int gts_idx; /* last gts write sent */
@@ -459,7 +460,10 @@ static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_LIST_ID 0 /* Capability ID */
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
+#define PCI_EXP_DEVCTL 0x0008 /* Device control */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00E0 /* Max payload */
#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
@@ -706,7 +710,8 @@ void reclaim_completed_tx(struct sge_txq *q);
void t4_free_sge_resources(struct adapter *adap);
void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
-int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf);
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 11f139c9..1eda57d0 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +68,12 @@ enum {
PAUSE_AUTONEG = 1 << 2
};
+enum {
+ FEC_RS = 1 << 0,
+ FEC_BASER_RS = 1 << 1,
+ FEC_RESERVED = 1 << 2,
+};
+
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_frames; /* all good frames */
@@ -151,6 +157,11 @@ struct tp_params {
u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+ /* cached TP_OUT_CONFIG compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ int rx_pkt_encap;
+
/*
* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
* subset of the set of fields which may be present in the Compressed
@@ -210,7 +221,9 @@ struct adapter_params {
unsigned int sf_nsec; /* # of flash sectors */
unsigned int fw_vers;
+ unsigned int bs_vers;
unsigned int tp_vers;
+ unsigned int er_vers;
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
@@ -231,10 +244,12 @@ struct adapter_params {
struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
+ unsigned char requested_fec; /* Forward Error Correction requested by user */
+ unsigned char fec; /* actual Forward Error Correction in use */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
};
@@ -272,6 +287,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fl_pkt_align(struct adapter *adap);
int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size,
enum chip_type chip_compat);
@@ -374,7 +390,8 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_flash_cfg_addr(struct adapter *adapter);
-unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx);
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
@@ -382,9 +399,10 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
struct port_stats *offset);
void t4_clr_port_stats(struct adapter *adap, int idx);
void t4_reset_link_config(struct adapter *adap, int idx);
-int t4_get_fw_version(struct adapter *adapter, u32 *vers);
-int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_get_version_info(struct adapter *adapter);
+void t4_dump_version_info(struct adapter *adapter);
int t4_get_flash_params(struct adapter *adapter);
+int t4_get_chip_type(struct adapter *adap, int ver);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
int t4_init_rss_mode(struct adapter *adap, int mbox);
diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h
index 1ca68039..cd7a9282 100644
--- a/drivers/net/cxgbe/base/t4_chip_type.h
+++ b/drivers/net/cxgbe/base/t4_chip_type.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,7 @@
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
@@ -64,6 +65,10 @@ enum chip_type {
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
static inline int is_t4(enum chip_type chip)
@@ -76,4 +81,8 @@ static inline int is_t5(enum chip_type chip)
return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
}
+static inline int is_t6(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6);
+}
#endif /* __T4_CHIP_TYPE_H__ */
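
Since is_t6() relies on the nibble packing done by CHELSIO_CHIP_CODE(), a tiny self-check (illustrative only, assuming t4_chip_type.h is on the include path) shows how T6 devices are recognised:

#include <assert.h>
#include "t4_chip_type.h"

/* T6_A0 packs version CHELSIO_T6 (0x6) into the upper nibble and revision 0
 * into the lower one, so the version macro and is_t6() both recover it. */
static void
chip_type_example(void)
{
	assert(CHELSIO_CHIP_CODE(CHELSIO_T6, 0) == T6_A0);
	assert(CHELSIO_CHIP_VERSION(T6_A0) == CHELSIO_T6);
	assert(is_t6(T6_A0) && !is_t5(T6_A0) && !is_t4(T6_A0));
}
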
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 19afdac9..a8ccea00 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,6 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -57,7 +56,8 @@
#include "t4_regs_values.h"
#include "t4fw_interface.h"
-static void init_link_config(struct link_config *lc, unsigned int caps);
+static void init_link_config(struct link_config *lc, unsigned int pcaps,
+ unsigned int acaps);
/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
@@ -584,6 +584,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
switch (chip_version) {
case CHELSIO_T5:
+ case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
@@ -1379,6 +1380,567 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x51300, 0x51308,
};
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x114c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x1274,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f00,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x619c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d84,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de4,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9100, 0x9124,
+ 0x9400, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xd100, 0xd118,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0xf010, 0xf018,
+ 0xf020, 0xf028,
+ 0x11000, 0x11014,
+ 0x11048, 0x1106c,
+ 0x11074, 0x11088,
+ 0x11098, 0x11120,
+ 0x1112c, 0x1117c,
+ 0x11190, 0x112e0,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1206c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x192a4, 0x192b0,
+ 0x192bc, 0x192bc,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19418,
+ 0x19420, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cbc,
+ 0x19ce4, 0x19ce4,
+ 0x19cf0, 0x19cf8,
+ 0x19d00, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30168,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30320,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x308a0,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a14,
+ 0x30a1c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d14,
+ 0x30d1c, 0x30d3c,
+ 0x30d44, 0x30d4c,
+ 0x30d54, 0x30d74,
+ 0x30d7c, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x321f4,
+ 0x32200, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b38,
+ 0x32b20, 0x32b38,
+ 0x32b40, 0x32b58,
+ 0x32b60, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x33000, 0x3302c,
+ 0x33034, 0x33050,
+ 0x33058, 0x33058,
+ 0x33060, 0x3308c,
+ 0x3309c, 0x330ac,
+ 0x330c0, 0x330c0,
+ 0x330c8, 0x330d0,
+ 0x330d8, 0x330e0,
+ 0x330ec, 0x3312c,
+ 0x33134, 0x33150,
+ 0x33158, 0x33158,
+ 0x33160, 0x3318c,
+ 0x3319c, 0x331ac,
+ 0x331c0, 0x331c0,
+ 0x331c8, 0x331d0,
+ 0x331d8, 0x331e0,
+ 0x331ec, 0x33290,
+ 0x33298, 0x332c4,
+ 0x332e4, 0x33390,
+ 0x33398, 0x333c4,
+ 0x333e4, 0x3342c,
+ 0x33434, 0x33450,
+ 0x33458, 0x33458,
+ 0x33460, 0x3348c,
+ 0x3349c, 0x334ac,
+ 0x334c0, 0x334c0,
+ 0x334c8, 0x334d0,
+ 0x334d8, 0x334e0,
+ 0x334ec, 0x3352c,
+ 0x33534, 0x33550,
+ 0x33558, 0x33558,
+ 0x33560, 0x3358c,
+ 0x3359c, 0x335ac,
+ 0x335c0, 0x335c0,
+ 0x335c8, 0x335d0,
+ 0x335d8, 0x335e0,
+ 0x335ec, 0x33690,
+ 0x33698, 0x336c4,
+ 0x336e4, 0x33790,
+ 0x33798, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x3392c,
+ 0x33934, 0x33950,
+ 0x33958, 0x33958,
+ 0x33960, 0x3398c,
+ 0x3399c, 0x339ac,
+ 0x339c0, 0x339c0,
+ 0x339c8, 0x339d0,
+ 0x339d8, 0x339e0,
+ 0x339ec, 0x33a90,
+ 0x33a98, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b28,
+ 0x33b38, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c28,
+ 0x33c38, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34168,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34320,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x348a0,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a14,
+ 0x34a1c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d14,
+ 0x34d1c, 0x34d3c,
+ 0x34d44, 0x34d4c,
+ 0x34d54, 0x34d74,
+ 0x34d7c, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x361f4,
+ 0x36200, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b38,
+ 0x36b20, 0x36b38,
+ 0x36b40, 0x36b58,
+ 0x36b60, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x37000, 0x3702c,
+ 0x37034, 0x37050,
+ 0x37058, 0x37058,
+ 0x37060, 0x3708c,
+ 0x3709c, 0x370ac,
+ 0x370c0, 0x370c0,
+ 0x370c8, 0x370d0,
+ 0x370d8, 0x370e0,
+ 0x370ec, 0x3712c,
+ 0x37134, 0x37150,
+ 0x37158, 0x37158,
+ 0x37160, 0x3718c,
+ 0x3719c, 0x371ac,
+ 0x371c0, 0x371c0,
+ 0x371c8, 0x371d0,
+ 0x371d8, 0x371e0,
+ 0x371ec, 0x37290,
+ 0x37298, 0x372c4,
+ 0x372e4, 0x37390,
+ 0x37398, 0x373c4,
+ 0x373e4, 0x3742c,
+ 0x37434, 0x37450,
+ 0x37458, 0x37458,
+ 0x37460, 0x3748c,
+ 0x3749c, 0x374ac,
+ 0x374c0, 0x374c0,
+ 0x374c8, 0x374d0,
+ 0x374d8, 0x374e0,
+ 0x374ec, 0x3752c,
+ 0x37534, 0x37550,
+ 0x37558, 0x37558,
+ 0x37560, 0x3758c,
+ 0x3759c, 0x375ac,
+ 0x375c0, 0x375c0,
+ 0x375c8, 0x375d0,
+ 0x375d8, 0x375e0,
+ 0x375ec, 0x37690,
+ 0x37698, 0x376c4,
+ 0x376e4, 0x37790,
+ 0x37798, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x3792c,
+ 0x37934, 0x37950,
+ 0x37958, 0x37958,
+ 0x37960, 0x3798c,
+ 0x3799c, 0x379ac,
+ 0x379c0, 0x379c0,
+ 0x379c8, 0x379d0,
+ 0x379d8, 0x379e0,
+ 0x379ec, 0x37a90,
+ 0x37a98, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b28,
+ 0x37b38, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c28,
+ 0x37c38, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413c8,
+ 0x413d0, 0x413dc,
+ 0x413f0, 0x413f0,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x441ac,
+ 0x441b4, 0x4427c,
+ 0x442c0, 0x443ac,
+ 0x443b4, 0x4447c,
+ 0x444c0, 0x445ac,
+ 0x445b4, 0x4467c,
+ 0x446c0, 0x447ac,
+ 0x447b4, 0x4487c,
+ 0x448c0, 0x449ac,
+ 0x449b4, 0x44a7c,
+ 0x44ac0, 0x44bac,
+ 0x44bb4, 0x44c7c,
+ 0x44cc0, 0x44dac,
+ 0x44db4, 0x44e7c,
+ 0x44ec0, 0x44fac,
+ 0x44fb4, 0x4507c,
+ 0x450c0, 0x451ac,
+ 0x451b4, 0x451fc,
+ 0x45800, 0x45804,
+ 0x45810, 0x45830,
+ 0x45840, 0x45860,
+ 0x45868, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a04,
+ 0x45a10, 0x45a30,
+ 0x45a40, 0x45a60,
+ 0x45a68, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x47820, 0x4782c,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50400,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x51020,
+ 0x51028, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -1393,6 +1955,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
default:
dev_err(adap,
"Unsupported chip version %d\n", chip_version);
@@ -1929,35 +2496,180 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
}
/**
+ * t4_get_exprom_version - return the Expansion ROM version (if any)
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the Expansion ROM header from FLASH and returns the version
+ * number (if present) through the @vers return value pointer. We return
+ * this in the Firmware Version Format since it's convenient. Return
+ * 0 on success, -ENOENT if no Expansion ROM is present.
+ */
+static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
+{
+ struct exprom_header {
+ unsigned char hdr_arr[16]; /* must start with 0x55aa */
+ unsigned char hdr_ver[4]; /* Expansion ROM version */
+ } *hdr;
+ u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
+ sizeof(u32))];
+ int ret;
+
+ ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
+ ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
+ if (ret)
+ return ret;
+
+ hdr = (struct exprom_header *)exprom_header_buf;
+ if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
+ return -ENOENT;
+
+ *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
+ V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
+ V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
+ V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
+ return 0;
+}
+
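A minimal sketch of the version packing used above, assuming the V_/G_FW_HDR_FW_VER_* macros from t4fw_interface.h and <stdio.h> for the print; the header byte values are hypothetical:

	static void exprom_ver_example(void)
	{
		unsigned char hdr_ver[4] = { 1, 16, 33, 0 };	/* hypothetical 1.16.33.0 */
		u32 vers = V_FW_HDR_FW_VER_MAJOR(hdr_ver[0]) |
			   V_FW_HDR_FW_VER_MINOR(hdr_ver[1]) |
			   V_FW_HDR_FW_VER_MICRO(hdr_ver[2]) |
			   V_FW_HDR_FW_VER_BUILD(hdr_ver[3]);

		/* Decoding with the matching getters prints "1.16.33.0" */
		printf("%u.%u.%u.%u\n",
		       G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers),
		       G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers));
	}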
+/**
* t4_get_fw_version - read the firmware version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the FW version from flash.
*/
-int t4_get_fw_version(struct adapter *adapter, u32 *vers)
+static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
/**
+ * t4_get_bs_version - read the firmware bootstrap version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW Bootstrap version from flash.
+ */
+static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
* t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the TP microcode version from flash.
*/
-int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, tp_microcode_ver),
1, vers, 0);
}
-#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
- FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+/**
+ * t4_get_version_info - extract various chip/firmware version information
+ * @adapter: the adapter
+ *
+ * Reads various chip/firmware version numbers and stores them into the
+ * adapter's Adapter Parameters structure. If any of the reads fails,
+ * the first failure is returned, but all of the version numbers
+ * will be read.
+ */
+int t4_get_version_info(struct adapter *adapter)
+{
+ int ret = 0;
+
+#define FIRST_RET(__getvinfo) \
+ do { \
+ int __ret = __getvinfo; \
+ if (__ret && !ret) \
+ ret = __ret; \
+ } while (0)
+
+ FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
+ FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
+ FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
+ FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
+
+#undef FIRST_RET
+
+ return ret;
+}
+
+/**
+ * t4_dump_version_info - dump all of the adapter configuration IDs
+ * @adapter: the adapter
+ *
+ * Dumps the various bits of adapter configuration version/revision
+ * ID information. This is typically called at some point after
+ * t4_get_version_info() has been called.
+ */
+void t4_dump_version_info(struct adapter *adapter)
+{
+ /**
+ * Device information.
+ */
+ dev_info(adapter, "Chelsio rev %d\n",
+ CHELSIO_CHIP_RELEASE(adapter->params.chip));
+
+ /**
+ * Firmware Version.
+ */
+ if (!adapter->params.fw_vers)
+ dev_warn(adapter, "No firmware loaded\n");
+ else
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ /**
+ * Bootstrap Firmware Version.
+ */
+ if (!adapter->params.bs_vers)
+ dev_warn(adapter, "No bootstrap loaded\n");
+ else
+ dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
+
+ /**
+ * TP Microcode Version.
+ */
+ if (!adapter->params.tp_vers)
+ dev_warn(adapter, "No TP Microcode loaded\n");
+ else
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+
+ /**
+ * Expansion ROM version.
+ */
+ if (!adapter->params.er_vers)
+ dev_info(adapter, "No Expansion ROM loaded\n");
+ else
+ dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
+}
+
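A usage sketch showing how the two helpers pair up (mirroring adap_init0() and print_adapter_info() later in this patch); the warning text is illustrative:

	int ret = t4_get_version_info(adap);

	if (ret)
		dev_warn(adap, "Some version IDs could not be read, err %d\n", -ret);
	t4_dump_version_info(adap);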
+#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
+ FW_PORT_CAP_ANEG)
/**
* t4_link_l1cfg - apply link configuration to MAC/PHY
@@ -1976,14 +2688,24 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
- unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+ unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+ unsigned int fc, fec;
lc->link_ok = 0;
+ fc = 0;
if (lc->requested_fc & PAUSE_RX)
fc |= FW_PORT_CAP_FC_RX;
if (lc->requested_fc & PAUSE_TX)
fc |= FW_PORT_CAP_FC_TX;
+ fec = 0;
+ if (lc->requested_fec & FEC_RS)
+ fec |= FW_PORT_CAP_FEC_RS;
+ if (lc->requested_fec & FEC_BASER_RS)
+ fec |= FW_PORT_CAP_FEC_BASER_RS;
+ if (lc->requested_fec & FEC_RESERVED)
+ fec |= FW_PORT_CAP_FEC_RESERVED;
+
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
@@ -1994,13 +2716,16 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
- fc);
- lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ fc | fec);
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = lc->requested_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
- lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
+ fec | mdi);
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = lc->requested_fec;
} else {
- c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
}
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2044,7 +2769,9 @@ int t4_flash_cfg_addr(struct adapter *adapter)
void t4_intr_enable(struct adapter *adapter)
{
u32 val = 0;
- u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
@@ -2069,7 +2796,9 @@ void t4_intr_enable(struct adapter *adapter)
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
@@ -2098,6 +2827,12 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"QSA",
"QSFP",
"BP40_BA",
+ "KR4_100G",
+ "CR4_QSFP",
+ "CR_QSFP",
+ "CR2_QSFP",
+ "SFP28",
+ "KR_SFP28",
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -2108,21 +2843,90 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
/**
* t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
- * @idx: the port index
+ * @pidx: the port index
*
* Returns a bitmap indicating which MPS buffer groups are associated
* with the given port. Bit i is set if buffer group i is used by the
* port.
*/
-unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
{
- u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+ dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << (2 * pidx);
+ }
+ break;
+ }
- if (n == 0)
- return idx == 0 ? 0xf : 0;
- if (n == 1)
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
+}
+
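Worked example of the bitmaps returned above; the values follow directly from the switch, and the T4/T5 numbers also apply to the TP channel map further below:

	unsigned int pidx = 1;
	unsigned int t5_2port = 3 << (2 * pidx);	/* 0xc: buffer groups 2 and 3 */
	unsigned int t6_2port = 1 << (2 * pidx);	/* 0x4: buffer group 2 */
	unsigned int t4_4port = 1 << pidx;		/* 0x2: buffer group 1 */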
+/**
+ * t4_get_tp_ch_map - return TP ingress channels associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which TP Ingress Channels are associated with
+ * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
+ */
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+		dev_warn(adapter, "TP Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+	/* Note that these happen to be the same values as the MPS
+ * Buffer Group Map for these Chips. But we replicate the code
+ * here because they're really separate concepts.
+ */
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << pidx;
+ }
+ break;
+ }
+
+ dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
}
/**
@@ -2573,6 +3377,49 @@ int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
}
/**
+ * t4_fl_pkt_align - return the fl packet alignment
+ * @adap: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for these and hence the alignment for
+ * the next packet offset is the maximum of the two.
+ */
+int t4_fl_pkt_align(struct adapter *adap)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = t4_read_reg(adap, A_SGE_CONTROL);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adap->params.chip)) {
+ sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
+
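A worked example of the maximum taken above, using the X_* shift constants from t4_regs_values.h with illustrative register field values:

	/* T5 example: pad boundary field 0 (32B), pack boundary field 1 (64B) */
	unsigned int ingpadboundary = 1 << (0 + X_INGPADBOUNDARY_SHIFT);	/* 32 */
	unsigned int ingpackboundary = 1 << (1 + X_INGPACKBOUNDARY_SHIFT);	/* 64 */
	unsigned int fl_align = max(ingpadboundary, ingpackboundary);		/* 64 */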
+/**
* t4_fixup_host_params_compat - fix up host-dependent parameters
* @adap: the adapter
* @page_size: the host's Base Page Size
@@ -2617,6 +3464,10 @@ int t4_fixup_host_params_compat(struct adapter *adap,
X_INGPADBOUNDARY_SHIFT) |
V_EGRSTATUSPAGESIZE(stat_len != 64));
else {
+ unsigned int pack_align;
+ unsigned int ingpad, ingpack;
+ unsigned int pcie_cap;
+
/*
* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
@@ -2630,11 +3481,33 @@ int t4_fixup_host_params_compat(struct adapter *adap,
* Size (the minimum unit of transfer to/from Memory). If we
* have a Padding Boundary which is smaller than the Memory
* Line Size, that'll involve a Read-Modify-Write cycle on the
- * Memory Controller which is never good. For T5 the smallest
- * Padding Boundary which we can select is 32 bytes which is
- * larger than any known Memory Controller Line Size so we'll
- * use that.
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
*/
+ pack_align = fl_align;
+ pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ unsigned int mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
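Continuing with the variables above and a hypothetical DEVCTL value, the decode works out as:

	devctl = 0x2050;				/* hypothetical: MPS field (bits 7:5) == 2 */
	mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;	/* 9 */
	mps = 1 << mps_log;				/* 512-byte Maximum Payload Size */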
/*
* N.B. T5 has a different interpretation of the "0" value for
@@ -2644,19 +3517,36 @@ int t4_fixup_host_params_compat(struct adapter *adap,
* on the other hand, if we wanted 32 bytes, the best we can
* really do is 64 bytes ...
*/
- if (fl_align <= 32) {
+ if (pack_align <= 16) {
+ ingpack = X_INGPACKBOUNDARY_16B;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = X_INGPACKBOUNDARY_64B;
fl_align = 64;
- fl_align_log = 6;
+ } else {
+ unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
+
+ ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
+ fl_align = pack_align;
}
+
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
+ if (is_t5(adap->params.chip))
+ ingpad = X_INGPADBOUNDARY_32B;
+ else
+ ingpad = X_T6_INGPADBOUNDARY_8B;
t4_set_reg_field(adap, A_SGE_CONTROL,
V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
F_EGRSTATUSPAGESIZE,
- V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) |
+ V_INGPADBOUNDARY(ingpad) |
V_EGRSTATUSPAGESIZE(stat_len != 64));
t4_set_reg_field(adap, A_SGE_CONTROL2,
V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
- V_INGPACKBOUNDARY(fl_align_log -
- X_INGPACKBOUNDARY_SHIFT));
+ V_INGPACKBOUNDARY(ingpack));
}
/*
@@ -3194,7 +4084,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
/* link/module state change message */
- int speed = 0, fc = 0, i;
+ unsigned int speed = 0, fc = 0, i;
int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
struct port_info *pi = NULL;
struct link_config *lc;
@@ -3212,8 +4102,12 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
speed = ETH_SPEED_NUM_1G;
else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
speed = ETH_SPEED_NUM_10G;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+ speed = ETH_SPEED_NUM_25G;
else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
speed = ETH_SPEED_NUM_40G;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+ speed = ETH_SPEED_NUM_100G;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
@@ -3271,19 +4165,37 @@ void t4_reset_link_config(struct adapter *adap, int idx)
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
- * @caps: link capabilities
+ * @pcaps: link Port Capabilities
+ * @acaps: link current Advertised Port Capabilities
*
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int pcaps,
+ unsigned int acaps)
{
- lc->supported = caps;
+ unsigned int fec;
+
+ lc->supported = pcaps;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = 0;
lc->fc = 0;
+
+ /**
+ * For Forward Error Control, we default to whatever the Firmware
+ * tells us the Link is currently advertising.
+ */
+ fec = 0;
+ if (acaps & FW_PORT_CAP_FEC_RS)
+ fec |= FEC_RS;
+ if (acaps & FW_PORT_CAP_FEC_BASER_RS)
+ fec |= FEC_BASER_RS;
+ if (acaps & FW_PORT_CAP_FEC_RESERVED)
+ fec |= FEC_RESERVED;
+ lc->requested_fec = fec;
+ lc->fec = fec;
+
if (lc->supported & FW_PORT_CAP_ANEG) {
lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
@@ -3312,8 +4224,12 @@ static int t4_wait_dev_ready(struct adapter *adapter)
msleep(500);
whoami = t4_read_reg(adapter, A_PL_WHOAMI);
- return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
- ? 0 : -EIO);
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ whoami);
+ return -EIO;
}
struct flash_desc {
@@ -3329,47 +4245,96 @@ int t4_get_flash_params(struct adapter *adapter)
* sectors.
*/
static struct flash_desc supported_flash[] = {
- { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
};
int ret;
- unsigned int i;
- u32 info = 0;
-
+ u32 flashid = 0;
+ unsigned int part, manufacturer;
+ unsigned int density, size;
+
+ /**
+ * Issue a Read ID Command to the Flash part. We decode supported
+ * Flash parts and their sizes from this. There's a newer Query
+ * Command which can retrieve detailed geometry information but
+ * many Flash parts don't support it.
+ */
ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = sf1_read(adapter, 3, 0, 1, &info);
+ ret = sf1_read(adapter, 3, 0, 1, &flashid);
t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
if (ret < 0)
return ret;
- for (i = 0; i < ARRAY_SIZE(supported_flash); ++i)
- if (supported_flash[i].vendor_and_model_id == info) {
- adapter->params.sf_size = supported_flash[i].size_mb;
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ adapter->params.sf_size =
+ supported_flash[part].size_mb;
adapter->params.sf_nsec =
adapter->params.sf_size / SF_SEC_SIZE;
- return 0;
+ goto found;
}
+ }
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
- return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- adapter->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- adapter->params.sf_nsec = 64;
- else
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /**
+ * This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14:
+ size = 1 << 20; /* 1MB */
+ break;
+ case 0x15:
+ size = 1 << 21; /* 2MB */
+ break;
+ case 0x16:
+ size = 1 << 22; /* 4MB */
+ break;
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ case 0x19:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x20:
+ size = 1 << 26; /* 64MB */
+ break;
+ case 0x21:
+ size = 1 << 27; /* 128MB */
+ break;
+ case 0x22:
+ size = 1 << 28; /* 256MB */
+ break;
+ default:
+ dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
+ return -EINVAL;
+ }
+
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+ break;
+ }
+ default:
+ dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
return -EINVAL;
- adapter->params.sf_size = 1 << info;
+ }
+found:
/*
* We should reject adapters with FLASHes which are too small. So, emit
* a warning.
*/
- if (adapter->params.sf_size < FLASH_MIN_SIZE) {
- dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
- adapter->params.sf_size, FLASH_MIN_SIZE);
- }
+ if (adapter->params.sf_size < FLASH_MIN_SIZE)
+ dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
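Worked decode of a hypothetical Micron Read ID response, following the code above (the ID value itself is made up):

	u32 flashid = 0x00182020;			/* hypothetical Read ID result */
	unsigned int manufacturer = flashid & 0xff;	/* 0x20: Micron/Numonix */
	unsigned int density = (flashid >> 16) & 0xff;	/* 0x18 -> 16MB part */
	unsigned int size = 1 << 24;			/* 16MB, from the table above */
	unsigned int nsec = size / SF_SEC_SIZE;		/* erase sectors */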
@@ -3390,6 +4355,33 @@ static void set_pcie_completion_timeout(struct adapter *adapter,
}
/**
+ * t4_get_chip_type - Determine chip type from device ID
+ * @adap: the adapter
+ * @ver: adapter version
+ */
+int t4_get_chip_type(struct adapter *adap, int ver)
+{
+ enum chip_type chip = 0;
+ u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
+
+	/* Map the supplied version to a chip type code */
+ switch (ver) {
+ case CHELSIO_T5:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ break;
+ case CHELSIO_T6:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ break;
+ default:
+ dev_err(adap, "Device %d is not supported\n",
+ adap->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ return chip;
+}
+
+/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
*
@@ -3426,6 +4418,15 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.arch.nchan = NCHAN;
adapter->params.arch.vfcount = 128;
break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
+ break;
default:
dev_err(adapter, "%s: Device %d is not supported\n",
__func__, adapter->params.pci.device_id);
@@ -3436,8 +4437,11 @@ int t4_prep_adapter(struct adapter *adapter)
t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
ret = t4_get_flash_params(adapter);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
+ -ret);
return ret;
+ }
adapter->params.cim_la_size = CIMLA_SIZE;
@@ -3609,6 +4613,14 @@ int t4_init_tp_params(struct adapter *adap)
&adap->params.tp.ingress_config, 1,
A_TP_INGRESS_CONFIG);
+ /* For T6, cache the adapter's compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ v = t4_read_reg(adap, A_TP_OUT_CONFIG);
+ adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
+ }
+
/*
* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
@@ -3747,7 +4759,8 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->port_type = G_FW_PORT_CMD_PTYPE(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
+ init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
+ be16_to_cpu(c.u.info.acap));
j++;
}
return 0;
diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h
index 5e62c417..07498841 100644
--- a/drivers/net/cxgbe/base/t4_hw.h
+++ b/drivers/net/cxgbe/base/t4_hw.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,6 +124,14 @@ struct rsp_ctrl {
enum {
/*
+ * Various Expansion-ROM boot images, etc.
+ */
+ FLASH_EXP_ROM_START_SEC = 0,
+ FLASH_EXP_ROM_NSECS = 6,
+ FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+ FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+ /*
* Location of firmware image in FLASH.
*/
FLASH_FW_START_SEC = 8,
@@ -132,6 +140,14 @@ enum {
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
/*
+ * Location of bootstrap firmware image in FLASH.
+ */
+ FLASH_FWBOOTSTRAP_START_SEC = 27,
+ FLASH_FWBOOTSTRAP_NSECS = 1,
+ FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+ FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
+ /*
* Location of Firmware Configuration File in FLASH.
*/
FLASH_CFG_START_SEC = 31,
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 4b04cd0d..6acd749a 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -262,6 +262,20 @@ struct cpl_rx_pkt {
#define V_RXF_IP6(x) ((x) << S_RXF_IP6)
#define F_RXF_IP6 V_RXF_IP6(1U)
+/* rx_pkt.err_vec fields */
+/* In T6, rx_pkt.err_vec indicates
+ * RxError Error vector (16b) or
+ * Encapsulating header length (8b),
+ * Outer encapsulation type (2b) and
+ * compressed error vector (6b) if CRxPktEnc is
+ * enabled in TP_OUT_CONFIG
+ */
+#define S_T6_COMPR_RXERR_VEC 0
+#define M_T6_COMPR_RXERR_VEC 0x3F
+#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_VEC)
+#define G_T6_COMPR_RXERR_VEC(x) \
+ (((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC)
+
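A small illustration of the field extraction (the raw value is hypothetical): with the compressed format enabled, only the low 6 bits carry the error vector:

	u16 raw_err_vec = 0x1234;			/* hypothetical rx_pkt.err_vec */
	u16 compr = G_T6_COMPR_RXERR_VEC(raw_err_vec);	/* 0x34 */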
/* cpl_fw*.type values */
enum {
FW_TYPE_RSSCPL = 4,
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
index 110fadb0..1230e738 100644
--- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,19 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+
+ /* T6 adapter */
+ CH_PCI_ID_TABLE_FENTRY(0x6001), /* T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6002), /* T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6003), /* T6425-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6005), /* T6225-OCP */
+ CH_PCI_ID_TABLE_FENTRY(0x6007), /* T62100-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6008), /* T62100-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x600d), /* T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6011), /* T6225-LL-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6014), /* T61100-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6080), /* Custom T6225-CR SFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6081), /* Custom T62100-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 9c132dcd..1100e16f 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -420,6 +420,26 @@
#define A_PCIE_FW 0x30b8
#define A_PCIE_FW_PF 0x30bc
+#define A_PCIE_CFG2 0x3018
+
+#define S_TOTMAXTAG 0
+#define M_TOTMAXTAG 0x3U
+#define V_TOTMAXTAG(x) ((x) << S_TOTMAXTAG)
+
+#define S_T6_TOTMAXTAG 0
+#define M_T6_TOTMAXTAG 0x7U
+#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
+
+#define A_PCIE_CMD_CFG 0x5980
+
+#define S_MINTAG 0
+#define M_MINTAG 0xffU
+#define V_MINTAG(x) ((x) << S_MINTAG)
+
+#define S_T6_MINTAG 0
+#define M_T6_MINTAG 0xffU
+#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
+
/* registers for module CIM */
#define CIM_BASE_ADDR 0x7b00
@@ -443,6 +463,12 @@
#define F_UPCRST V_UPCRST(1U)
/* registers for module TP */
+#define A_TP_OUT_CONFIG 0x7d04
+
+#define S_CRXPKTENC 3
+#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC)
+#define F_CRXPKTENC V_CRXPKTENC(1U)
+
#define TP_BASE_ADDR 0x7d00
#define A_TP_TIMER_RESOLUTION 0x7d90
@@ -781,6 +807,11 @@
#define V_SOURCEPF(x) ((x) << S_SOURCEPF)
#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF)
+#define S_T6_SOURCEPF 9
+#define M_T6_SOURCEPF 0x7U
+#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
+#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
+
#define A_PL_PF_INT_ENABLE 0x3c4
#define S_PFSW 3
diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h
index d7d3144c..9085ff6d 100644
--- a/drivers/net/cxgbe/base/t4_regs_values.h
+++ b/drivers/net/cxgbe/base/t4_regs_values.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,10 +55,15 @@
#define X_RXPKTCPLMODE_SPLIT 1
#define X_INGPCIEBOUNDARY_32B 0
#define X_INGPADBOUNDARY_SHIFT 5
+#define X_INGPADBOUNDARY_32B 0
+
+#define X_T6_INGPADBOUNDARY_SHIFT 3
+#define X_T6_INGPADBOUNDARY_8B 0
/* CONTROL2 register */
#define X_INGPACKBOUNDARY_SHIFT 5
#define X_INGPACKBOUNDARY_16B 0
+#define X_INGPACKBOUNDARY_64B 1
/* GTS register */
#define X_TIMERREG_RESTART_COUNTER 6
@@ -77,7 +82,7 @@
/*
* Ingress Context field values
*/
-#define X_UPDATEDELIVERY_INTERRUPT 1
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
#define X_RSPD_TYPE_FLBUF 0
#define X_RSPD_TYPE_CPL 1
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 74f19fe7..6ca4f318 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,6 +84,7 @@ enum fw_memtype {
enum fw_wr_opcodes {
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_PKTS_WR = 0x09,
+ FW_ETH_TX_PKTS2_WR = 0x78,
};
/*
@@ -591,6 +592,13 @@ struct fw_iq_cmd {
#define G_FW_IQ_CMD_IQESIZE(x) \
(((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)
+#define S_FW_IQ_CMD_IQRO 30
+#define M_FW_IQ_CMD_IQRO 0x1
+#define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO)
+#define G_FW_IQ_CMD_IQRO(x) \
+ (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)
+#define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U)
+
#define S_FW_IQ_CMD_IQFLINTCONGEN 27
#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1
#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN)
@@ -1061,7 +1069,7 @@ struct fw_vi_stats_cmd {
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
FW_PORT_CAP_SPEED_1G = 0x0002,
- FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_25G = 0x0004,
FW_PORT_CAP_SPEED_10G = 0x0008,
FW_PORT_CAP_SPEED_40G = 0x0010,
FW_PORT_CAP_SPEED_100G = 0x0020,
@@ -1070,13 +1078,19 @@ enum fw_port_cap {
FW_PORT_CAP_ANEG = 0x0100,
FW_PORT_CAP_MDIX = 0x0200,
FW_PORT_CAP_MDIAUTO = 0x0400,
- FW_PORT_CAP_FEC = 0x0800,
- FW_PORT_CAP_TECHKR = 0x1000,
- FW_PORT_CAP_TECHKX4 = 0x2000,
+ FW_PORT_CAP_FEC_RS = 0x0800,
+ FW_PORT_CAP_FEC_BASER_RS = 0x1000,
+ FW_PORT_CAP_FEC_RESERVED = 0x2000,
FW_PORT_CAP_802_3_PAUSE = 0x4000,
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
+#define S_FW_PORT_CAP_SPEED 0
+#define M_FW_PORT_CAP_SPEED 0x3f
+#define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED)
+#define G_FW_PORT_CAP_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED)
+
enum fw_port_mdi {
FW_PORT_CAP_MDI_AUTO,
};
@@ -1279,7 +1293,12 @@ enum fw_port_type {
FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */
FW_PORT_TYPE_BP40_BA = 15,
/* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANGE */
-
+ FW_PORT_TYPE_KR4_100G = 16, /* No, 4, 100G/40G/25G, Backplane */
+ FW_PORT_TYPE_CR4_QSFP = 17, /* No, 4, 100G/40G/25G */
+ FW_PORT_TYPE_CR_QSFP = 18, /* No, 1, 25G Spider cable */
+ FW_PORT_TYPE_CR2_QSFP = 19, /* No, 2, 50G */
+ FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
+ FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 9120c439..f9891545 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -47,6 +47,7 @@
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
int cxgbe_probe(struct adapter *adapter);
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
int cxgbe_up(struct adapter *adap);
int cxgbe_down(struct port_info *pi);
void cxgbe_close(struct adapter *adapter);
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index 1551cbf5..03bba9fe 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -226,6 +226,15 @@ static inline int cxgbe_fls(int x)
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
+/**
+ * cxgbe_ffs - find first bit set
+ * @x: the word to search
+ */
+static inline int cxgbe_ffs(int x)
+{
+ return x ? __builtin_ffs(x) : 0;
+}
+
static inline unsigned long ilog2(unsigned long n)
{
unsigned int e = 0;
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 598a7440..7bca4561 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,7 +58,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -104,7 +103,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
pkts_remain = nb_pkts - total_sent;
for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
- ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent]);
+ ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
+ nb_pkts);
if (ret < 0)
break;
}
@@ -148,7 +148,7 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
.nb_align = 1,
};
- device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
@@ -174,7 +174,7 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->rx_desc_lim = cxgbe_desc_lim;
device_info->tx_desc_lim = cxgbe_desc_lim;
- device_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G;
+ cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}
static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -619,7 +619,7 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, t4_ethrx_handler,
- t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
+ t4_get_tp_ch_map(adapter, pi->tx_chan), mp,
queue_idx, socket_id);
dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
@@ -1010,7 +1010,7 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
@@ -1056,7 +1056,7 @@ static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_cxgbe_pmd = {
.id_table = cxgb4_pci_tbl,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = eth_cxgbe_pci_probe,
.remove = eth_cxgbe_pci_remove,
};
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 71c3671d..b709fe2b 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2016 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,7 +58,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -206,9 +205,12 @@ static inline bool is_x_1g_port(const struct link_config *lc)
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return ((lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_100G) != 0);
+ unsigned int speeds, high_speeds;
+
+ speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
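Illustration of the speed masking above, using the port capability bits defined in t4fw_interface.h (the capability word is hypothetical):

	unsigned int supported = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_25G;
	unsigned int speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(supported));
	unsigned int high_speeds = speeds &
				   ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
	/* high_speeds == FW_PORT_CAP_SPEED_25G, so the port counts as "10G or faster" */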
inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -298,7 +300,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
- init_rspq(adap, &r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
r->usembufs = 1;
r->fl.size = (r->usembufs ? 1024 : 72);
}
@@ -363,6 +365,17 @@ static int init_rss(struct adapter *adap)
return 0;
}
+/**
+ * Dump basic information about the adapter.
+ */
+static void print_adapter_info(struct adapter *adap)
+{
+ /**
+ * Hardware/Firmware/etc. Version/Revision IDs.
+ */
+ t4_dump_version_info(adap);
+}
+
static void print_port_info(struct adapter *adap)
{
int i;
@@ -374,13 +387,17 @@ static void print_port_info(struct adapter *adap)
char *bufp = buf;
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
- bufp += sprintf(bufp, "100/");
+ bufp += sprintf(bufp, "100M/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
- bufp += sprintf(bufp, "1000/");
+ bufp += sprintf(bufp, "1G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s",
@@ -396,6 +413,36 @@ static void print_port_info(struct adapter *adap)
}
}
+static void configure_pcie_ext_tag(struct adapter *adapter)
+{
+ u16 v;
+ int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+
+ if (!pos)
+ return;
+
+ if (pos > 0) {
+ t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_EXT_TAG;
+ t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
+ if (is_t6(adapter->params.chip)) {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
+ V_T6_TOTMAXTAG(7));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_T6_MINTAG(M_T6_MINTAG),
+ V_T6_MINTAG(8));
+ } else {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_TOTMAXTAG(M_TOTMAXTAG),
+ V_TOTMAXTAG(3));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_MINTAG(M_MINTAG),
+ V_MINTAG(8));
+ }
+ }
+}
+
/*
* Tweak configuration based on system architecture, etc. Most of these have
* defaults assigned to them by Firmware Configuration Files (if we're using
@@ -427,6 +474,9 @@ static int adap_init0_tweaks(struct adapter *adapter)
V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
+ t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
+ V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));
+
t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
V_IDMAARBROUNDROBIN(1U));
@@ -575,7 +625,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
- * initialization constants burried in the driver.
+ * initialization constants buried in the driver.
*/
dev_info(adapter,
"Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
@@ -641,18 +691,7 @@ static int adap_init0(struct adapter *adap)
state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
}
- t4_get_fw_version(adap, &adap->params.fw_vers);
- t4_get_tp_version(adap, &adap->params.tp_vers);
-
- dev_info(adap, "fw: %u.%u.%u.%u, TP: %u.%u.%u.%u\n",
- G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers),
- G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers),
- G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers),
- G_FW_HDR_FW_VER_BUILD(adap->params.fw_vers),
- G_FW_HDR_FW_VER_MAJOR(adap->params.tp_vers),
- G_FW_HDR_FW_VER_MINOR(adap->params.tp_vers),
- G_FW_HDR_FW_VER_MICRO(adap->params.tp_vers),
- G_FW_HDR_FW_VER_BUILD(adap->params.tp_vers));
+ t4_get_version_info(adap);
ret = t4_get_core_clock(adap, &adap->params.vpd);
if (ret < 0) {
@@ -662,26 +701,6 @@ static int adap_init0(struct adapter *adap)
}
/*
- * Find out what ports are available to us. Note that we need to do
- * this before calling adap_init0_no_config() since it needs nports
- * and portvec ...
- */
- v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
- if (ret < 0) {
- dev_err(adap, "%s: failure in t4_queury_params; error = %d\n",
- __func__, ret);
- goto bye;
- }
-
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
-
- dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
- adap->params.nports);
-
- /*
* If the firmware is initialized already (and we're not forcing a
* master initialization), note that we're living with existing
* adapter parameters. Otherwise, it's time to try initializing the
@@ -705,6 +724,22 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+ /* Find out what ports are available to us. */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0) {
+ dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
+ __func__, ret);
+ goto bye;
+ }
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
+ dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
+ adap->params.nports);
+
/*
* Give the SGE code a chance to pull in anything that it needs ...
* Note that this must be called after we retrieve our VPD parameters
@@ -793,6 +828,7 @@ static int adap_init0(struct adapter *adap)
}
t4_init_sge_params(adap);
t4_init_tp_params(adap);
+ configure_pcie_ext_tag(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -833,10 +869,10 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
mod_str[pi->mod_type]);
else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
- dev_info(adap, "Port%d: unsupported optical port module inserted\n",
+ dev_info(adap, "Port%d: unsupported port module inserted\n",
pi->port_id);
else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
- dev_info(adap, "Port%d: unknown port module inserted, forcing TWINAX\n",
+ dev_info(adap, "Port%d: unknown port module inserted\n",
pi->port_id);
else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
dev_info(adap, "Port%d: transceiver module error\n",
@@ -997,6 +1033,110 @@ void cxgbe_enable_rx_queues(struct port_info *pi)
}
/**
+ * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @speed_caps: Device Info Speed Capabilities
+ *
+ * Translate a Firmware Port Capabilities specification to Device Info
+ * Speed Capabilities.
+ */
+static void fw_caps_to_speed_caps(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ u32 *speed_caps)
+{
+#define SET_SPEED(__speed_name) \
+ do { \
+ *speed_caps |= ETH_LINK_ ## __speed_name; \
+ } while (0)
+
+#define FW_CAPS_TO_SPEED(__fw_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ SET_SPEED(__fw_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ FW_CAPS_TO_SPEED(SPEED_100M);
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ case FW_PORT_TYPE_BP4_AP:
+ SET_SPEED(SPEED_1G);
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_SPEED(SPEED_40G);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_KR_SFP28:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ break;
+
+ case FW_PORT_TYPE_CR2_QSFP:
+ SET_SPEED(SPEED_50G);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ FW_CAPS_TO_SPEED(SPEED_40G);
+ FW_CAPS_TO_SPEED(SPEED_100G);
+ break;
+
+ default:
+ break;
+ }
+
+#undef FW_CAPS_TO_SPEED
+#undef SET_SPEED
+}
+
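The token pasting above relies on the firmware and ethdev speed names sharing a suffix; for example, FW_CAPS_TO_SPEED(SPEED_25G) expands (roughly) to:

	if (fw_caps & FW_PORT_CAP_SPEED_25G)
		*speed_caps |= ETH_LINK_SPEED_25G;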
+/**
+ * cxgbe_get_speed_caps - Fetch supported speed capabilities
+ * @pi: Underlying port's info
+ * @speed_caps: Device Info speed capabilities
+ *
+ * Fetch supported speed capabilities of the underlying port.
+ */
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
+{
+ *speed_caps = 0;
+
+ fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported,
+ speed_caps);
+
+ if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG))
+ *speed_caps |= ETH_LINK_SPEED_FIXED;
+}
+
+/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
@@ -1062,10 +1202,20 @@ void cxgbe_close(struct adapter *adapter)
int cxgbe_probe(struct adapter *adapter)
{
struct port_info *pi;
+ int chip;
int func, i;
int err = 0;
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ chip = t4_get_chip_type(adapter,
+ CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
+ if (chip < 0)
+ return chip;
+
+ func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
- func = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
adapter->mbox = func;
adapter->pf = func;
@@ -1182,6 +1332,7 @@ allocate_mac:
cfg_queues(adapter->eth_dev);
+ print_adapter_info(adapter);
print_port_info(adapter);
err = init_rss(adapter);
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 2f9e12c9..5376fc50 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -57,7 +57,6 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -388,7 +387,7 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
struct rte_pktmbuf_pool_private *mbp_priv;
u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
- /* Use jumbo mtu buffers iff mbuf data room size can fit jumbo data. */
+ /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
if (jumbo_en &&
((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
@@ -415,12 +414,18 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
}
rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->data_off =
+ (uint16_t)(RTE_PTR_ALIGN((char *)mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM,
+ adap->sge.fl_align) -
+ (char *)mbuf->buf_addr);
mbuf->next = NULL;
mbuf->nb_segs = 1;
mbuf->port = rxq->rspq.port_id;
- mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
+ mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_physaddr +
+ mbuf->data_off,
+ adap->sge.fl_align);
mapping |= buf_size_idx;
*d++ = cpu_to_be64(mapping);
set_rx_sw_desc(sd, mbuf, mapping);
@@ -591,7 +596,7 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
* Write Header (incorporated as part of the cpl_tx_pkt_lso and
* cpl_tx_pkt structures), followed by either a TX Packet Write CPL
* message or, if we're doing a Large Send Offload, an LSO CPL message
- * with an embeded TX Packet Write CPL message.
+ * with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(m->nb_segs);
if (m->tso_segsz)
@@ -681,6 +686,10 @@ static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
+#define PIDXDIFF(head, tail, wrap) \
+ ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
+#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
+
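Worked example of the wrap-around arithmetic (queue size and indices are illustrative):

	/* 1024-entry queue, consumer at 1020, status-page producer at 4 */
	unsigned int pending = PIDXDIFF(1020, 4, 1024);	/* 1024 - 1020 + 4 = 8 */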
/**
* ring_tx_db - ring a Tx queue's doorbell
* @adap: the adapter
@@ -771,7 +780,7 @@ static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
}
if (likely(csum_type >= TX_CSUM_TCPIP)) {
- int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
+ u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
int eth_hdr_len = m->l2_len;
if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
@@ -846,7 +855,7 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
/* fill the pkts WR header */
wr = (void *)&q->desc[q->pidx];
- wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
ndesc = flits_to_desc(q->coalesce.flits);
@@ -969,7 +978,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
struct rte_mbuf *mbuf,
int flits, struct adapter *adap,
const struct port_info *pi,
- dma_addr_t *addr)
+ dma_addr_t *addr, uint16_t nb_pkts)
{
u64 cntrl, *end;
struct sge_txq *q = &txq->q;
@@ -979,6 +988,10 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
struct tx_sw_desc *sd;
unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+#ifdef RTE_LIBRTE_CXGBE_TPUT
+ RTE_SET_USED(nb_pkts);
+#endif
+
if (q->coalesce.type == 0) {
mc = (struct ulp_txpkt *)q->coalesce.ptr;
mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
@@ -1048,7 +1061,11 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
sd->coalesce.idx = (idx & 1) + 1;
	/* send the coalesced work request if max reached */
- if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)
+ if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM
+#ifndef RTE_LIBRTE_CXGBE_TPUT
+ || q->coalesce.idx >= nb_pkts
+#endif
+ )
ship_tx_pkt_coalesce_wr(adap, txq);
return 0;
}
@@ -1060,7 +1077,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
-int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts)
{
const struct port_info *pi;
struct cpl_tx_pkt_lso_core *lso;
@@ -1114,7 +1132,7 @@ out_free:
}
rte_prefetch0((volatile void *)addr);
return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
- pi, addr);
+ pi, addr, nb_pkts);
} else {
return -EBUSY;
}
@@ -1185,9 +1203,15 @@ out_free:
else
lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
cpl = (void *)(lso + 1);
- cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- V_TXPKT_IPHDR_LEN(l3hdr_len) |
- V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+ else
+ cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
+ TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(l3hdr_len);
txq->stats.tso++;
txq->stats.tx_cso += m->tso_segsz;
}
@@ -1344,10 +1368,16 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct rss_header *rss_hdr;
bool csum_ok;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ u16 err_vec;
rss_hdr = (const void *)rsp;
pkt = (const void *)&rsp[1];
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ /* Compressed error vector is enabled for T6 only */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
+ else
+ err_vec = ntohs(pkt->err_vec);
+ csum_ok = pkt->csum_calc && !err_vec;
mbuf = t4_pktgl_to_mbuf(si);
if (unlikely(!mbuf)) {
@@ -1384,20 +1414,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return 0;
}
-/**
- * is_new_response - check if a response is newly written
- * @r: the response descriptor
- * @q: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline bool is_new_response(const struct rsp_ctrl *r,
- const struct sge_rspq *q)
-{
- return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
-}
-
#define CXGB4_MSG_AN ((void *)1)
/**
@@ -1439,12 +1455,12 @@ static int process_responses(struct sge_rspq *q, int budget,
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
while (likely(budget_left)) {
+ if (q->cidx == ntohs(q->stat->pidx))
+ break;
+
rc = (const struct rsp_ctrl *)
((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
- if (!is_new_response(rc, q))
- break;
-
/*
* Ensure response has been read
*/
@@ -1452,63 +1468,101 @@ static int process_responses(struct sge_rspq *q, int budget,
rsp_type = G_RSPD_TYPE(rc->u.type_gen);
if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
- const struct rx_sw_desc *rsd =
- &rxq->fl.sdesc[rxq->fl.cidx];
- const struct rss_header *rss_hdr =
- (const void *)q->cur_desc;
- const struct cpl_rx_pkt *cpl =
- (const void *)&q->cur_desc[1];
- bool csum_ok = cpl->csum_calc && !cpl->err_vec;
- struct rte_mbuf *pkt, *npkt;
- u32 len, bufsz;
-
- len = ntohl(rc->pldbuflen_qid);
- BUG_ON(!(len & F_RSPD_NEWBUF));
- pkt = rsd->buf;
- npkt = pkt;
- len = G_RSPD_LEN(len);
- pkt->pkt_len = len;
-
- /* Chain mbufs into len if necessary */
- while (len) {
- struct rte_mbuf *new_pkt = rsd->buf;
-
- bufsz = min(get_buf_size(q->adapter, rsd), len);
- new_pkt->data_len = bufsz;
- unmap_rx_buf(&rxq->fl);
- len -= bufsz;
- npkt->next = new_pkt;
- npkt = new_pkt;
- pkt->nb_segs++;
- rsd = &rxq->fl.sdesc[rxq->fl.cidx];
- }
- npkt->next = NULL;
- pkt->nb_segs--;
-
- if (cpl->l2info & htonl(F_RXF_IP)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV4;
- if (unlikely(!csum_ok))
- pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
- if ((cpl->l2info &
- htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
- pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- } else if (cpl->l2info & htonl(F_RXF_IP6)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV6;
- }
+ unsigned int stat_pidx;
+ int stat_pidx_diff;
+
+ stat_pidx = ntohs(q->stat->pidx);
+ stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
+ while (stat_pidx_diff && budget_left) {
+ const struct rx_sw_desc *rsd =
+ &rxq->fl.sdesc[rxq->fl.cidx];
+ const struct rss_header *rss_hdr =
+ (const void *)q->cur_desc;
+ const struct cpl_rx_pkt *cpl =
+ (const void *)&q->cur_desc[1];
+ struct rte_mbuf *pkt, *npkt;
+ u32 len, bufsz;
+ bool csum_ok;
+ u16 err_vec;
+
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc +
+ (q->iqe_len - sizeof(*rc)));
+
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+ if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
+ break;
+
+ len = ntohl(rc->pldbuflen_qid);
+ BUG_ON(!(len & F_RSPD_NEWBUF));
+ pkt = rsd->buf;
+ npkt = pkt;
+ len = G_RSPD_LEN(len);
+ pkt->pkt_len = len;
+
+ /* Compressed error vector is enabled for
+ * T6 only
+ */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(
+ ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ /* Chain mbufs into len if necessary */
+ while (len) {
+ struct rte_mbuf *new_pkt = rsd->buf;
+
+ bufsz = min(get_buf_size(q->adapter,
+ rsd), len);
+ new_pkt->data_len = bufsz;
+ unmap_rx_buf(&rxq->fl);
+ len -= bufsz;
+ npkt->next = new_pkt;
+ npkt = new_pkt;
+ pkt->nb_segs++;
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ }
+ npkt->next = NULL;
+ pkt->nb_segs--;
+
+ if (cpl->l2info & htonl(F_RXF_IP)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV4;
+ if (unlikely(!csum_ok))
+ pkt->ol_flags |=
+ PKT_RX_IP_CKSUM_BAD;
+
+ if ((cpl->l2info &
+ htonl(F_RXF_UDP | F_RXF_TCP)) &&
+ !csum_ok)
+ pkt->ol_flags |=
+ PKT_RX_L4_CKSUM_BAD;
+ } else if (cpl->l2info & htonl(F_RXF_IP6)) {
+ pkt->packet_type = RTE_PTYPE_L3_IPV6;
+ }
- if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
- pkt->ol_flags |= PKT_RX_RSS_HASH;
- pkt->hash.rss = ntohl(rss_hdr->hash_val);
- }
+ if (!rss_hdr->filter_tid &&
+ rss_hdr->hash_type) {
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->hash.rss =
+ ntohl(rss_hdr->hash_val);
+ }
+
+ if (cpl->vlan_ex) {
+ pkt->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt->vlan_tci = ntohs(cpl->vlan);
+ }
+
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += pkt->pkt_len;
+ rx_pkts[budget - budget_left] = pkt;
- if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN_PKT;
- pkt->vlan_tci = ntohs(cpl->vlan);
+ rspq_next(q);
+ budget_left--;
+ stat_pidx_diff--;
}
- rxq->stats.pkts++;
- rxq->stats.rx_bytes += pkt->pkt_len;
- rx_pkts[budget - budget_left] = pkt;
+ continue;
} else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
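The rewritten loop above no longer checks a per-descriptor generation bit; it compares the software consumer index against the producer index the hardware writes into the queue's status page (via P_IDXDIFF()). A standalone sketch of that index arithmetic with illustrative names; wraparound is the only subtlety:

#include <stdbool.h>

struct ring_idx {
	unsigned int cidx;	/* next entry the driver will consume */
	unsigned int pidx;	/* last entry the hardware produced   */
	unsigned int size;	/* number of descriptors in the ring  */
};

/* entries written by hardware but not yet processed */
static inline unsigned int idx_diff(const struct ring_idx *r)
{
	return r->pidx >= r->cidx ? r->pidx - r->cidx
				  : r->size - (r->cidx - r->pidx);
}

static inline bool ring_empty(const struct ring_idx *r)
{
	return idx_diff(r) == 0;
}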
@@ -1523,35 +1577,6 @@ static int process_responses(struct sge_rspq *q, int budget,
rspq_next(q);
budget_left--;
-
- if (R_IDXDIFF(q, gts_idx) >= 64) {
- unsigned int cidx_inc = R_IDXDIFF(q, gts_idx);
- unsigned int params;
- u32 val;
-
- if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
- __refill_fl(q->adapter, &rxq->fl);
- params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
- q->next_intr_params = params;
- val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
-
- if (unlikely(!q->bar2_addr))
- t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
- val |
- V_INGRESSQID((u32)q->cntxt_id));
- else {
- writel(val | V_INGRESSQID(q->bar2_qid),
- (void *)((uintptr_t)q->bar2_addr +
- SGE_UDB_GTS));
- /*
- * This Write memory Barrier will force the
- * write to the User Doorbell area to be
- * flushed.
- */
- wmb();
- }
- q->gts_idx = q->cidx;
- }
}
/*
@@ -1569,10 +1594,38 @@ static int process_responses(struct sge_rspq *q, int budget,
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done)
{
- int err = 0;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ unsigned int cidx_inc;
+ unsigned int params;
+ u32 val;
*work_done = process_responses(q, budget, rx_pkts);
- return err;
+
+ if (*work_done) {
+ cidx_inc = R_IDXDIFF(q, gts_idx);
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ params = q->intr_params;
+ q->next_intr_params = params;
+ val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
+
+ if (unlikely(!q->bar2_addr)) {
+ t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
+ val | V_INGRESSQID((u32)q->cntxt_id));
+ } else {
+ writel(val | V_INGRESSQID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
+ /* This Write memory Barrier will force the
+ * write to the User Doorbell area to be
+ * flushed.
+ */
+ wmb();
+ }
+ q->gts_idx = q->cidx;
+ }
+ return 0;
}
/**
@@ -1641,7 +1694,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = cxgbe_roundup(iq->size, 16);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->data->drv_name,
+ eth_dev->device->driver->name,
fwevtq ? "fwq_ring" : "rx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1662,24 +1715,25 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
V_FW_IQ_CMD_IQASYNCH(fwevtq) |
V_FW_IQ_CMD_VIID(pi->viid) |
V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
- V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
+ V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
- htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
+ pi->tx_chan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);
+ c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ F_FW_IQ_CMD_IQRO);
if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
fl);
- enum chip_type chip = (enum chip_type)CHELSIO_CHIP_VERSION(
- adap->params.chip);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
/*
* Allocate the ring for the hardware free list (with space
@@ -1694,7 +1748,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = cxgbe_roundup(fl->size, 8);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->data->drv_name,
+ eth_dev->device->driver->name,
fwevtq ? "fwq_ring" : "fl_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1725,9 +1779,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* Hence maximum allowed burst size will be 448 bytes.
*/
c.fl0dcaen_to_fl0cidxfthresh =
- htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
- V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
- X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
+ htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMIN_128B :
+ X_FETCHBURSTMIN_64B) |
+ V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMAX_512B :
+ X_FETCHBURSTMAX_256B));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
@@ -1746,6 +1803,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
&iq->bar2_qid);
iq->size--; /* subtract status entry */
+ iq->stat = (void *)&iq->desc[iq->size * 8];
iq->eth_dev = eth_dev;
iq->handler = hnd;
iq->port_id = pi->port_id;
@@ -1890,7 +1948,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->data->drv_name, "tx_ring",
+ eth_dev->device->driver->name, "tx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -2183,8 +2241,7 @@ static int t4_sge_init_soft(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_control2, sge_conm_ctrl;
- unsigned int ingpadboundary, ingpackboundary;
+ u32 sge_control, sge_conm_ctrl;
int ret, egress_threshold;
/*
@@ -2194,34 +2251,7 @@ int t4_sge_init(struct adapter *adap)
sge_control = t4_read_reg(adap, A_SGE_CONTROL);
s->pktshift = G_PKTSHIFT(sge_control);
s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
-
- /*
- * T4 uses a single control field to specify both the PCIe Padding and
- * Packing Boundary. T5 introduced the ability to specify these
- * separately. The actual Ingress Packet Data alignment boundary
- * within Packed Buffer Mode is the maximum of these two
- * specifications.
- */
- ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
- s->fl_align = ingpadboundary;
-
- if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
- /*
- * T5 has a weird interpretation of one of the PCIe Packing
- * Boundary values. No idea why ...
- */
- sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
- ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
- if (ingpackboundary == X_INGPACKBOUNDARY_16B)
- ingpackboundary = 16;
- else
- ingpackboundary = 1 << (ingpackboundary +
- X_INGPACKBOUNDARY_SHIFT);
-
- s->fl_align = max(ingpadboundary, ingpackboundary);
- }
-
+ s->fl_align = t4_fl_pkt_align(adap);
ret = t4_sge_init_soft(adap);
if (ret < 0) {
dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
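The padding/packing boundary computation removed here is now expected to come from t4_fl_pkt_align(). A rough sketch of what such a helper has to do, based on the comment above; the field decoding and the shift constant are placeholders, not the real register layout:

/* boundary fields are power-of-two exponents read from SGE_CONTROL/CONTROL2 */
static unsigned int fl_pkt_align_sketch(unsigned int pad_field,
					unsigned int pack_field,
					int is_t4, int use_unpacked_mode)
{
	unsigned int pad = 1U << (pad_field + 5);	/* 5: example shift */
	unsigned int pack;

	if (is_t4 || use_unpacked_mode)
		return pad;

	/* T5+ encodes the packing boundary separately; one value means 16B */
	pack = (pack_field == 0) ? 16 : 1U << (pack_field + 5);

	return pad > pack ? pad : pack;
}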
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index ce65542a..32cd819b 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -1,7 +1,7 @@
# BSD LICENSE
#
# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright (c) 2016 NXP. All rights reserved.
+# Copyright 2016 NXP.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 3dc60ccc..4c82aa87 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <dpaa2_hw_pvt.h>
@@ -91,8 +90,9 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
&tc_cfg);
rte_free(p_params);
if (ret) {
- RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
- " err code: %d\n", ret);
+ RTE_LOG(ERR, PMD,
+ "Setting distribution for Rx failed with err: %d\n",
+ ret);
return ret;
}
@@ -134,8 +134,9 @@ int dpaa2_remove_flow_dist(
&tc_cfg);
rte_free(p_params);
if (ret) {
- RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
- " err code: %d\n", ret);
+ RTE_LOG(ERR, PMD,
+ "Setting distribution for Rx failed with err: %d\n",
+ ret);
return ret;
}
return ret;
@@ -306,15 +307,22 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
*/
/* ... rx buffer layout ... */
- tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
- tot_size = RTE_ALIGN_CEIL(tot_size,
- DPAA2_PACKET_LAYOUT_ALIGN);
+ tot_size = RTE_PKTMBUF_HEADROOM;
+ tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
-
- layout.data_head_room =
- tot_size - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
+ layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+
+ layout.pass_frame_status = 1;
+ layout.private_data_size = DPAA2_FD_PTA_SIZE;
+ layout.pass_parser_result = 1;
+ layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
+ layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
+ DPAA2_MBUF_HW_ANNOTATION;
retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_RX, &layout);
if (retcode) {
@@ -327,9 +335,8 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
bpool_cfg.num_dpbp = 1;
bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
bpool_cfg.pools[0].backup_pool = 0;
- bpool_cfg.pools[0].buffer_size =
- RTE_ALIGN_CEIL(bp_list->buf_pool.size,
- 256 /*DPAA2_PACKET_LAYOUT_ALIGN*/);
+ bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
+ DPAA2_PACKET_LAYOUT_ALIGN);
retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
if (retcode != 0) {
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
index 9324c6a3..e68febff 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 45764420..429b3a08 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,7 +42,6 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <rte_fslmc.h>
#include <fslmc_logs.h>
@@ -50,10 +49,14 @@
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
-
+#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
static struct rte_dpaa2_driver rte_dpaa2_pmd;
+static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
+static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
+static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
+static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
/**
* Atomically reads the link status information from global
@@ -107,6 +110,89 @@ dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
+static int
+dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return -1;
+ }
+
+ if (on)
+ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
+ priv->token, vlan_id);
+ else
+ ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
+ priv->token, vlan_id);
+
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
+ ret, vlan_id, priv->hw_id);
+
+ return ret;
+}
+
+static void
+dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
+ priv->token, true);
+ else
+ ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
+ priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
+ ret);
+ }
+}
+
+static int
+dpaa2_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version,
+ size_t fw_size)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct mc_soc_version mc_plat_info = {0};
+ struct mc_version mc_ver_info = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
+ RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");
+
+ if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
+ RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
+
+ ret = snprintf(fw_version, fw_size,
+ "%x-%d.%d.%d",
+ mc_plat_info.svr,
+ mc_ver_info.major,
+ mc_ver_info.minor,
+ mc_ver_info.revision);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
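dpaa2_fw_version_get() follows the ethdev convention of returning 0 when the caller's buffer is big enough and the required length (including the terminating NUL) otherwise. A small usage sketch, assuming the generic rte_eth_dev_fw_version_get() wrapper; the buffer size is arbitrary:

#include <stdio.h>
#include <rte_ethdev.h>

static void print_fw_version(uint8_t port_id)
{
	char buf[32];
	int ret = rte_eth_dev_fw_version_get(port_id, buf, sizeof(buf));

	if (ret == 0)
		printf("port %u firmware: %s\n", (unsigned int)port_id, buf);
	else if (ret > 0)
		printf("port %u: version string needs a %d byte buffer\n",
		       (unsigned int)port_id, ret);
	else
		printf("port %u: fw_version_get failed (%d)\n",
		       (unsigned int)port_id, ret);
}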
static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -176,13 +262,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
for (i = 0; i < priv->nb_tx_queues; i++) {
mc_q->dev = dev;
- mc_q->flow_id = DPNI_NEW_FLOW_ID;
+ mc_q->flow_id = 0xffff;
priv->tx_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ dpaa2_q->cscn = rte_malloc(NULL,
+ sizeof(struct qbman_result), 16);
+ if (!dpaa2_q->cscn)
+ goto fail_tx;
}
vq_id = 0;
- for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
- dist_idx++) {
+ for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
mcq->tc_index = DPAA2_DEF_TC;
mcq->flow_id = dist_idx;
@@ -190,6 +280,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
}
return 0;
+fail_tx:
+ i -= 1;
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ rte_free(dpaa2_q->cscn);
+ priv->tx_vq[i--] = NULL;
+ }
+ i = priv->nb_rx_queues;
fail:
i -= 1;
mc_q = priv->rx_vq[0];
@@ -212,6 +310,20 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ if (eth_conf->rxmode.jumbo_frame == 1) {
+ if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
+ ret = dpaa2_dev_mtu_set(dev,
+ eth_conf->rxmode.max_rx_pkt_len);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "unable to set mtu. check config\n");
+ return ret;
+ }
+ } else {
+ return -1;
+ }
+ }
+
/* Check for correct configuration */
if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
data->nb_rx_queues > 1) {
@@ -248,6 +360,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct mc_soc_version mc_plat_info = {0};
struct dpaa2_queue *dpaa2_q;
struct dpni_queue cfg;
uint8_t options = 0;
@@ -270,21 +383,25 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
- /*Get the tc id and flow id from given VQ id*/
- flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
+ /*Get the flow id from given VQ id*/
+ flow_id = rx_queue_id % priv->nb_rx_queues;
memset(&cfg, 0, sizeof(struct dpni_queue));
options = options | DPNI_QUEUE_OPT_USER_CTX;
cfg.user_context = (uint64_t)(dpaa2_q);
/*if ls2088 or rev2 device, enable the stashing */
- if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
+
+ if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
+ PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
+
+ if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
options |= DPNI_QUEUE_OPT_FLC;
cfg.flc.stash_control = true;
cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
/* 00 00 00 - last 6 bit represent annotation, context stashing,
* data stashing setting 01 01 00 (0x14) to enable
- * 1 line annotation, 1 line context
+ * 1 line data, 1 line annotation
*/
cfg.flc.value |= 0x14;
}
@@ -295,6 +412,25 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
+ if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
+ struct dpni_taildrop taildrop;
+
+ taildrop.enable = 1;
+ /*enabling per rx queue congestion control */
+ taildrop.threshold = CONG_THRESHOLD_RX_Q;
+ taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
+ PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
+ rx_queue_id);
+ ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index, flow_id, &taildrop);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in setting the rx flow"
+ " err : = %d\n", ret);
+ return -1;
+ }
+ }
+
dev->data->rx_queues[rx_queue_id] = dpaa2_q;
return 0;
}
@@ -319,19 +455,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
/* Return if queue already configured */
- if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
+ if (dpaa2_q->flow_id != 0xffff)
return 0;
memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
- if (priv->num_tc == 1) {
- tc_id = 0;
- flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
- } else {
- tc_id = tx_queue_id;
- flow_id = 0;
- }
+ tc_id = tx_queue_id;
+ flow_id = 0;
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
tc_id, flow_id, options, &tx_flow_cfg);
@@ -357,6 +488,35 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
dpaa2_q->tc_index = tc_id;
+ if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
+ struct dpni_congestion_notification_cfg cong_notif_cfg;
+
+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
+ /* Notify that the queue is not congested when the data in
+ * the queue is below this threshold.
+ */
+ cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
+ cong_notif_cfg.message_ctx = 0;
+ cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
+ cong_notif_cfg.notification_mode =
+ DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+ DPNI_CONG_OPT_COHERENT_WRITE;
+
+ ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_QUEUE_TX,
+ tc_id,
+ &cong_notif_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "Error in setting tx congestion notification: = %d",
+ -ret);
+ return -ret;
+ }
+ }
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
return 0;
}
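The enter/exit thresholds above give the congestion notification a hysteresis band (512 frames to enter, 480 to leave, per the defines added to dpaa2_ethdev.h further down). A toy illustration of that state machine, not driver code:

#include <stdbool.h>

#define ENTER_THRESHOLD	512	/* frames queued: become congested   */
#define EXIT_THRESHOLD	480	/* frames queued: leave congestion   */

static bool update_congestion(bool congested, unsigned int frames_in_queue)
{
	if (!congested && frames_in_queue >= ENTER_THRESHOLD)
		return true;		/* crossed the upper threshold */
	if (congested && frames_in_queue <= EXIT_THRESHOLD)
		return false;		/* dropped below the lower threshold */
	return congested;		/* inside the band: keep state */
}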
@@ -390,7 +550,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == dpaa2_dev_rx)
+ if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
return ptypes;
return NULL;
}
@@ -417,6 +577,9 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
return ret;
}
+ /* Power up the phy. Needed to make the link go Up */
+ dpaa2_dev_set_link_up(dev);
+
ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &qdid);
if (ret) {
@@ -479,6 +642,9 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
"code = %d\n", ret);
return ret;
}
+ /* VLAN Offload Settings */
+ if (priv->max_vlan_filters)
+ dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
return 0;
}
@@ -497,6 +663,8 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ dpaa2_dev_set_link_down(dev);
+
ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
@@ -512,12 +680,23 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
+ struct rte_eth_dev_data *data = dev->data;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- int ret;
+ int i, ret;
+ struct rte_eth_link link;
+ struct dpaa2_queue *dpaa2_q;
PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
+ if (dpaa2_q->cscn) {
+ rte_free(dpaa2_q->cscn);
+ dpaa2_q->cscn = NULL;
+ }
+ }
+
/* Clean the device first */
ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
@@ -525,6 +704,9 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
" error code %d\n", ret);
return;
}
+
+ memset(&link, 0, sizeof(link));
+ dpaa2_dev_atomic_write_link_status(dev, &link);
}
static void
@@ -538,13 +720,17 @@ dpaa2_dev_promiscuous_enable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
+ RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
}
static void
@@ -558,13 +744,65 @@ dpaa2_dev_promiscuous_disable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
+ RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);
+
+ if (dev->data->all_multicast == 0) {
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
+ priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD,
+ "Unable to disable M promisc mode %d\n",
+ ret);
+ }
+}
+
+static void
+dpaa2_dev_allmulticast_enable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
+}
+
+static void
+dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ /* must remain on for all promiscuous */
+ if (dev->data->promiscuous == 1)
+ return;
+
+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
}
static int
@@ -578,7 +816,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return -EINVAL;
}
@@ -586,6 +824,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
return -EINVAL;
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
/* Set the Max Rx frame length as 'mtu' +
* Maximum Ethernet header length
*/
@@ -599,6 +842,79 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
+static int
+dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return -1;
+ }
+
+ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, addr->addr_bytes);
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "error: Adding the MAC ADDR failed: err = %d\n", ret);
+ return 0;
+}
+
+static void
+dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *macaddr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ macaddr = &data->mac_addrs[index];
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, macaddr->addr_bytes);
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "error: Removing the MAC ADDR failed: err = %d\n", ret);
+}
+
+static void
+dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return;
+ }
+
+ ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
+ priv->token, addr->addr_bytes);
+
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "error: Setting the MAC ADDR failed %d\n", ret);
+}
static
void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
@@ -614,12 +930,12 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (!dpni) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
if (!stats) {
- RTE_LOG(ERR, PMD, "stats is NULL");
+ RTE_LOG(ERR, PMD, "stats is NULL\n");
return;
}
@@ -647,7 +963,11 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
if (retcode)
goto err;
- stats->ierrors = value.page_2.ingress_discarded_frames;
+ /* Ingress drop frame count due to configured rules */
+ stats->ierrors = value.page_2.ingress_filtered_frames;
+ /* Ingress drop frame count due to error */
+ stats->ierrors += value.page_2.ingress_discarded_frames;
+
stats->oerrors = value.page_2.egress_discarded_frames;
stats->imissed = value.page_2.ingress_nobuffer_discards;
@@ -668,7 +988,7 @@ void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return;
}
@@ -697,7 +1017,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "error : dpni is NULL");
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
return 0;
}
memset(&old, 0, sizeof(old));
@@ -705,7 +1025,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
+ RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
return -1;
}
@@ -732,6 +1052,253 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Toggle the DPNI to enable, if not already enabled.
+ * This is not strictly PHY up/down - it is more of logical toggling.
+ */
+static int
+dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ int en = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "DPNI is NULL\n");
+ return ret;
+ }
+
+ /* Check if DPNI is currently enabled */
+ ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
+ if (ret) {
+ /* Unable to obtain dpni status; Not continuing */
+ PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ return -EINVAL;
+ }
+
+ /* Enable link if not already enabled */
+ if (!en) {
+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ return -EINVAL;
+ }
+ }
+ /* changing tx burst function to start enqueues */
+ dev->tx_pkt_burst = dpaa2_dev_tx;
+ dev->data->dev_link.link_status = 1;
+
+ PMD_DRV_LOG(INFO, "Port %d Link UP successful", dev->data->port_id);
+ return ret;
+}
+
+/**
+ * Toggle the DPNI to disable, if not already disabled.
+ * This is not strictly PHY up/down - it is more of logical toggling.
+ */
+static int
+dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ int dpni_enabled = 0;
+ int retries = 10;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
+ return ret;
+ }
+
+ /*changing tx burst function to avoid any more enqueues */
+ dev->tx_pkt_burst = dummy_dev_tx;
+
+ /* Loop while dpni_disable() attempts to drain the egress FQs
+ * and confirm them back to us.
+ */
+ do {
+ ret = dpni_disable(dpni, 0, priv->token);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
+ return ret;
+ }
+ ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
+ return ret;
+ }
+ if (dpni_enabled)
+ /* Allow the MC some slack */
+ rte_delay_us(100 * 1000);
+ } while (dpni_enabled && --retries);
+
+ if (!retries) {
+ PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
+ /* todo - we may have to manually clean up queues.
+ */
+ } else {
+ PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
+ dev->data->port_id);
+ }
+
+ dev->data->dev_link.link_status = 0;
+
+ return ret;
+}
+
+static int
+dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ struct dpni_link_state state = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL || fc_conf == NULL) {
+ RTE_LOG(ERR, PMD, "device not configured\n");
+ return ret;
+ }
+
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
+ return ret;
+ }
+
+ memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ if (state.options & DPNI_LINK_OPT_PAUSE) {
+ /* DPNI_LINK_OPT_PAUSE set
+ * if ASYM_PAUSE not set,
+ * RX Side flow control (handle received Pause frame)
+ * TX side flow control (send Pause frame)
+ * if ASYM_PAUSE set,
+ * RX Side flow control (handle received Pause frame)
+ * No TX side flow control (send Pause frame disabled)
+ */
+ if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
+ fc_conf->mode = RTE_FC_FULL;
+ else
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ } else {
+ /* DPNI_LINK_OPT_PAUSE not set
+ * if ASYM_PAUSE set,
+ * TX side flow control (send Pause frame)
+ * No RX side flow control (No action on pause frame rx)
+ * if ASYM_PAUSE not set,
+ * Flow control disabled
+ */
+ if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return ret;
+}
+
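The PAUSE/ASYM_PAUSE decoding in dpaa2_flow_ctrl_get() above reads as a small truth table. A condensed sketch using the flow-control modes from rte_ethdev.h:

#include <stdbool.h>
#include <rte_ethdev.h>

static enum rte_eth_fc_mode decode_fc_mode(bool pause, bool asym_pause)
{
	if (pause)
		/* pause handled on Rx; Tx pause frames only if symmetric */
		return asym_pause ? RTE_FC_RX_PAUSE : RTE_FC_FULL;

	/* no Rx-side handling; Tx pause frames only if asymmetric */
	return asym_pause ? RTE_FC_TX_PAUSE : RTE_FC_NONE;
}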
+static int
+dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ int ret = -EINVAL;
+ struct dpaa2_dev_priv *priv;
+ struct fsl_mc_io *dpni;
+ struct dpni_link_state state = {0};
+ struct dpni_link_cfg cfg = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = dev->data->dev_private;
+ dpni = (struct fsl_mc_io *)priv->hw;
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ return ret;
+ }
+
+ /* It is necessary to obtain the current state before setting fc_conf
+ * as MC would return error in case rate, autoneg or duplex values are
+ * different.
+ */
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
+ return -1;
+ }
+
+ /* Disable link before setting configuration */
+ dpaa2_dev_set_link_down(dev);
+
+ /* Based on fc_conf, update cfg */
+ cfg.rate = state.rate;
+ cfg.options = state.options;
+
+ /* update cfg with fc_conf */
+ switch (fc_conf->mode) {
+ case RTE_FC_FULL:
+ /* Full flow control;
+ * OPT_PAUSE set, ASYM_PAUSE not set
+ */
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ case RTE_FC_TX_PAUSE:
+ /* Enable RX flow control
+ * OPT_PAUSE not set;
+ * ASYM_PAUSE set;
+ */
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ break;
+ case RTE_FC_RX_PAUSE:
+ /* Enable TX Flow control
+ * OPT_PAUSE set
+ * ASYM_PAUSE set
+ */
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ case RTE_FC_NONE:
+ /* Disable Flow control
+ * OPT_PAUSE not set
+ * ASYM_PAUSE not set
+ */
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
+ fc_conf->mode);
+ return -1;
+ }
+
+ ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
+ if (ret)
+ RTE_LOG(ERR, PMD,
+ "Unable to set Link configuration (err=%d)\n",
+ ret);
+
+ /* Enable link */
+ dpaa2_dev_set_link_up(dev);
+
+ return ret;
+}
+
static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_configure = dpaa2_eth_dev_configure,
.dev_start = dpaa2_dev_start,
@@ -739,16 +1306,28 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_close = dpaa2_dev_close,
.promiscuous_enable = dpaa2_dev_promiscuous_enable,
.promiscuous_disable = dpaa2_dev_promiscuous_disable,
+ .allmulticast_enable = dpaa2_dev_allmulticast_enable,
+ .allmulticast_disable = dpaa2_dev_allmulticast_disable,
+ .dev_set_link_up = dpaa2_dev_set_link_up,
+ .dev_set_link_down = dpaa2_dev_set_link_down,
.link_update = dpaa2_dev_link_update,
.stats_get = dpaa2_dev_stats_get,
.stats_reset = dpaa2_dev_stats_reset,
+ .fw_version_get = dpaa2_fw_version_get,
.dev_infos_get = dpaa2_dev_info_get,
.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
.mtu_set = dpaa2_dev_mtu_set,
+ .vlan_filter_set = dpaa2_vlan_filter_set,
+ .vlan_offload_set = dpaa2_vlan_offload_set,
.rx_queue_setup = dpaa2_dev_rx_queue_setup,
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
.tx_queue_release = dpaa2_dev_tx_queue_release,
+ .flow_ctrl_get = dpaa2_flow_ctrl_get,
+ .flow_ctrl_set = dpaa2_flow_ctrl_set,
+ .mac_addr_add = dpaa2_dev_add_mac_addr,
+ .mac_addr_remove = dpaa2_dev_remove_mac_addr,
+ .mac_addr_set = dpaa2_dev_set_mac_addr,
};
static int
@@ -760,8 +1339,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
struct dpni_attr attr;
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
struct dpni_buffer_layout layout;
- int i, ret, hw_id;
- int tot_size;
+ int ret, hw_id;
PMD_INIT_FUNC_TRACE();
@@ -773,7 +1351,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
hw_id = dpaa2_dev->object_id;
- dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
+ dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
if (!dpni_dev) {
PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
return -1;
@@ -782,43 +1360,45 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
dpni_dev->regs = rte_mcp_ptr_list[0];
ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
- " error code %d\n", hw_id, ret);
+ PMD_INIT_LOG(ERR,
+ "Failure in opening dpni@%d with err code %d\n",
+ hw_id, ret);
+ rte_free(dpni_dev);
return -1;
}
/* Clean the device first */
ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
- " error code %d\n", hw_id, ret);
- return -1;
+ PMD_INIT_LOG(ERR,
+ "Failure cleaning dpni@%d with err code %d\n",
+ hw_id, ret);
+ goto init_err;
}
ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute, "
- " error code %d\n", hw_id, ret);
- return -1;
+ PMD_INIT_LOG(ERR,
+ "Failure in get dpni@%d attribute, err code %d\n",
+ hw_id, ret);
+ goto init_err;
}
priv->num_tc = attr.num_tcs;
- for (i = 0; i < attr.num_tcs; i++) {
- priv->num_dist_per_tc[i] = attr.num_queues;
- break;
- }
- /* Distribution is per Tc only,
- * so choosing RX queues from default TC only
+ /* Resetting the "num_rx_vqueues" to equal number of queues in first TC
+ * as only one TC is supported on Rx Side. Once Multiple TCs will be
+ * in use for Rx processing then this will be changed or removed.
*/
- priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];
+ priv->nb_rx_queues = attr.num_queues;
- if (attr.num_tcs == 1)
- priv->nb_tx_queues = attr.num_queues;
- else
- priv->nb_tx_queues = attr.num_tcs;
+ /* TODO:Using hard coded value for number of TX queues due to dependency
+ * in MC.
+ */
+ priv->nb_tx_queues = 8;
- PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
+ PMD_INIT_LOG(DEBUG, "num TC - RX %d", priv->num_tc);
+ PMD_INIT_LOG(DEBUG, "nb_tx_queues %d", priv->nb_tx_queues);
PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);
priv->hw = dpni_dev;
@@ -832,51 +1412,27 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpaa2_alloc_rx_tx_queues(eth_dev);
if (ret) {
PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
- return -ret;
+ goto init_err;
}
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("dpni",
ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
- "store MAC addresses",
- ETHER_ADDR_LEN * attr.mac_filter_entries);
- return -ENOMEM;
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * attr.mac_filter_entries);
+ ret = -ENOMEM;
+ goto init_err;
}
ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
priv->token,
(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
if (ret) {
- PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
- " Error Code = %d\n", ret);
- return -ret;
- }
-
- /* ... rx buffer layout ... */
- tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
- tot_size = RTE_ALIGN_CEIL(tot_size,
- DPAA2_PACKET_LAYOUT_ALIGN);
-
- memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
- DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
- DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
- DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-
- layout.pass_frame_status = 1;
- layout.data_head_room = tot_size
- - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
- layout.private_data_size = DPAA2_FD_PTA_SIZE;
- layout.pass_parser_result = 1;
- PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
- tot_size, layout.data_head_room, layout.private_data_size);
- ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_RX, &layout);
- if (ret) {
- PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
- return -1;
+ PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
+ ret);
+ goto init_err;
}
/* ... tx buffer layout ... */
@@ -886,9 +1442,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
- " layout", ret);
- return -1;
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
+ ret);
+ goto init_err;
}
/* ... tx-conf and error buffer layout ... */
@@ -898,19 +1454,21 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
- " layout", ret);
- return -1;
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
+ ret);
+ goto init_err;
}
eth_dev->dev_ops = &dpaa2_ethdev_ops;
- eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;
- eth_dev->rx_pkt_burst = dpaa2_dev_rx;
+ eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
rte_fslmc_vfio_dmamap();
return 0;
+init_err:
+ dpaa2_dev_uninit(eth_dev);
+ return ret;
}
static int
@@ -924,7 +1482,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ return 0;
if (!dpni) {
PMD_INIT_LOG(WARNING, "Already closed or not started");
@@ -945,22 +1503,23 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
priv->rx_vq[0] = NULL;
}
- /* Allocate memory for storing MAC addresses */
+ /* free memory for storing MAC addresses */
if (eth_dev->data->mac_addrs) {
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
}
- /*Close the device at underlying layer*/
+ /* Close the device at underlying layer*/
ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure closing dpni device with"
- " error code %d\n", ret);
+ PMD_INIT_LOG(ERR,
+ "Failure closing dpni device with err code %d\n",
+ ret);
}
- /*Free the allocated memory for ethernet private data and dpni*/
+ /* Free the allocated memory for ethernet private data and dpni*/
priv->hw = NULL;
- free(dpni);
+ rte_free(dpni);
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
@@ -970,21 +1529,16 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
}
static int
-rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
+rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_eth_dev *eth_dev;
- char ethdev_name[RTE_ETH_NAME_MAX_LEN];
-
int diag;
- sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
-
- eth_dev = rte_eth_dev_allocate(ethdev_name);
- if (eth_dev == NULL)
- return -ENOMEM;
-
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
eth_dev->data->dev_private = rte_zmalloc(
"ethdev private structure",
sizeof(struct dpaa2_dev_priv),
@@ -995,8 +1549,15 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
rte_eth_dev_release_port(eth_dev);
return -ENOMEM;
}
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
}
+
eth_dev->device = &dpaa2_dev->device;
+ eth_dev->device->driver = &dpaa2_drv->driver;
+
dpaa2_dev->eth_dev = eth_dev;
eth_dev->data->rx_mbuf_alloc_failed = 0;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 7196398f..a2902da2 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,9 +47,32 @@
/* default tc to be used for congestion, distribution etc. configuration */
#define DPAA2_DEF_TC 0
+/* Threshold for a Tx queue to *Enter* Congestion state.
+ */
+#define CONG_ENTER_TX_THRESHOLD 512
+
+/* Threshold for a queue to *Exit* Congestion state.
+ */
+#define CONG_EXIT_TX_THRESHOLD 480
+
+#define CONG_RETRY_COUNT 18000
+
+/* RX queue tail drop threshold
+ * currently considering 32 KB packets
+ */
+#define CONG_THRESHOLD_RX_Q (64 * 1024)
+
/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256
+/* Enable TX Congestion control support
+ * default is disable
+ */
+#define DPAA2_TX_CGR_OFF 0x01
+
+/* Disable RX tail drop, default is enable */
+#define DPAA2_RX_TAILDROP_OFF 0x04
+
struct dpaa2_dev_priv {
void *hw;
int32_t hw_id;
@@ -62,7 +85,6 @@ struct dpaa2_dev_priv {
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
uint32_t options;
- uint16_t num_dist_per_tc[MAX_TCS];
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_tc;
@@ -77,7 +99,8 @@ int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
-uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
-
+uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index c5d49cbe..3c057a39 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,7 +40,6 @@
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <fslmc_logs.h>
#include <fslmc_vfio.h>
@@ -133,6 +132,66 @@ dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
}
static inline struct rte_mbuf *__attribute__((hot))
+eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct qbman_sge *sgt, *sge;
+ dma_addr_t sg_addr;
+ int i = 0;
+ uint64_t fd_addr;
+ struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
+
+ fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+ /* Get Scatter gather table address */
+ sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
+
+ sge = &sgt[i++];
+ sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
+
+ /* First Scatter gather entry */
+ first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ /* Prepare all the metadata for first segment */
+ first_seg->buf_addr = (uint8_t *)sg_addr;
+ first_seg->ol_flags = 0;
+ first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
+ first_seg->data_len = sge->length & 0x1FFFF;
+ first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
+ first_seg->nb_segs = 1;
+ first_seg->next = NULL;
+
+ first_seg->packet_type = dpaa2_dev_rx_parse(
+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE);
+ dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)) +
+ DPAA2_FD_PTA_SIZE, first_seg);
+ rte_mbuf_refcnt_set(first_seg, 1);
+ cur_seg = first_seg;
+ while (!DPAA2_SG_IS_FINAL(sge)) {
+ sge = &sgt[i++];
+ sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FLE_ADDR(sge));
+ next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
+ next_seg->buf_addr = (uint8_t *)sg_addr;
+ next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
+ next_seg->data_len = sge->length & 0x1FFFF;
+ first_seg->nb_segs += 1;
+ rte_mbuf_refcnt_set(next_seg, 1);
+ cur_seg->next = next_seg;
+ next_seg->next = NULL;
+ cur_seg = next_seg;
+ }
+ temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ rte_mbuf_refcnt_set(temp, 1);
+ rte_pktmbuf_free_seg(temp);
+
+ return (void *)first_seg;
+}
+
+static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
@@ -171,6 +230,80 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
return mbuf;
}
+static int __attribute__ ((noinline)) __attribute__((hot))
+eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
+ struct qbman_sge *sgt, *sge = NULL;
+ int i;
+
+ /* First prepare the FD to be transmitted */
+ /* Resetting the buffer pool id and offset field*/
+ fd->simple.bpid_offset = 0;
+
+ temp = rte_pktmbuf_alloc(mbuf->pool);
+ if (temp == NULL) {
+ PMD_TX_LOG(ERR, "No memory to allocate S/G table");
+ return -ENOMEM;
+ }
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
+ DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
+ DPAA2_SET_FD_OFFSET(fd, temp->data_off);
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+ DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
+ /*Set Scatter gather table and Scatter gather entries*/
+ sgt = (struct qbman_sge *)(
+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_GET_FD_OFFSET(fd));
+
+ for (i = 0; i < mbuf->nb_segs; i++) {
+ sge = &sgt[i];
+ /*Resetting the buffer pool id and offset field*/
+ sge->fin_bpid_offset = 0;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
+ DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
+ sge->length = cur_seg->data_len;
+ if (RTE_MBUF_DIRECT(cur_seg)) {
+ if (rte_mbuf_refcnt_read(cur_seg) > 1) {
+ /* If refcnt > 1, invalid bpid is set to ensure
+ * buffer is not freed by HW
+ */
+ DPAA2_SET_FLE_IVP(sge);
+ rte_mbuf_refcnt_update(cur_seg, -1);
+ } else
+ DPAA2_SET_FLE_BPID(sge,
+ mempool_to_bpid(cur_seg->pool));
+ cur_seg = cur_seg->next;
+ } else {
+ /* Get owner MBUF from indirect buffer */
+ mi = rte_mbuf_from_indirect(cur_seg);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /* If refcnt > 1, invalid bpid is set to ensure
+ * owner buffer is not freed by HW
+ */
+ DPAA2_SET_FLE_IVP(sge);
+ } else {
+ DPAA2_SET_FLE_BPID(sge,
+ mempool_to_bpid(mi->pool));
+ rte_mbuf_refcnt_update(mi, 1);
+ }
+ prev_seg = cur_seg;
+ cur_seg = cur_seg->next;
+ prev_seg->next = NULL;
+ rte_pktmbuf_free(prev_seg);
+ }
+ }
+ DPAA2_SG_SET_FINAL(sge, true);
+ return 0;
+}
+
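eth_mbuf_to_sg_fd() above only lets the hardware free a buffer when software holds the last reference; otherwise the "invalid bpid" flag is set so the buffer survives transmission and the reference count is adjusted instead. A simplified stand-in for that ownership rule; the types and helper are not driver code:

#include <stdbool.h>

struct buf {
	int refcnt;
	bool hw_may_free;	/* stands in for the FLE bpid / IVP flag */
};

static void hand_off_to_hw(struct buf *b)
{
	if (b->refcnt > 1) {
		/* other users remain: hardware must not return it to the pool */
		b->hw_may_free = false;
		b->refcnt--;		/* drop only our reference */
	} else {
		b->hw_may_free = true;	/* hardware frees it after Tx */
	}
}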
+static void
+eth_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));
+
static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
@@ -190,8 +323,22 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
-}
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ if (rte_mbuf_refcnt_read(mbuf) > 1) {
+ DPAA2_SET_FD_IVP(fd);
+ rte_mbuf_refcnt_update(mbuf, -1);
+ }
+ } else {
+ struct rte_mbuf *mi;
+ mi = rte_mbuf_from_indirect(mbuf);
+ if (rte_mbuf_refcnt_read(mi) > 1)
+ DPAA2_SET_FD_IVP(fd);
+ else
+ rte_mbuf_refcnt_update(mi, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
@@ -242,17 +389,18 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
}
uint16_t
-dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
- /* Function is responsible to receive frames for a given device and VQ*/
+ /* Function receive frames for a given device and VQ*/
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
struct qbman_result *dq_storage;
uint32_t fqid = dpaa2_q->fqid;
int ret, num_rx = 0;
uint8_t is_last = 0, status;
struct qbman_swp *swp;
- const struct qbman_fd *fd;
+ const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
struct qbman_pull_desc pulldesc;
+ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
struct rte_eth_dev *dev = dpaa2_q->dev;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
@@ -263,44 +411,51 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- dq_storage = dpaa2_q->q_storage->dq_storage[0];
-
- qbman_pull_desc_clear(&pulldesc);
- qbman_pull_desc_set_numframes(&pulldesc,
- (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_pkts);
- qbman_pull_desc_set_fq(&pulldesc, fqid);
- /* todo optimization - we can have dq_storage_phys available*/
- qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ if (!q_storage->active_dqs) {
+ q_storage->toggle = 0;
+ dq_storage = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-
- /*Issue a volatile dequeue command. */
- while (1) {
- if (qbman_swp_pull(swp, &pulldesc)) {
- PMD_RX_LOG(ERR, "VDQ command is not issued."
- "QBMAN is busy\n");
- /* Portal was busy, try again */
- continue;
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ while (!qbman_check_command_complete(swp,
+ get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
}
- break;
- };
-
- /* Receive the packets till Last Dequeue entry is found with
- * respect to the above issues PULL command.
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ PMD_RX_LOG(WARNING, "VDQ command is not issued."
+ "QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
+ }
+ dq_storage = q_storage->active_dqs;
+ /* Check if the previous issued command is completed.
+ * Also seems like the SWP is shared between the Ethernet Driver
+ * and the SEC driver.
*/
+ while (!qbman_check_command_complete(swp, dq_storage))
+ ;
+ if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
+ clear_swp_active_dqs(q_storage->active_dpio_id);
while (!is_last) {
- struct rte_mbuf *mbuf;
- /*Check if the previous issued command is completed.
- * Also seems like the SWP is shared between the
- * Ethernet Driver and the SEC driver.
- */
- while (!qbman_check_command_complete(swp, dq_storage))
- ;
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
while (!qbman_result_has_new_result(swp, dq_storage))
;
+ rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
*/
@@ -311,27 +466,54 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
continue;
}
+ fd[num_rx] = qbman_result_DQ_fd(dq_storage);
- fd = qbman_result_DQ_fd(dq_storage);
- mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FD_ADDR(fd)
- - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
- /* Prefeth mbuf */
- rte_prefetch0(mbuf);
/* Prefetch Annotation address for the parse results */
- rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd)
- + DPAA2_FD_PTA_SIZE + 16));
+ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
+ + DPAA2_FD_PTA_SIZE + 16));
- bufs[num_rx] = eth_fd_to_mbuf(fd);
+ if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
+ bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
+ else
+ bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
bufs[num_rx]->port = dev->data->port_id;
- num_rx++;
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ rte_vlan_strip(bufs[num_rx]);
+
dq_storage++;
- } /* End of Packet Rx loop */
+ num_rx++;
+ }
+
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ while (!qbman_check_command_complete(swp,
+ get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ ;
+ clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ }
+ q_storage->toggle ^= 1;
+ dq_storage = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ /* Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ PMD_RX_LOG(WARNING, "VDQ command is not issued."
+ "QBMAN is busy\n");
+ continue;
+ }
+ break;
+ }
+ q_storage->active_dqs = dq_storage;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
dpaa2_q->rx_pkts += num_rx;
- /*Return the total number of packets received to DPAA2 app*/
+ /* Return the total number of packets received to DPAA2 app */
return num_rx;
}
@@ -342,9 +524,10 @@ uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
/* Function to transmit the frames to given device and VQ*/
- uint32_t loop;
+ uint32_t loop, retry_count;
int32_t ret;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ struct rte_mbuf *mi;
uint32_t frames_to_send;
struct rte_mempool *mp;
struct qbman_eq_desc eqdesc;
@@ -375,14 +558,32 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/*Clear the unused FD fields before sending*/
while (nb_pkts) {
+ /*Check if the queue is congested*/
+ retry_count = 0;
+ if (qbman_result_SCN_state_in_mem(dpaa2_q->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto skip_tx;
+ }
+
frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
for (loop = 0; loop < frames_to_send; loop++) {
fd_arr[loop].simple.frc = 0;
DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
- mp = (*bufs)->pool;
+ if (RTE_MBUF_DIRECT(*bufs)) {
+ mp = (*bufs)->pool;
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
/* Not a hw_pkt pool allocated frame */
+ if (!mp) {
+ PMD_TX_LOG(ERR, "err: no bpool attached");
+ goto skip_tx;
+ }
if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
PMD_TX_LOG(ERR, "non hw offload bufffer ");
/* alloc should be from the default buffer pool
@@ -391,11 +592,16 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (priv->bp_list) {
bpid = priv->bp_list->buf_pool.bpid;
} else {
- PMD_TX_LOG(ERR, "errr: why no bpool"
- " attached");
+ PMD_TX_LOG(ERR,
+ "err: no bpool attached");
num_tx = 0;
goto skip_tx;
}
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ PMD_TX_LOG(ERR, "S/G support not added"
+ " for non hw offload buffer");
+ goto skip_tx;
+ }
if (eth_copy_mbuf_to_fd(*bufs,
&fd_arr[loop], bpid)) {
bufs++;
@@ -403,7 +609,14 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
} else {
bpid = mempool_to_bpid(mp);
- eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop], bpid))
+ goto skip_tx;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
}
bufs++;
}
@@ -420,3 +633,28 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
skip_tx:
return num_tx;
}
+
+/**
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ (void)queue;
+ (void)bufs;
+ (void)nb_pkts;
+ return 0;
+}
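
A minimal usage sketch for the dummy callback above (not taken from the patch; the function name and the reconfiguration step are hypothetical, while dummy_dev_tx, eth_tx_burst_t and the rte_eth_dev fields are existing DPDK symbols):

static void
example_pause_tx_during_reconfig(struct rte_eth_dev *dev)
{
	eth_tx_burst_t saved_tx = dev->tx_pkt_burst;

	dev->tx_pkt_burst = dummy_dev_tx;	/* drop traffic during the unsafe window */
	rte_wmb();				/* make the swap visible to other lcores */

	/* ... perform the unsafe queue/control operation here ... */

	dev->tx_pkt_burst = saved_tx;		/* restore the real burst function */
}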
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 33306140..c2d39691 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -444,6 +444,24 @@ int dpni_get_qdid(struct fsl_mc_io *mc_io,
return 0;
}
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_LINK_CFG(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
int dpni_get_link_state(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -509,6 +527,47 @@ int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
return 0;
}
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en);
+
+ return 0;
+}
+
int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -591,6 +650,148 @@ int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
return 0;
}
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+ cmd_flags,
+ token);
+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+ cmd_flags,
+ token);
+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int unicast,
+ int multicast)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+ cmd_flags,
+ token);
+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_PORT_MAC_ADDR(cmd, mac_addr);
+
+ return 0;
+}
+
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ DPNI_CMD_ENABLE_VLAN_FILTER(cmd, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -626,6 +827,54 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_CONGESTION_NOTIFICATION(cmd, qtype, tc_id, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ struct dpni_congestion_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_CONGESTION_NOTIFICATION(cmd, qtype, tc_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPNI_RSP_GET_CONGESTION_NOTIFICATION(cmd, cfg);
+
+ return 0;
+}
+
int dpni_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
@@ -737,3 +986,53 @@ int dpni_reset_statistics(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_TAILDROP(cmd, cg_point, q_type, tc, q_index, taildrop);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_TAILDROP(cmd, cg_point, q_type, tc, q_index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_TAILDROP(cmd, taildrop);
+
+ return 0;
+}
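
As an illustration of how the new wrappers are meant to be driven (a sketch under assumptions: the caller already holds an open fsl_mc_io portal and DPNI token, CMD_PRI_LOW comes from fsl_mc_cmd.h, and the MAC address is a placeholder):

static int
example_dpni_filters(struct fsl_mc_io *dpni, uint16_t token)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	int ret;

	/* enable multicast promiscuous mode */
	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, token, 1);
	if (ret)
		return ret;

	/* add one unicast MAC filter entry */
	return dpni_add_mac_addr(dpni, CMD_PRI_LOW, token, mac);
}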
diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
index 3e0f4b0e..2391e401 100644
--- a/drivers/net/dpaa2/mc/fsl_dpkg.h
+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index ef14f858..64db70dc 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -72,10 +72,7 @@ struct fsl_mc_io;
* All flows within traffic class considered; see dpni_set_queue()
*/
#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
-/**
- * Generate new flow ID; see dpni_set_queue()
- */
-#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
+
/**
* Tx traffic is always released to a buffer pool on transmit, there are no
* resources allocated to have the frames confirmed back to the source after
@@ -743,6 +740,30 @@ union dpni_statistics {
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
/**
+ * struct - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+ uint32_t rate;
+ uint64_t options;
+};
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_link_cfg *cfg);
+
+/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
@@ -800,6 +821,33 @@ int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
uint16_t token,
uint16_t *max_frame_length);
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
/**
* dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
@@ -857,6 +905,51 @@ int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
uint16_t token,
uint8_t mac_addr[6]);
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @unicast: Set to '1' to clear unicast addresses
+ * @multicast: Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int unicast,
+ int multicast);
/**
* dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
@@ -876,6 +969,60 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
uint8_t mac_addr[6]);
/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
+
+/**
+ * dpni_clear_vlan_filters() - Clear all VLAN filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
* enum dpni_dist_mode - DPNI distribution mode
* @DPNI_DIST_MODE_NONE: No distribution
* @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
@@ -961,6 +1108,16 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
uint16_t token,
uint8_t tc_id,
const struct dpni_rx_tc_dist_cfg *cfg);
+/**
+ * enum dpni_congestion_unit - DPNI congestion units
+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpni_congestion_unit {
+ DPNI_CONGESTION_UNIT_BYTES = 0,
+ DPNI_CONGESTION_UNIT_FRAMES
+};
+
/**
* enum dpni_dest - DPNI destination types
@@ -981,6 +1138,119 @@ enum dpni_dest {
DPNI_DEST_DPCON = 2
};
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
+ */
+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
+ */
+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
+ * contained in 'options'
+ * @dest_cfg: CSCN can be sent to either the DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+struct dpni_congestion_notification_cfg {
+ enum dpni_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ uint16_t notification_mode;
+};
+
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ const struct dpni_congestion_notification_cfg *cfg);
+
+/**
+ * dpni_get_congestion_notification() - Get traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
+ struct dpni_congestion_notification_cfg *cfg);
+
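
A sketch of a CSCN-to-memory configuration built from the declarations above, of the kind the PMD's dpaa2_q->cscn check in dpaa2_dev_tx relies on (the thresholds, the IOVA and the function name are placeholders; the mc_io handle and token come from the caller's DPNI setup):

static int
example_tx_congestion_to_memory(struct fsl_mc_io *dpni, uint16_t token,
				uint64_t cscn_iova, uint8_t tc_id)
{
	struct dpni_congestion_notification_cfg cong = {
		.units             = DPNI_CONGESTION_UNIT_FRAMES,
		.threshold_entry   = 512,	/* enter congestion at 512 frames */
		.threshold_exit    = 480,	/* leave it again below 480 frames */
		.message_ctx       = 0,
		.message_iova      = cscn_iova,	/* 16B-aligned, DMA-able buffer */
		.dest_cfg          = { .dest_type = DPNI_DEST_NONE },
		.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
				     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
	};

	return dpni_set_congestion_notification(dpni, CMD_PRI_LOW, token,
						DPNI_QUEUE_TX, tc_id, &cong);
}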
/**
* struct dpni_queue - Queue structure
@@ -1077,6 +1347,8 @@ enum dpni_confirmation_mode {
* Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all
* Tx confirmations to a shared Tx conf queue. The ID of the queue when
* calling dpni_set/get_queue is -1.
+ * Tx confirmation mode can only be changed while the DPNI is disabled.
+ * Executing this command while the DPNI is enabled will return an error.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -1214,4 +1486,89 @@ int dpni_reset_statistics(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
+/**
+ * enum dpni_congestion_point - Congestion point types
+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
+ * QUEUE_INDEX
+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used
+ * to define the DPNI this can be either per
+ * TC (default) or per interface
+ * (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
+ * QUEUE_INDEX is ignored if this type is used.
+ */
+enum dpni_congestion_point {
+ DPNI_CP_QUEUE,
+ DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable: Indicates whether the taildrop is active or not.
+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only
+ *		supports byte units; this field is ignored and
+ *		assumed to be 0 if CONGESTION_POINT is DPNI_CP_QUEUE (0).
+ * @threshold: Threshold value, in units identified by the UNITS field.
+ *		Value 0 is not a valid taildrop threshold;
+ *		THRESHOLD must be > 0 if the taildrop is
+ *		enabled.
+ */
+struct dpni_taildrop {
+ char enable;
+ enum dpni_congestion_unit units;
+ uint32_t threshold;
+};
+
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ *
+ * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current
+ * congestion notification or early drop (WRED) configuration previously applied
+ * to the same TC.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point. DPNI_CP_QUEUE is only supported in
+ * combination with DPNI_QUEUE_RX.
+ * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX.
+ * @tc: Traffic class to apply this taildrop to
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution.
+ * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop);
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX
+ * @tc: Traffic class to apply this taildrop to
+ * @q_index: Index of the queue if the DPNI supports multiple queues for
+ *		traffic distribution. Ignored if CONGESTION_POINT
+ *		is not DPNI_CP_QUEUE.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ uint8_t tc,
+ uint8_t q_index,
+ struct dpni_taildrop *taildrop);
#endif /* __FSL_DPNI_H */
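
A sketch of the taildrop API declared above, arming a byte-based per-queue taildrop (the 64 KB threshold, traffic class, queue index and function name are arbitrary illustrations; CMD_PRI_LOW is from fsl_mc_cmd.h):

static int
example_rx_queue_taildrop(struct fsl_mc_io *dpni, uint16_t token)
{
	struct dpni_taildrop td = {
		.enable    = 1,
		.units     = DPNI_CONGESTION_UNIT_BYTES,
		.threshold = 64 * 1024,		/* drop once 64 KB is queued */
	};

	/* per-queue taildrop on Rx traffic class 0, queue index 0 */
	return dpni_set_taildrop(dpni, CMD_PRI_LOW, token,
				 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
				 0 /* tc */, 0 /* q_index */, &td);
}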
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index bb92ea89..2ac397cd 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright (c) 2016 NXP.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -64,11 +64,22 @@
#define DPNI_CMDID_GET_LINK_STATE ((0x215 << 4) | (0x1))
#define DPNI_CMDID_SET_MAX_FRAME_LENGTH ((0x216 << 4) | (0x1))
#define DPNI_CMDID_GET_MAX_FRAME_LENGTH ((0x217 << 4) | (0x1))
+#define DPNI_CMDID_SET_LINK_CFG ((0x21a << 4) | (0x1))
+#define DPNI_CMDID_SET_MCAST_PROMISC ((0x220 << 4) | (0x1))
+#define DPNI_CMDID_GET_MCAST_PROMISC ((0x221 << 4) | (0x1))
#define DPNI_CMDID_SET_UNICAST_PROMISC ((0x222 << 4) | (0x1))
#define DPNI_CMDID_GET_UNICAST_PROMISC ((0x223 << 4) | (0x1))
#define DPNI_CMDID_SET_PRIM_MAC ((0x224 << 4) | (0x1))
#define DPNI_CMDID_GET_PRIM_MAC ((0x225 << 4) | (0x1))
+#define DPNI_CMDID_ADD_MAC_ADDR ((0x226 << 4) | (0x1))
+#define DPNI_CMDID_REMOVE_MAC_ADDR ((0x227 << 4) | (0x1))
+#define DPNI_CMDID_CLR_MAC_FILTERS ((0x228 << 4) | (0x1))
+
+#define DPNI_CMDID_ENABLE_VLAN_FILTER ((0x230 << 4) | (0x1))
+#define DPNI_CMDID_ADD_VLAN_ID ((0x231 << 4) | (0x1))
+#define DPNI_CMDID_REMOVE_VLAN_ID ((0x232 << 4) | (0x1))
+#define DPNI_CMDID_CLR_VLAN_FILTERS ((0x233 << 4) | (0x1))
#define DPNI_CMDID_SET_RX_TC_DIST ((0x235 << 4) | (0x1))
@@ -76,12 +87,16 @@
#define DPNI_CMDID_RESET_STATISTICS ((0x25E << 4) | (0x1))
#define DPNI_CMDID_GET_QUEUE ((0x25F << 4) | (0x1))
#define DPNI_CMDID_SET_QUEUE ((0x260 << 4) | (0x1))
+#define DPNI_CMDID_GET_TAILDROP ((0x261 << 4) | (0x1))
+#define DPNI_CMDID_SET_TAILDROP ((0x262 << 4) | (0x1))
#define DPNI_CMDID_GET_PORT_MAC_ADDR ((0x263 << 4) | (0x1))
#define DPNI_CMDID_GET_BUFFER_LAYOUT ((0x264 << 4) | (0x1))
#define DPNI_CMDID_SET_BUFFER_LAYOUT ((0x265 << 4) | (0x1))
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION ((0x267 << 4) | (0x1))
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION ((0x268 << 4) | (0x1))
#define DPNI_CMDID_GET_OFFLOAD ((0x26B << 4) | (0x1))
#define DPNI_CMDID_SET_OFFLOAD ((0x26C << 4) | (0x1))
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE ((0x266 << 4) | (0x1))
@@ -224,6 +239,13 @@ do { \
} while (0)
/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
do { \
MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
@@ -240,6 +262,14 @@ do { \
MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \
+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
MC_CMD_OP(cmd, 0, 0, 1, int, en)
@@ -269,6 +299,57 @@ do { \
MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
} while (0)
+#define DPNI_RSP_GET_PORT_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \
+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_ENABLE_VLAN_FILTER(cmd, en) \
+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \
+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \
+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
+
/* cmd, param, offset, width, type, arg_name */
#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
@@ -324,6 +405,33 @@ do { \
MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
} while (0)
+#define DPNI_CMD_GET_TAILDROP(cmd, cp, q_type, tc, q_index) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_congestion_point, cp); \
+ MC_CMD_OP(cmd, 0, 8, 8, enum dpni_queue_type, q_type); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, q_index); \
+} while (0)
+
+#define DPNI_RSP_GET_TAILDROP(cmd, taildrop) \
+do { \
+ MC_RSP_OP(cmd, 1, 0, 1, char, (taildrop)->enable); \
+ MC_RSP_OP(cmd, 1, 16, 8, enum dpni_congestion_unit, \
+ (taildrop)->units); \
+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, (taildrop)->threshold); \
+} while (0)
+
+#define DPNI_CMD_SET_TAILDROP(cmd, cp, q_type, tc, q_index, taildrop) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_congestion_point, cp); \
+ MC_CMD_OP(cmd, 0, 8, 8, enum dpni_queue_type, q_type); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, q_index); \
+ MC_CMD_OP(cmd, 1, 0, 1, char, (taildrop)->enable); \
+ MC_CMD_OP(cmd, 1, 16, 8, enum dpni_congestion_unit, \
+ (taildrop)->units); \
+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, (taildrop)->threshold); \
+} while (0)
#define DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode) \
MC_CMD_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
@@ -331,4 +439,38 @@ do { \
#define DPNI_RSP_GET_TX_CONFIRMATION_MODE(cmd, mode) \
MC_RSP_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
+#define DPNI_CMD_SET_CONGESTION_NOTIFICATION(cmd, qtype, tc, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, (cfg)->dest_cfg.dest_id); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (cfg)->notification_mode); \
+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, (cfg)->dest_cfg.priority); \
+ MC_CMD_OP(cmd, 1, 56, 4, enum dpni_dest, (cfg)->dest_cfg.dest_type); \
+ MC_CMD_OP(cmd, 1, 60, 2, enum dpni_congestion_unit, (cfg)->units); \
+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, (cfg)->message_iova); \
+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, (cfg)->message_ctx); \
+ MC_CMD_OP(cmd, 4, 0, 32, uint32_t, (cfg)->threshold_entry); \
+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, (cfg)->threshold_exit); \
+} while (0)
+
+#define DPNI_CMD_GET_CONGESTION_NOTIFICATION(cmd, qtype, tc) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+} while (0)
+
+#define DPNI_RSP_GET_CONGESTION_NOTIFICATION(cmd, cfg) \
+do { \
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, (cfg)->dest_cfg.dest_id); \
+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, (cfg)->notification_mode); \
+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, (cfg)->dest_cfg.priority); \
+ MC_RSP_OP(cmd, 1, 56, 4, enum dpni_dest, (cfg)->dest_cfg.dest_type); \
+ MC_RSP_OP(cmd, 1, 60, 2, enum dpni_congestion_unit, (cfg)->units); \
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (cfg)->message_iova); \
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (cfg)->message_ctx); \
+ MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (cfg)->threshold_entry); \
+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, (cfg)->threshold_exit); \
+} while (0)
+
#endif /* _FSL_DPNI_CMD_H */
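
To make the encoding above concrete, a small sketch of what the ID and packing macros boil down to, assuming the usual fsl_mc_cmd.h layout in which MC_CMD_OP() ORs a shifted field into cmd.params[] (the token value is a placeholder and the function is illustrative):

static void
example_build_add_vlan_cmd(struct mc_command *cmd, uint16_t vlan_id)
{
	/* command code in the upper bits, command version in the low 4 bits */
	uint16_t cmdid = DPNI_CMDID_ADD_VLAN_ID;	/* (0x231 << 4) | 0x1 */

	cmd->header = mc_encode_cmd_header(cmdid, CMD_PRI_LOW, 0 /* token */);
	/* DPNI_CMD_ADD_VLAN_ID(): 16-bit VLAN ID at bit offset 32 of param 0 */
	cmd->params[0] |= (uint64_t)vlan_id << 32;
}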
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index b5592d6b..ffdf36d3 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -96,6 +96,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 8352d0a7..5668910c 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -34,6 +34,7 @@
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
#include <rte_time.h>
+#include <rte_pci.h>
#define E1000_INTEL_VENDOR_ID 0x8086
@@ -82,7 +83,7 @@
#define E1000_MAX_FLEX_FILTER_DWDS \
(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
- (E1000_MAX_FLEX_FILTER_DWDS / 4)
+ (E1000_MAX_FLEX_FILTER_DWDS / 2)
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
@@ -143,6 +144,19 @@
#define EM_TX_MAX_SEG UINT8_MAX
#define EM_TX_MAX_MTU_SEG UINT8_MAX
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_82576 && (type) != e1000_i210 &&\
+ (type) != e1000_i211)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_i210 && (type) != e1000_i211)\
+ return -ENOTSUP; \
+} while (0)
+
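
A sketch of how filter paths use this guard (the function name is illustrative; E1000_DEV_PRIVATE_TO_HW is the existing accessor from this header):

static int
example_igb_filter_guard(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* returns -ENOTSUP to the caller on MAC types without filter support */
	MAC_TYPE_FILTER_SUP(hw->mac.type);

	/* ... safe to program the filter registers from here on ... */
	return 0;
}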
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
@@ -237,13 +251,19 @@ struct e1000_2tuple_filter {
uint16_t queue; /* rx queue assigned to */
};
+/* ethertype filter structure */
+struct igb_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+};
+
/*
- * Structure to store filters' info.
+ * Structure to store filters'info.
*/
struct e1000_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
- uint16_t ethertype_filters[E1000_MAX_ETQF_FILTERS];
+ struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
uint8_t flex_mask; /* Bit mask for every used flex filter */
struct e1000_flex_filter_list flex_list;
/* Bit mask for every used 5tuple filter */
@@ -252,6 +272,8 @@ struct e1000_filter_info {
/* Bit mask for every used 2tuple filter */
uint8_t twotuple_mask;
struct e1000_2tuple_filter_list twotuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
};
/*
@@ -291,8 +313,55 @@ struct e1000_adapter {
#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct e1000_adapter *)adapter)->filter)
-#define E1000_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+/* ntuple filter list structure */
+struct igb_ntuple_filter_ele {
+ TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+
+/* ethertype filter list structure */
+struct igb_ethertype_filter_ele {
+ TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+
+/* syn filter list structure */
+struct igb_eth_syn_filter_ele {
+ TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+
+/* flex filter list structure */
+struct igb_flex_filter_ele {
+ TAILQ_ENTRY(igb_flex_filter_ele) entries;
+ struct rte_eth_flex_filter filter_info;
+};
+
+/* igb_flow memory list structure */
+struct igb_flow_mem {
+ TAILQ_ENTRY(igb_flow_mem) entries;
+ struct rte_flow *flow;
+ struct rte_eth_dev *dev;
+};
+
+TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
+struct igb_syn_filter_list igb_filter_syn_list;
+TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
+struct igb_flex_filter_list igb_filter_flex_list;
+TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
+struct igb_flow_mem_list igb_flow_list;
+
+extern const struct rte_flow_ops igb_flow_ops;
+
/*
* RX/TX IGB function prototypes
*/
@@ -411,4 +480,24 @@ void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void igb_pf_host_uninit(struct rte_eth_dev *dev);
+void igb_filterlist_flush(struct rte_eth_dev *dev);
+int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter);
+int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter);
+void igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter);
+int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
+ uint8_t idx);
+int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter, bool add);
+int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+ struct rte_eth_flex_filter *filter,
+ bool add);
#endif /* _E1000_ETHDEV_H_ */
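
A sketch of the list bookkeeping these declarations enable; the real igb_flow.c create path does more, and the helper name is hypothetical (rte_zmalloc/rte_memcpy are standard DPDK calls):

static struct igb_ntuple_filter_ele *
example_remember_ntuple(const struct rte_eth_ntuple_filter *ntuple)
{
	struct igb_ntuple_filter_ele *ele;

	ele = rte_zmalloc("igb_ntuple_filter_ele", sizeof(*ele), 0);
	if (ele == NULL)
		return NULL;
	rte_memcpy(&ele->filter_info, ntuple, sizeof(ele->filter_info));
	TAILQ_INSERT_TAIL(&igb_filter_ntuple_list, ele, entries);
	return ele;
}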
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a9bd92bc..3d4ab936 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -316,7 +316,7 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
@@ -390,7 +390,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -593,8 +593,7 @@ eth_em_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE(dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev =
- E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
@@ -777,7 +776,7 @@ eth_em_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_disable(hw);
@@ -1041,7 +1040,7 @@ static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_enable(hw);
@@ -1091,7 +1090,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -1598,7 +1597,7 @@ static int
eth_em_interrupt_action(struct rte_eth_dev *dev,
struct rte_intr_handle *intr_handle)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
@@ -1671,7 +1670,7 @@ eth_em_interrupt_handler(void *param)
eth_em_interrupt_get_status(dev);
eth_em_interrupt_action(dev, dev->intr_handle);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}
static int
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index d18dd48e..e4f7a9fa 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -138,7 +138,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
@@ -213,9 +213,6 @@ static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
-static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
@@ -225,9 +222,6 @@ static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
-static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
- struct rte_eth_flex_filter *filter,
- bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
@@ -237,17 +231,11 @@ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
-static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
- bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
-static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -291,6 +279,7 @@ static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
+static int igb_filter_restore(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -756,11 +745,51 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
return E1000_SUCCESS;
}
+/* Remove all ntuple filters of the device */
+static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple, entries);
+ rte_free(p_5tuple);
+ }
+ filter_info->fivetuple_mask = 0;
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
+ TAILQ_REMOVE(&filter_info->twotuple_list,
+ p_2tuple, entries);
+ rte_free(p_2tuple);
+ }
+ filter_info->twotuple_mask = 0;
+
+ return 0;
+}
+
+/* Remove all flex filters of the device */
+static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_flex_filter *p_flex;
+
+ while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
+ TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
+ rte_free(p_flex);
+ }
+ filter_info->flex_mask = 0;
+
+ return 0;
+}
+
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
int error = 0;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
@@ -906,12 +935,19 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
/* enable support intr */
igb_intr_enable(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct e1000_filter_info));
+
TAILQ_INIT(&filter_info->flex_list);
- filter_info->flex_mask = 0;
TAILQ_INIT(&filter_info->twotuple_list);
- filter_info->twotuple_mask = 0;
TAILQ_INIT(&filter_info->fivetuple_list);
- filter_info->fivetuple_mask = 0;
+
+ TAILQ_INIT(&igb_filter_ntuple_list);
+ TAILQ_INIT(&igb_filter_ethertype_list);
+ TAILQ_INIT(&igb_filter_syn_list);
+ TAILQ_INIT(&igb_filter_flex_list);
+ TAILQ_INIT(&igb_flow_list);
return 0;
@@ -929,6 +965,8 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
struct e1000_hw *hw;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -936,7 +974,7 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
intr_handle = &pci_dev->intr_handle;
if (adapter->stopped == 0)
@@ -960,6 +998,23 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler, eth_dev);
+ /* clear the SYN filter info */
+ filter_info->syn_info = 0;
+
+ /* clear the ethertype filters info */
+ filter_info->ethertype_mask = 0;
+ memset(filter_info->ethertype_filters, 0,
+ E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
+
+ /* remove all ntuple filters of the device */
+ igb_ntuple_filter_uninit(eth_dev);
+
+ /* remove all flex filters of the device */
+ igb_flex_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ igb_filterlist_flush(eth_dev);
+
return 0;
}
@@ -994,7 +1049,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
@@ -1071,7 +1126,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1252,7 +1307,7 @@ eth_igb_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
@@ -1417,7 +1472,9 @@ eth_igb_start(struct rte_eth_dev *dev)
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- eth_igb_lsc_interrupt_setup(dev);
+ eth_igb_lsc_interrupt_setup(dev, TRUE);
+ else
+ eth_igb_lsc_interrupt_setup(dev, FALSE);
} else {
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler,
@@ -1438,6 +1495,9 @@ eth_igb_start(struct rte_eth_dev *dev)
/* resume enabled intr since hw reset */
igb_intr_enable(dev);
+ /* restore all types filter */
+ igb_filter_restore(dev);
+
PMD_INIT_LOG(DEBUG, "<<");
return 0;
@@ -1459,13 +1519,8 @@ static void
eth_igb_stop(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct e1000_filter_info *filter_info =
- E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- struct e1000_flex_filter *p_flex;
- struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
- struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
igb_intr_disable(hw);
@@ -1494,31 +1549,6 @@ eth_igb_stop(struct rte_eth_dev *dev)
memset(&link, 0, sizeof(link));
rte_igb_dev_atomic_write_link_status(dev, &link);
- /* Remove all flex filters of the device */
- while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
- TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
- rte_free(p_flex);
- }
- filter_info->flex_mask = 0;
-
- /* Remove all ntuple filters of the device */
- for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
- p_5tuple != NULL; p_5tuple = p_5tuple_next) {
- p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
- TAILQ_REMOVE(&filter_info->fivetuple_list,
- p_5tuple, entries);
- rte_free(p_5tuple);
- }
- filter_info->fivetuple_mask = 0;
- for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
- p_2tuple != NULL; p_2tuple = p_2tuple_next) {
- p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
- TAILQ_REMOVE(&filter_info->twotuple_list,
- p_2tuple, entries);
- rte_free(p_2tuple);
- }
- filter_info->twotuple_mask = 0;
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
@@ -1566,7 +1596,7 @@ eth_igb_close(struct rte_eth_dev *dev)
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_eth_link link;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
eth_igb_stop(dev);
@@ -2152,7 +2182,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2281,7 +2311,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2716,18 +2746,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- intr->mask |= E1000_ICR_LSC;
+ if (on)
+ intr->mask |= E1000_ICR_LSC;
+ else
+ intr->mask &= ~E1000_ICR_LSC;
return 0;
}
@@ -2813,7 +2848,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint32_t tctl, rctl;
struct rte_eth_link link;
int ret;
@@ -2870,7 +2905,8 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
E1000_WRITE_FLUSH(hw);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
return 0;
@@ -2929,7 +2965,8 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
/* PF reset VF event */
if (in_msg == E1000_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
}
static int
@@ -3081,7 +3118,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
#define E1000_RAH_POOLSEL_SHIFT (18)
static int
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
+ uint32_t index, uint32_t pool)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t rah;
@@ -3229,7 +3266,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret;
uint32_t intr_vector = 0;
@@ -3285,7 +3322,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -3549,18 +3586,14 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
return 0;
}
-#define MAC_TYPE_FILTER_SUP(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350 &&\
- (type) != e1000_82576)\
- return -ENOTSUP;\
-} while (0)
-
-static int
+int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
uint32_t synqf, rfctl;
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
@@ -3588,6 +3621,7 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev,
synqf = 0;
}
+ filter_info->syn_info = synqf;
E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
E1000_WRITE_FLUSH(hw);
return 0;
@@ -3655,11 +3689,6 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
return ret;
}
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
- if ((type) != e1000_82580 && (type) != e1000_i350)\
- return -ENOSYS; \
-} while (0)
-
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
static inline int
ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
@@ -3722,6 +3751,54 @@ igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
return NULL;
}
+/* inject an igb 2-tuple filter into the HW */
+static inline void
+igb_inject_2uple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
+ uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ int i;
+
+ i = filter->index;
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ ttqf |= E1000_TTQF_QUEUE_ENABLE;
+ ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
+ ttqf |= (uint32_t)(filter->filter_info.proto &
+ E1000_TTQF_PROTOCOL_MASK);
+ if (filter->filter_info.proto_mask == 0)
+ ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
/*
* igb_add_2tuple_filter - add a 2tuple filter
*
@@ -3737,12 +3814,9 @@ static int
igb_add_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_2tuple_filter *filter;
- uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
- uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
int i, ret;
filter = rte_zmalloc("e1000_2tuple_filter",
@@ -3784,39 +3858,25 @@ igb_add_2tuple_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
- if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
+ igb_inject_2uple_filter(dev, filter);
+ return 0;
+}
- imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+int
+igb_delete_2tuple_filter(struct rte_eth_dev *dev,
+ struct e1000_2tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- ttqf |= E1000_TTQF_QUEUE_ENABLE;
- ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
- ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
- if (filter->filter_info.proto_mask == 0)
- ttqf &= ~E1000_TTQF_MASK_ENABLE;
+ filter_info->twotuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
+ rte_free(filter);
- /* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
- if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_URG;
- if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_ACK;
- if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_PSH;
- if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_RST;
- if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_SYN;
- if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_FIN;
- } else
- imir_ext |= E1000_IMIREXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
- E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
- E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -3835,7 +3895,6 @@ static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_2tuple_filter_info filter_2tuple;
@@ -3855,16 +3914,50 @@ igb_remove_2tuple_filter(struct rte_eth_dev *dev,
return -ENOENT;
}
- filter_info->twotuple_mask &= ~(1 << filter->index);
- TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
- rte_free(filter);
+ igb_delete_2tuple_filter(dev, filter);
- E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
- E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
+/* inject an igb flex filter into the HW */
+static inline void
+igb_inject_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, queueing;
+ uint32_t reg_off;
+ uint8_t i, j = 0;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
+ (E1000_WUFC_FLX0 << filter->index));
+ queueing = filter->filter_info.len |
+ (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
+ (filter->filter_info.priority <<
+ E1000_FHFT_QUEUEING_PRIO_SHIFT);
+ E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
+ queueing);
+
+ for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ filter->filter_info.dwords[++j]);
+ reg_off += sizeof(uint32_t);
+ E1000_WRITE_REG(hw, reg_off,
+ (uint32_t)filter->filter_info.mask[i]);
+ reg_off += sizeof(uint32_t) * 2;
+ ++j;
+ }
+}
+
static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
struct e1000_flex_filter_info *key)
@@ -3880,18 +3973,48 @@ eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
return NULL;
}
-static int
+/* remove a flex byte filter
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter to be removed.
+ */
+void
+igb_remove_flex_filter(struct rte_eth_dev *dev,
+ struct e1000_flex_filter *filter)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, i;
+ uint32_t reg_off;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ if (filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
+
+ for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
+ E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
+
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc &
+ (~(E1000_WUFC_FLX0 << filter->index)));
+
+ filter_info->flex_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
+ rte_free(filter);
+}
+
+int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_flex_filter *flex_filter, *it;
- uint32_t wufc, queueing, mask;
- uint32_t reg_off;
- uint8_t shift, i, j = 0;
+ uint32_t mask;
+ uint8_t shift, i;
flex_filter = rte_zmalloc("e1000_flex_filter",
sizeof(struct e1000_flex_filter), 0);
@@ -3911,15 +4034,20 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
flex_filter->filter_info.mask[i] = mask;
}
- wufc = E1000_READ_REG(hw, E1000_WUFC);
+ it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
+ &flex_filter->filter_info);
+ if (it == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ rte_free(flex_filter);
+ return -ENOENT;
+ }
+ if (it != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ rte_free(flex_filter);
+ return -EEXIST;
+ }
if (add) {
- if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
- &flex_filter->filter_info) != NULL) {
- PMD_DRV_LOG(ERR, "filter exists.");
- rte_free(flex_filter);
- return -EEXIST;
- }
flex_filter->queue = filter->queue;
/*
* look for an unused flex filter index
@@ -3941,52 +4069,10 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- if (flex_filter->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(flex_filter->index);
- else
- reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
-
- E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
- (E1000_WUFC_FLX0 << flex_filter->index));
- queueing = filter->len |
- (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
- (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
- E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
- queueing);
- for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
- E1000_WRITE_REG(hw, reg_off,
- flex_filter->filter_info.dwords[j]);
- reg_off += sizeof(uint32_t);
- E1000_WRITE_REG(hw, reg_off,
- flex_filter->filter_info.dwords[++j]);
- reg_off += sizeof(uint32_t);
- E1000_WRITE_REG(hw, reg_off,
- (uint32_t)flex_filter->filter_info.mask[i]);
- reg_off += sizeof(uint32_t) * 2;
- ++j;
- }
- } else {
- it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
- &flex_filter->filter_info);
- if (it == NULL) {
- PMD_DRV_LOG(ERR, "filter doesn't exist.");
- rte_free(flex_filter);
- return -ENOENT;
- }
-
- if (it->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(it->index);
- else
- reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+ igb_inject_flex_filter(dev, flex_filter);
- for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
- E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
- E1000_WRITE_REG(hw, E1000_WUFC, wufc &
- (~(E1000_WUFC_FLX0 << it->index)));
-
- filter_info->flex_mask &= ~(1 << it->index);
- TAILQ_REMOVE(&filter_info->flex_list, it, entries);
- rte_free(it);
+ } else {
+ igb_remove_flex_filter(dev, it);
rte_free(flex_filter);
}
@@ -4190,6 +4276,64 @@ igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
return NULL;
}
+/* inject an igb 5-tuple filter into the HW */
+static inline void
+igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
+ uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+ uint8_t i;
+
+ i = filter->index;
+ ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
+ if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
+ if (filter->filter_info.dst_ip_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
+ if (filter->filter_info.src_port_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ if (filter->filter_info.proto_mask == 0)
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
+ ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
+ E1000_FTQF_QUEUE_MASK;
+ ftqf |= E1000_FTQF_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
+ E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
+ E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+
+ spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
+ E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+
+ imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+ if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+ imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ /* tcp flags bits setting. */
+ if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+ if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_URG;
+ if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_ACK;
+ if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_PSH;
+ if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_RST;
+ if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_SYN;
+ if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIREXT_CTRL_FIN;
+ } else {
+ imir_ext |= E1000_IMIREXT_CTRL_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
/*
* igb_add_5tuple_filter_82576 - add a 5tuple filter
*
@@ -4205,12 +4349,9 @@ static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_5tuple_filter *filter;
- uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
- uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
uint8_t i;
int ret;
@@ -4254,50 +4395,29 @@ igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
return -ENOSYS;
}
- ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
- if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
- ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
- if (filter->filter_info.dst_ip_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
- if (filter->filter_info.src_port_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
- if (filter->filter_info.proto_mask == 0)
- ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
- ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
- E1000_FTQF_QUEUE_MASK;
- ftqf |= E1000_FTQF_QUEUE_ENABLE;
- E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
- E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
- E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+ igb_inject_5tuple_filter_82576(dev, filter);
+ return 0;
+}
- spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
- E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+int
+igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
+ struct e1000_5tuple_filter *filter)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
- if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
- imir |= E1000_IMIR_PORT_BP;
- else
- imir &= ~E1000_IMIR_PORT_BP;
- imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+ filter_info->fivetuple_mask &= ~(1 << filter->index);
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
- /* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
- if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_URG;
- if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_ACK;
- if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_PSH;
- if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_RST;
- if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_SYN;
- if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
- imir_ext |= E1000_IMIREXT_CTRL_FIN;
- } else
- imir_ext |= E1000_IMIREXT_CTRL_BP;
- E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
- E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+ E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
+ E1000_FTQF_VF_BP | E1000_FTQF_MASK);
+ E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -4316,7 +4436,6 @@ static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter)
{
- struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_5tuple_filter_info filter_5tuple;
@@ -4336,17 +4455,8 @@ igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
return -ENOENT;
}
- filter_info->fivetuple_mask &= ~(1 << filter->index);
- TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
- rte_free(filter);
+ igb_delete_5tuple_filter_82576(dev, filter);
- E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
- E1000_FTQF_VF_BP | E1000_FTQF_MASK);
- E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
- E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
return 0;
}
@@ -4412,7 +4522,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* - On success, zero.
* - On failure, a negative value.
*/
-static int
+int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
@@ -4434,7 +4544,9 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
break;
case RTE_2TUPLE_FLAGS:
case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
- if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+ if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
+ hw->mac.type != e1000_i210 &&
+ hw->mac.type != e1000_i211)
return -ENOTSUP;
if (add)
ret = igb_add_2tuple_filter(dev, ntuple_filter);
@@ -4576,7 +4688,7 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
int i;
for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
- if (filter_info->ethertype_filters[i] == ethertype &&
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
(filter_info->ethertype_mask & (1 << i)))
return i;
}
@@ -4585,33 +4697,35 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
static inline int
igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
- uint16_t ethertype)
+ uint16_t ethertype, uint32_t etqf)
{
int i;
for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
if (!(filter_info->ethertype_mask & (1 << i))) {
filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] = ethertype;
+ filter_info->ethertype_filters[i].ethertype = ethertype;
+ filter_info->ethertype_filters[i].etqf = etqf;
return i;
}
}
return -1;
}
-static inline int
+int
igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
uint8_t idx)
{
if (idx >= E1000_MAX_ETQF_FILTERS)
return -1;
filter_info->ethertype_mask &= ~(1 << idx);
- filter_info->ethertype_filters[idx] = 0;
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
return idx;
}
-static int
+int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
@@ -4651,16 +4765,15 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
if (add) {
+ etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+ etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
+ etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
ret = igb_ethertype_filter_insert(filter_info,
- filter->ether_type);
+ filter->ether_type, etqf);
if (ret < 0) {
PMD_DRV_LOG(ERR, "ethertype filters are full.");
return -ENOSYS;
}
-
- etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
- etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
- etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
} else {
ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
if (ret < 0)
@@ -4755,7 +4868,7 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -4770,6 +4883,11 @@ eth_igb_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FLEXIBLE:
ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &igb_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -5277,7 +5395,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask = 1 << queue_id;
uint32_t regval;
@@ -5350,7 +5468,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
uint32_t vec = E1000_MISC_VEC_ID;
uint32_t base = E1000_MISC_VEC_ID;
uint32_t misc_shift = 0;
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
/* won't configure msix register if no mapping is done
@@ -5422,6 +5540,84 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
E1000_WRITE_FLUSH(hw);
}
+/* restore n-tuple filter */
+static inline void
+igb_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
+ igb_inject_5tuple_filter_82576(dev, p_5tuple);
+ }
+
+ TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
+ igb_inject_2uple_filter(dev, p_2tuple);
+ }
+}
+
+/* restore SYN filter */
+static inline void
+igb_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & E1000_SYN_FILTER_ENABLE) {
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+igb_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ E1000_WRITE_REG(hw, E1000_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore flex byte filter */
+static inline void
+igb_flex_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
+ igb_inject_flex_filter(dev, flex_filter);
+ }
+}
+
+/* restore all types of filters */
+static int
+igb_filter_restore(struct rte_eth_dev *dev)
+{
+ igb_ntuple_filter_restore(dev);
+ igb_ethertype_filter_restore(dev);
+ igb_syn_filter_restore(dev);
+ igb_flex_filter_restore(dev);
+
+ return 0;
+}
+
RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
new file mode 100644
index 00000000..ed2ecc40
--- /dev/null
+++ b/drivers/net/e1000/igb_flow.c
@@ -0,0 +1,1707 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "e1000_logs.h"
+#include "base/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
+ do { \
+ item = (pattern) + (index); \
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
+ (index)++; \
+ item = (pattern) + (index); \
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = (actions) + (index); \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ (index)++; \
+ act = (actions) + (index); \
+ } \
+ } while (0)
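
These helpers only advance an index past VOID entries; the parsers below then step the index themselves after consuming each real item. A minimal sketch, assuming only the macros defined above, of that walking pattern:

static int
count_non_void_items(const struct rte_flow_item pattern[])
{
        const struct rte_flow_item *item;
        uint32_t index = 0;
        int count = 0;

        for (;;) {
                /* skip any VOID items, leaving 'item' on the next real one */
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type == RTE_FLOW_ITEM_TYPE_END)
                        break;
                count++;
                index++;        /* consume it and move on */
        }
        return count;
}
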
+
+#define IGB_FLEX_RAW_NUM 12
+
+/**
+ * Please be aware that there is an assumption common to all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * This is because the pattern describes packet contents, and packets
+ * normally carry their fields in network order.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and fill in the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP, TCP or SCTP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
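
A minimal sketch, assuming the rte_flow API shipped with this release (uint8_t port id) and purely illustrative addresses, ports and queue number, of how an application could express the n-tuple pattern documented above; note that the items carry network order while the attribute and action use CPU order:

#include <rte_flow.h>
#include <rte_byteorder.h>
#include <rte_ip.h>

static int
validate_ntuple_rule(uint8_t port_id, uint16_t rx_queue)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
                        .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
                        .next_proto_id = 17, /* UDP */
                },
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
                        .dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
                        .next_proto_id = 0xFF,
                },
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr = { .src_port = rte_cpu_to_be_16(80),
                         .dst_port = rte_cpu_to_be_16(80) },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = { .src_port = rte_cpu_to_be_16(0xFFFF),
                         .dst_port = rte_cpu_to_be_16(0xFFFF) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}

On 82576 such a rule ends up as a 5-tuple filter; on the other supported MAC types it must reduce to a 2-tuple (destination port and protocol only), as enforced in igb_parse_ntuple_filter() below.
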
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP or SCTP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ if (item->spec && item->mask) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ }
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ if (item->spec && item->mask) {
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ }
+ } else {
+ if (item->spec && item->mask) {
+ sctp_mask = (const struct rte_flow_item_sctp *)
+ item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)
+ item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+ }
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+/* an igb-specific parse function, because the filter flags are device-specific */
+static int
+igb_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Igb doesn't support many priorities. */
+ if (filter->priority > E1000_2TUPLE_MAX_PRI) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_5TUPLE_FLAGS;
+ } else {
+ if (filter->src_ip_mask || filter->dst_ip_mask ||
+ filter->src_port_mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "only two tuple are "
+ "supported by this filter");
+ return -rte_errno;
+ }
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by ntuple filter");
+ return -rte_errno;
+ }
+ filter->flags |= RTE_2TUPLE_FLAGS;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an ethertype rule,
+ * and fill in the ethertype filter info along the way.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
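
A minimal sketch, under the same assumptions as the n-tuple example above, of the ethertype pattern; 0x0807 is the illustrative ether type from the comment, and the MAC spec/mask are left all-zero because this driver rejects MAC comparison:

static int
validate_ethertype_rule(uint8_t port_id, uint16_t rx_queue)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0807),
        };
        struct rte_flow_item_eth eth_mask = {
                .type = rte_cpu_to_be_16(0xFFFF),       /* ether type fully masked */
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
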
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Parse pattern */
+ index = 0;
+
+ /* The first non-void item should be MAC. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ index = 0;
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ } else {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported "
+ "by ethertype filter");
+ return -rte_errno;
+ }
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule,
+ * and fill in the TCP SYN filter info along the way.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0x02
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
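
A minimal sketch of the TCP SYN pattern under the same assumptions; note that the check further down accepts only a tcp_flags mask equal to the SYN bit itself (TCP_SYN_FLAG, 0x02), and that the ETH and IPV4 items must be passed without spec or mask:

static int
validate_tcp_syn_rule(uint8_t port_id, uint16_t rx_queue)
{
        /* priority 0 requests the low SYN priority, UINT32_MAX the high one */
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
        struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0x02 } };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
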
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+igb_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (hw->mac.type == e1000_82576) {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by syn filter");
+ return -rte_errno;
+ }
+ } else {
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not "
+ "supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a flex byte rule,
+ * and fill in the flex byte filter info along the way.
+ * pattern:
+ * The first not void item must be RAW.
+ * The second not void item can be RAW or END.
+ * The third not void item can be RAW or END.
+ * The last not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * RAW relative 0 0x1
+ * offset 0 0xFFFFFFFF
+ * pattern {0x08, 0x06} {0xFF, 0xFF}
+ * RAW relative 1 0x1
+ * offset 100 0xFFFFFFFF
+ * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF}
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
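
The trickiest part of this parser is turning each RAW item's byte offset and length into the per-bit mask that the flex filter expects (one mask bit per matched byte, most significant bit first). A minimal standalone sketch of that mapping, mirroring the loop used below; the helper name is illustrative:

#include <limits.h>
#include <stdint.h>

/* mark 'length' bytes starting at byte 'offset' in a flex filter bit mask */
static void
flex_mark_bytes(uint8_t *mask, uint32_t offset, uint32_t length)
{
        uint32_t byte = offset / CHAR_BIT;
        int shift = offset % CHAR_BIT;
        uint32_t marked = 0;

        if (length == 0)
                return;
        for (; shift < CHAR_BIT; shift++) {
                mask[byte] |= (uint8_t)(0x80 >> shift);
                if (++marked == length)
                        break;
                if (shift == CHAR_BIT - 1) {
                        byte++;
                        shift = -1;
                }
        }
}

For example, flex_mark_bytes(mask, 2, 2) sets 0x30 in mask[0], i.e. bits 2 and 3 counted from the MSB, covering bytes 2 and 3 of the pattern buffer.
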
+static int
+cons_parse_flex_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_raw *raw_spec;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index, i, offset, total_offset;
+ uint32_t max_offset = 0;
+ int32_t shift, j, raw_index = 0;
+ int32_t relative[IGB_FLEX_RAW_NUM] = {0};
+ int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+item_loop:
+
+ /* the first not void item should be RAW */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ if (!raw_mask->length ||
+ !raw_mask->relative) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_mask->offset)
+ offset = raw_spec->offset;
+ else
+ offset = 0;
+
+ for (j = 0; j < raw_spec->length; j++) {
+ if (raw_mask->pattern[j] != 0xFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+ }
+
+ total_offset = 0;
+
+ if (raw_spec->relative) {
+ for (j = raw_index; j > 0; j--) {
+ total_offset += raw_offset[j - 1];
+ if (!relative[j - 1])
+ break;
+ }
+ if (total_offset + raw_spec->length + offset > max_offset)
+ max_offset = total_offset + raw_spec->length + offset;
+ } else {
+ if (raw_spec->length + offset > max_offset)
+ max_offset = raw_spec->length + offset;
+ }
+
+ if ((raw_spec->length + offset + total_offset) >
+ RTE_FLEX_FILTER_MAXLEN) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (raw_spec->relative == 0) {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[offset + j] =
+ raw_spec->pattern[j];
+ j = offset / CHAR_BIT;
+ shift = offset % CHAR_BIT;
+ } else {
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[total_offset + offset + j] =
+ raw_spec->pattern[j];
+ j = (total_offset + offset) / CHAR_BIT;
+ shift = (total_offset + offset) % CHAR_BIT;
+ }
+
+ i = 0;
+
+ for ( ; shift < CHAR_BIT; shift++) {
+ filter->mask[j] |= (0x80 >> shift);
+ i++;
+ if (i == raw_spec->length)
+ break;
+ if (shift == (CHAR_BIT - 1)) {
+ j++;
+ shift = -1;
+ }
+ }
+
+ relative[raw_index] = raw_spec->relative;
+ raw_offset[raw_index] = offset + raw_spec->length;
+ raw_index++;
+
+ /* check if the next not void item is RAW */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ /* go back to parser */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* if the item is RAW, the content should be parsed */
+ goto item_loop;
+ }
+
+ filter->len = RTE_ALIGN(max_offset, 8);
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+static int
+igb_parse_flex_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_flex_filter(attr, pattern,
+ actions, filter, error);
+
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
+ filter->len % sizeof(uint64_t) != 0) {
+ PMD_DRV_LOG(ERR, "filter's length is out of range");
+ return -EINVAL;
+ }
+
+ if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
+ PMD_DRV_LOG(ERR, "filter's priority is out of range");
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
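
As a rough illustration of how the flex-byte pattern documented above reaches this parser, an application could build such a rule through the generic rte_flow API along the following lines. This is a minimal sketch, not part of the patch: the port id, queue index and the two-byte pattern are made-up values, the RAW spec/mask are laid out in a local union only because struct rte_flow_item_raw carries its pattern bytes inline, and error handling is omitted.

	#include <stdint.h>
	#include <string.h>
	#include <rte_flow.h>

	static struct rte_flow *
	make_flex_byte_rule(uint8_t port_id, struct rte_flow_error *err)
	{
		/* room for the fixed RAW header plus 2 inline pattern bytes */
		union {
			struct rte_flow_item_raw hdr;
			uint8_t room[sizeof(struct rte_flow_item_raw) + 2];
		} raw_spec, raw_mask;
		struct rte_flow_item pattern[2];
		struct rte_flow_action actions[2];
		struct rte_flow_action_queue queue = { .index = 1 };
		struct rte_flow_attr attr = { .ingress = 1 };

		memset(&raw_spec, 0, sizeof(raw_spec));
		memset(&raw_mask, 0, sizeof(raw_mask));

		/* spec: non-relative match of {0x08, 0x06} at offset 0 */
		raw_spec.hdr.relative = 0;
		raw_spec.hdr.offset = 0;
		raw_spec.hdr.length = 2;
		raw_spec.hdr.pattern[0] = 0x08;
		raw_spec.hdr.pattern[1] = 0x06;

		/* mask: relative/offset/length significant, pattern fully masked */
		raw_mask.hdr.relative = 1;
		raw_mask.hdr.offset = -1;
		raw_mask.hdr.length = 0xFFFF;
		raw_mask.hdr.pattern[0] = 0xFF;
		raw_mask.hdr.pattern[1] = 0xFF;

		memset(pattern, 0, sizeof(pattern));
		pattern[0].type = RTE_FLOW_ITEM_TYPE_RAW;
		pattern[0].spec = &raw_spec;
		pattern[0].mask = &raw_mask;
		pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

		memset(actions, 0, sizeof(actions));
		actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
		actions[0].conf = &queue;
		actions[1].type = RTE_FLOW_ACTION_TYPE_END;

		return rte_flow_create(port_id, &attr, pattern, actions, err);
	}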
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the first filter it hits.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+igb_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ struct rte_flow *flow = NULL;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+
+ flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+ igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
+ sizeof(struct igb_flow_mem), 0);
+ if (!igb_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ igb_flow_mem_ptr->flow = flow;
+ igb_flow_mem_ptr->dev = dev;
+ TAILQ_INSERT_TAIL(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
+ sizeof(struct igb_ntuple_filter_ele), 0);
+ (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "igb_ethertype_filter",
+ sizeof(struct igb_ethertype_filter_ele), 0);
+ (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("igb_syn_filter",
+ sizeof(struct igb_eth_syn_filter_ele), 0);
+ (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+ if (!ret) {
+ ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
+ if (!ret) {
+ flex_filter_ptr = rte_zmalloc("igb_flex_filter",
+ sizeof(struct igb_flex_filter_ele), 0);
+ (void)rte_memcpy(&flex_filter_ptr->filter_info,
+ &flex_filter,
+ sizeof(struct rte_eth_flex_filter));
+ TAILQ_INSERT_TAIL(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ flow->rule = flex_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(igb_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Check if the flow rule is supported by igb.
+ * It only checks the format. It doesn't guarantee the rule can be programmed
+ * into the HW, because there may not be enough room for the rule.
+ */
+static int
+igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_flex_filter flex_filter;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = igb_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = igb_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = igb_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ ret = igb_parse_flex_filter(dev, attr, pattern,
+ actions, &flex_filter, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on igb. */
+static int
+igb_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ntuple_filter(dev,
+ &ntuple_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ ret = igb_add_del_ethertype_filter(dev,
+ &ethertype_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_syn_filter_set(dev,
+ &syn_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr = (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ ret = eth_igb_add_del_flex_filter(dev,
+ &flex_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+ if (igb_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr, entries);
+ rte_free(igb_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* remove all the n-tuple filters */
+static void
+igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_5tuple_filter *p_5tuple;
+ struct e1000_2tuple_filter *p_2tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ igb_delete_5tuple_filter_82576(dev, p_5tuple);
+
+ while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
+ igb_delete_2tuple_filter(dev, p_2tuple);
+}
+
+/* remove all the ether type filters */
+static void
+igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ (void)igb_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+static void
+igb_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the flex filters */
+static void
+igb_clear_all_flex_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_flex_filter *flex_filter;
+
+ while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
+ igb_remove_flex_filter(dev, flex_filter);
+}
+
+void
+igb_filterlist_flush(struct rte_eth_dev *dev)
+{
+ struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+ struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+ struct igb_eth_syn_filter_ele *syn_filter_ptr;
+ struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_flow_mem *igb_flow_mem_ptr;
+ enum rte_filter_type filter_type;
+ struct rte_flow *pmd_flow;
+
+ TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
+ if (igb_flow_mem_ptr->dev == dev) {
+ pmd_flow = igb_flow_mem_ptr->flow;
+ filter_type = pmd_flow->filter_type;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr =
+ (struct igb_ntuple_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr =
+ (struct igb_ethertype_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr =
+ (struct igb_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ flex_filter_ptr =
+ (struct igb_flex_filter_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_flex_list,
+ flex_filter_ptr, entries);
+ rte_free(flex_filter_ptr);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type "
+ "(%d) not supported", filter_type);
+ break;
+ }
+ TAILQ_REMOVE(&igb_flow_list,
+ igb_flow_mem_ptr,
+ entries);
+ rte_free(igb_flow_mem_ptr->flow);
+ rte_free(igb_flow_mem_ptr);
+ }
+ }
+}
+
+/* Destroy all flow rules associated with a port on igb. */
+static int
+igb_flow_flush(struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_error *error)
+{
+ igb_clear_all_ntuple_filter(dev);
+ igb_clear_all_ethertype_filter(dev);
+ igb_clear_syn_filter(dev);
+ igb_clear_all_flex_filter(dev);
+ igb_filterlist_flush(dev);
+
+ return 0;
+}
+
+const struct rte_flow_ops igb_flow_ops = {
+ .validate = igb_flow_validate,
+ .create = igb_flow_create,
+ .destroy = igb_flow_destroy,
+ .flush = igb_flow_flush,
+};
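
For context, these callbacks are not invoked directly by applications; they are reached through the generic rte_flow API once the ethdev layer has fetched igb_flow_ops through the RTE_ETH_FILTER_GENERIC filter_ctrl path (handled elsewhere in this patch). A minimal application-side sketch of the resulting lifecycle, assuming port_id and the attr/pattern/actions arrays are placeholders built as in the parsers above, could look like:

	struct rte_flow_error err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
		if (flow != NULL)
			rte_flow_destroy(port_id, flow, &err);
	}
	/* drop every rule still attached to the port */
	rte_flow_flush(port_id, &err);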
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 923c78a1..6809d30c 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -57,7 +57,7 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
return pci_dev->max_vfs;
}
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 7eaebf40..71a8c1e2 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -207,6 +207,7 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
+ memset(mz->addr, 0, size); \
virt = mz->addr; \
phys = mz->phys_addr; \
} while (0)
@@ -219,6 +220,7 @@ typedef uint64_t dma_addr_t;
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, 0); \
+ memset(mz->addr, 0, size); \
virt = mz->addr; \
} while (0)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 806073ca..80ce1f35 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -213,12 +213,12 @@ static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
+static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -689,11 +689,10 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
- unsigned int ring_mask = ring->ring_size - 1;
+ unsigned int i;
- while (ring->next_to_clean != ring->next_to_use) {
- struct ena_tx_buffer *tx_buf =
- &ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+ for (i = 0; i < ring->ring_size; ++i) {
+ struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
if (tx_buf->mbuf)
rte_pktmbuf_free(tx_buf->mbuf);
@@ -1289,7 +1288,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
adapter->pdev = pci_dev;
PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
@@ -1451,7 +1450,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
@@ -1772,6 +1771,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Free whole mbuf chain */
mbuf = tx_info->mbuf;
rte_pktmbuf_free(mbuf);
+ tx_info->mbuf = NULL;
/* Put back descriptor to the ring for reuse */
tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 2c7496dc..db48ff2d 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -56,6 +56,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_wq.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_dev.c
diff --git a/drivers/net/enic/base/cq_enet_desc.h b/drivers/net/enic/base/cq_enet_desc.h
index f9822a45..e8410563 100644
--- a/drivers/net/enic/base/cq_enet_desc.h
+++ b/drivers/net/enic/base/cq_enet_desc.h
@@ -71,6 +71,19 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_clsf_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le16 filter_id;
+ __le16 lif;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 1cd031ac..49b36555 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -387,17 +387,24 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
- u64 *a0, u64 *a1, int wait)
+ u64 *args, int nargs, int wait)
{
u32 status;
int err;
+ /*
+ * Proxy command consumes 2 arguments. One for proxy index,
+ * the other is for command to be proxied
+ */
+ if (nargs > VNIC_DEVCMD_NARGS - 2) {
+ pr_err("number of args %d exceeds the maximum\n", nargs);
+ return -EINVAL;
+ }
memset(vdev->args, 0, sizeof(vdev->args));
vdev->args[0] = vdev->proxy_index;
vdev->args[1] = cmd;
- vdev->args[2] = *a0;
- vdev->args[3] = *a1;
+ memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));
err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
if (err)
@@ -412,24 +419,26 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
return err;
}
- *a0 = vdev->args[1];
- *a1 = vdev->args[2];
+ memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));
return 0;
}
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
- enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+ enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
int err;
- vdev->args[0] = *a0;
- vdev->args[1] = *a1;
+ if (nargs > VNIC_DEVCMD_NARGS) {
+ pr_err("number of args %d exceeds the maximum\n", nargs);
+ return -EINVAL;
+ }
+ memset(vdev->args, 0, sizeof(vdev->args));
+ memcpy(vdev->args, args, nargs * sizeof(args[0]));
err = _vnic_dev_cmd(vdev, cmd, wait);
- *a0 = vdev->args[0];
- *a1 = vdev->args[1];
+ memcpy(args, vdev->args, nargs * sizeof(args[0]));
return err;
}
@@ -455,24 +464,64 @@ void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
+ u64 args[2];
+ int err;
+
+ args[0] = *a0;
+ args[1] = *a1;
memset(vdev->args, 0, sizeof(vdev->args));
switch (vdev->proxy) {
case PROXY_BY_INDEX:
+ err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+ args, ARRAY_SIZE(args), wait);
+ break;
+ case PROXY_BY_BDF:
+ err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+ args, ARRAY_SIZE(args), wait);
+ break;
+ case PROXY_NONE:
+ default:
+ err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
+ break;
+ }
+
+ if (err == 0) {
+ *a0 = args[0];
+ *a1 = args[1];
+ }
+
+ return err;
+}
+
+int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *args, int nargs, int wait)
+{
+ switch (vdev->proxy) {
+ case PROXY_BY_INDEX:
return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
- a0, a1, wait);
+ args, nargs, wait);
case PROXY_BY_BDF:
return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
- a0, a1, wait);
+ args, nargs, wait);
case PROXY_NONE:
default:
- return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+ return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
}
}
+static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
+ int nargs)
+{
+ memset(args, 0, nargs * sizeof(*args));
+ args[0] = CMD_ADD_ADV_FILTER;
+ args[1] = FILTER_CAP_MODE_V1_FLAG;
+ return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
+}
+
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
- u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0;
+ u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
int wait = 1000;
int err;
@@ -482,7 +531,65 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
return (a1 >= (u32)FILTER_DPDK_1);
}
-static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
+/* Determine the "best" filtering mode VIC is capable of. Returns one of 3
+ * values or 0 on error:
+ * FILTER_DPDK_1 - advanced filters available
+ * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
+ * the IP layer must be explicitly specified. I.e. cannot have a UDP
+ * filter that matches both IPv4 and IPv6.
+ * FILTER_IPV4_5TUPLE - fallback if neither of the 2 above is available;
+ * all other filter types are not available.
+ * Return true in filter_tags if supported
+ */
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+ u8 *filter_tags)
+{
+ u64 args[4];
+ int err;
+ u32 max_level = 0;
+
+ err = vnic_dev_advanced_filters_cap(vdev, args, 4);
+
+ /* determine if filter tags are available */
+ if (err)
+ *filter_tags = 0;
+ if ((args[2] == FILTER_CAP_MODE_V1) &&
+ (args[3] & FILTER_ACTION_FILTER_ID_FLAG))
+ *filter_tags = 1;
+ else
+ *filter_tags = 0;
+
+ if (err || ((args[0] == 1) && (args[1] == 0))) {
+ /* Adv filter Command not supported or adv filters available but
+ * not enabled. Try the normal filter capability command.
+ */
+ args[0] = CMD_ADD_FILTER;
+ args[1] = 0;
+ err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
+ if (err)
+ return err;
+ max_level = args[1];
+ goto parse_max_level;
+ } else if (args[2] == FILTER_CAP_MODE_V1) {
+ /* parse filter capability mask in args[1] */
+ if (args[1] & FILTER_DPDK_1_FLAG)
+ *mode = FILTER_DPDK_1;
+ else if (args[1] & FILTER_USNIC_IP_FLAG)
+ *mode = FILTER_USNIC_IP;
+ else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
+ *mode = FILTER_IPV4_5TUPLE;
+ return 0;
+ }
+ max_level = args[1];
+parse_max_level:
+ if (max_level >= (u32)FILTER_USNIC_IP)
+ *mode = FILTER_USNIC_IP;
+ else
+ *mode = FILTER_IPV4_5TUPLE;
+ return 0;
+}
+
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
int wait = 1000;
@@ -1017,32 +1124,30 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
* In case of DEL filter, the caller passes the RQ number. Return
* value is irrelevant.
* @data: filter data
+ * @action: action data
*/
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter_v2 *data)
+ struct filter_v2 *data, struct filter_action_v2 *action_v2)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
dma_addr_t tlv_pa;
int ret = -EINVAL;
struct filter_tlv *tlv, *tlv_va;
- struct filter_action *action;
u64 tlv_size;
- u32 filter_size;
+ u32 filter_size, action_size;
static unsigned int unique_id;
char z_name[RTE_MEMZONE_NAMESIZE];
enum vnic_devcmd_cmd dev_cmd;
-
if (cmd == CLSF_ADD) {
- if (data->type == FILTER_DPDK_1)
- dev_cmd = CMD_ADD_ADV_FILTER;
- else
- dev_cmd = CMD_ADD_FILTER;
+ dev_cmd = (data->type >= FILTER_DPDK_1) ?
+ CMD_ADD_ADV_FILTER : CMD_ADD_FILTER;
filter_size = vnic_filter_size(data);
- tlv_size = filter_size +
- sizeof(struct filter_action) +
+ action_size = vnic_action_size(action_v2);
+
+ tlv_size = filter_size + action_size +
2*sizeof(struct filter_tlv);
snprintf((char *)z_name, sizeof(z_name),
"vnic_clsf_%d", unique_id++);
@@ -1063,11 +1168,8 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
filter_size);
tlv->type = CLSF_TLV_ACTION;
- tlv->length = sizeof(struct filter_action);
- action = (struct filter_action *)&tlv->val;
- action->type = FILTER_ACTION_RQ_STEERING;
- action->u.rq_idx = *entry;
-
+ tlv->length = action_size;
+ memcpy(&tlv->val, (void *)action_v2, action_size);
ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
*entry = (u16)a0;
vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 06ebd4ce..9a9e6917 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -135,6 +135,9 @@ void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
+int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
+int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
+ u8 *filter_tags);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -202,7 +205,7 @@ int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter_v2 *data);
+ struct filter_v2 *data, struct filter_action_v2 *action_v2);
#ifdef ENIC_VXLAN
int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
u8 overlay, u8 config);
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 785fd6fd..05d87b91 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -92,6 +92,8 @@
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
@@ -598,12 +600,29 @@ enum vnic_devcmd_cmd {
* out: (u32) a0=filter identifier
*
* Capability query:
- * out: (u64) a0= 1 if capabliity query supported
- * (u64) a1= MAX filter type supported
+ * in: (u64) a1= supported filter capability exchange modes
+ * out: (u64) a0= 1 if capability query supported
+ * if (u64) a1 = 0: a1 = MAX filter type supported
+ * if (u64) a1 & FILTER_CAP_MODE_V1_FLAG:
+ * a1 = bitmask of supported filters
+ * a2 = FILTER_CAP_MODE_V1
+ * a3 = bitmask of supported actions
*/
CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
};
+/* Modes for exchanging advanced filter capabilities. The modes supported by
+ * the driver are passed in the CMD_ADD_ADV_FILTER capability command and the
+ * mode selected is returned.
+ * V0: the maximum filter type supported is returned
+ * V1: bitmasks of supported filters and actions are returned
+ */
+enum filter_cap_mode {
+ FILTER_CAP_MODE_V0 = 0, /* Must always be 0 for legacy drivers */
+ FILTER_CAP_MODE_V1 = 1,
+};
+#define FILTER_CAP_MODE_V1_FLAG (1 << FILTER_CAP_MODE_V1)
+
/* CMD_ENABLE2 flags */
#define CMD_ENABLE2_STANDBY 0x0
#define CMD_ENABLE2_ACTIVE 0x1
@@ -837,6 +856,7 @@ struct filter_generic_1 {
/* Specifies the filter_action type. */
enum {
FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_V2 = 1,
FILTER_ACTION_MAX
};
@@ -847,6 +867,22 @@ struct filter_action {
} u;
} __attribute__((packed));
+#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
+#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
+#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_FILTER_ID_FLAG)
+
+/* Version 2 of filter action must be a strict extension of struct filter_action
+ * where the first fields exactly match in size and meaning.
+ */
+struct filter_action_v2 {
+ u32 type;
+ u32 rq_idx;
+ u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */
+ u16 filter_id;
+ u_int8_t reserved[32]; /* for future expansion */
+} __attribute__((packed));
+
/* Specifies the filter type. */
enum filter_type {
FILTER_USNIC_ID = 0,
@@ -859,6 +895,21 @@ enum filter_type {
FILTER_MAX
};
+#define FILTER_USNIC_ID_FLAG (1 << FILTER_USNIC_ID)
+#define FILTER_IPV4_5TUPLE_FLAG (1 << FILTER_IPV4_5TUPLE)
+#define FILTER_MAC_VLAN_FLAG (1 << FILTER_MAC_VLAN)
+#define FILTER_VLAN_IP_3TUPLE_FLAG (1 << FILTER_VLAN_IP_3TUPLE)
+#define FILTER_NVGRE_VMQ_FLAG (1 << FILTER_NVGRE_VMQ)
+#define FILTER_USNIC_IP_FLAG (1 << FILTER_USNIC_IP)
+#define FILTER_DPDK_1_FLAG (1 << FILTER_DPDK_1)
+#define FILTER_V1_ALL (FILTER_USNIC_ID_FLAG | \
+ FILTER_IPV4_5TUPLE_FLAG | \
+ FILTER_MAC_VLAN_FLAG | \
+ FILTER_VLAN_IP_3TUPLE_FLAG | \
+ FILTER_NVGRE_VMQ_FLAG | \
+ FILTER_USNIC_IP_FLAG | \
+ FILTER_DPDK_1_FLAG)
+
struct filter {
u32 type;
union {
@@ -903,7 +954,7 @@ struct filter_tlv {
/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
#define FILTER_MAX_BUF_SIZE 100
#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
- sizeof(struct filter_action) + \
+ sizeof(struct filter_action_v2) + \
(2 * sizeof(struct filter_tlv)))
/*
@@ -949,6 +1000,30 @@ enum {
};
/*
+ * Get the action structure size given action type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_action_v2)"
+ * when computing length for TLV.
+ */
+static inline u_int32_t
+vnic_action_size(struct filter_action_v2 *fap)
+{
+ u_int32_t size;
+
+ switch (fap->type) {
+ case FILTER_ACTION_RQ_STEERING:
+ size = sizeof(struct filter_action);
+ break;
+ case FILTER_ACTION_V2:
+ size = sizeof(struct filter_action_v2);
+ break;
+ default:
+ size = sizeof(struct filter_action);
+ break;
+ }
+ return size;
+}
+
+/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
*
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index d17a35f4..e28f2235 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -80,6 +80,8 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
+/* Special Filter id for non-specific packet flagging. Don't change value */
+#define ENIC_MAGIC_FILTER_ID 0xffff
#define ENICPMD_FDIR_MAX 64
@@ -111,6 +113,12 @@ struct enic_memzone_entry {
LIST_ENTRY(enic_memzone_entry) entries;
};
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next;
+ u16 enic_filter_id;
+ struct filter_v2 enic_filter;
+};
+
/* Per-instance private data structure */
struct enic {
struct enic *next;
@@ -135,7 +143,9 @@ struct enic {
int link_status;
u8 hw_ip_checksum;
u16 max_mtu;
- u16 adv_filters;
+ u8 adv_filters;
+ u32 flow_filter_mode;
+ u8 filter_tags;
unsigned int flags;
unsigned int priv_flags;
@@ -170,6 +180,8 @@ struct enic {
rte_spinlock_t memzone_list_lock;
rte_spinlock_t mtu_lock;
+ LIST_HEAD(enic_flows, rte_flow) flows;
+ rte_spinlock_t flows_lock;
};
/* Get the CQ index from a Start of Packet(SOP) RQ index */
@@ -293,9 +305,9 @@ extern int enic_clsf_init(struct enic *enic);
extern void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-uint16_t enic_dummy_recv_pkts(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **rx_pkts,
- __rte_unused uint16_t nb_pkts);
+uint16_t enic_dummy_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
@@ -303,7 +315,8 @@ int enic_link_update(struct enic *enic);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- __rte_unused struct rte_eth_fdir_masks *masks);
+ struct rte_eth_fdir_masks *masks);
void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
struct rte_eth_fdir_masks *masks);
+extern const struct rte_flow_ops enic_flow_ops;
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 487f8044..9b461424 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -57,7 +57,7 @@
#include "vnic_intr.h"
#include "vnic_nic.h"
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
@@ -345,7 +345,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
/* Delete the filter */
vnic_dev_classifier(enic->vdev, CLSF_DEL,
- &key->fltr_id, NULL);
+ &key->fltr_id, NULL, NULL);
rte_free(key);
enic->fdir.nodes[pos] = NULL;
enic->fdir.stats.free++;
@@ -365,8 +365,10 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
u32 flowtype_supported;
u16 flex_bytes;
u16 queue;
+ struct filter_action_v2 action;
memset(&fltr, 0, sizeof(fltr));
+ memset(&action, 0, sizeof(action));
flowtype_supported = enic->fdir.types_mask
& (1 << params->input.flow_type);
@@ -439,7 +441,7 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
* Delete the filter and add the modified one later
*/
vnic_dev_classifier(enic->vdev, CLSF_DEL,
- &key->fltr_id, NULL);
+ &key->fltr_id, NULL, NULL);
enic->fdir.stats.free++;
}
@@ -451,8 +453,11 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
enic->fdir.copy_fltr_fn(&fltr, &params->input,
&enic->rte_dev->data->dev_conf.fdir_conf.mask);
+ action.type = FILTER_ACTION_RQ_STEERING;
+ action.rq_idx = queue;
- if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
+ if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
+ &action)) {
key->fltr_id = queue;
} else {
dev_err(enic, "Add classifier entry failed\n");
@@ -462,7 +467,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
}
if (do_free)
- vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
+ vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
+ NULL);
else{
enic->fdir.stats.free--;
enic->fdir.stats.add++;
@@ -488,7 +494,7 @@ void enic_clsf_destroy(struct enic *enic)
key = enic->fdir.nodes[index];
if (key) {
vnic_dev_classifier(enic->vdev, CLSF_DEL,
- &key->fltr_id, NULL);
+ &key->fltr_id, NULL, NULL);
rte_free(key);
enic->fdir.nodes[index] = NULL;
}
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 331cd5ef..da8fec2d 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -116,13 +116,25 @@ enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
+
+ ENICPMD_FUNC_TRACE();
- if (RTE_ETH_FILTER_FDIR == filter_type)
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &enic_flow_ops;
+ break;
+ case RTE_ETH_FILTER_FDIR:
ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
- else
+ break;
+ default:
dev_warning(enic, "Filter type (%d) not supported",
filter_type);
+ ret = -EINVAL;
+ break;
+ }
return ret;
}
@@ -455,7 +467,7 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
@@ -619,7 +631,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
- pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
new file mode 100644
index 00000000..a728d077
--- /dev/null
+++ b/drivers/net/enic/enic_flow.c
@@ -0,0 +1,1544 @@
+/*
+ * Copyright (c) 2017, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <errno.h>
+#include <rte_log.h>
+#include <rte_ethdev.h>
+#include <rte_flow_driver.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_nic.h"
+
+#ifdef RTE_LIBRTE_ENIC_DEBUG_FLOW
+#define FLOW_TRACE() \
+ RTE_LOG(DEBUG, PMD, "%s()\n", __func__)
+#define FLOW_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, fmt, ## args)
+#else
+#define FLOW_TRACE() do { } while (0)
+#define FLOW_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+/** Info about how to copy items into enic filters. */
+struct enic_items {
+ /** Function for copying and validating an item. */
+ int (*copy_item)(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst);
+ /** List of valid previous items. */
+ const enum rte_flow_item_type * const prev_items;
+ /** True if it's OK for this item to be the first item. For some NIC
+ * versions, it's invalid to start the stack above layer 3.
+ */
+ const u8 valid_start_item;
+};
+
+/** Filtering capabilities for various NIC and firmware versions. */
+struct enic_filter_cap {
+ /** list of valid items and their handlers and attributes. */
+ const struct enic_items *item_info;
+};
+
+/* functions for copying flow actions into enic actions */
+typedef int (copy_action_fn)(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action);
+
+/* functions for copying items into enic filters */
+typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst);
+
+/** Action capabilities for various NICs. */
+struct enic_action_cap {
+ /** list of valid actions */
+ const enum rte_flow_action_type *actions;
+ /** copy function for a particular NIC */
+ int (*copy_fn)(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action);
+};
+
+/* Forward declarations */
+static enic_copy_item_fn enic_copy_item_ipv4_v1;
+static enic_copy_item_fn enic_copy_item_udp_v1;
+static enic_copy_item_fn enic_copy_item_tcp_v1;
+static enic_copy_item_fn enic_copy_item_eth_v2;
+static enic_copy_item_fn enic_copy_item_vlan_v2;
+static enic_copy_item_fn enic_copy_item_ipv4_v2;
+static enic_copy_item_fn enic_copy_item_ipv6_v2;
+static enic_copy_item_fn enic_copy_item_udp_v2;
+static enic_copy_item_fn enic_copy_item_tcp_v2;
+static enic_copy_item_fn enic_copy_item_sctp_v2;
+static enic_copy_item_fn enic_copy_item_vxlan_v2;
+static copy_action_fn enic_copy_action_v1;
+static copy_action_fn enic_copy_action_v2;
+
+/**
+ * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
+ * is supported.
+ */
+static const struct enic_items enic_items_v1[] = {
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v1,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v1,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v1,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/**
+ * NICs that have the Advanced Filters capability but have it disabled. This
+ * means that layer 3 must be specified.
+ */
+static const struct enic_items enic_items_v2[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .copy_item = enic_copy_item_eth_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .copy_item = enic_copy_item_vlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .copy_item = enic_copy_item_ipv6_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_SCTP] = {
+ .copy_item = enic_copy_item_sctp_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .copy_item = enic_copy_item_vxlan_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/** NICs with Advanced filters enabled */
+static const struct enic_items enic_items_v3[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .copy_item = enic_copy_item_eth_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .copy_item = enic_copy_item_vlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .copy_item = enic_copy_item_ipv4_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .copy_item = enic_copy_item_ipv6_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .copy_item = enic_copy_item_udp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .copy_item = enic_copy_item_tcp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_SCTP] = {
+ .copy_item = enic_copy_item_sctp_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .copy_item = enic_copy_item_vxlan_v2,
+ .valid_start_item = 1,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+};
+
+/** Filtering capabilities indexed by this NIC's supported filter type. */
+static const struct enic_filter_cap enic_filter_cap[] = {
+ [FILTER_IPV4_5TUPLE] = {
+ .item_info = enic_items_v1,
+ },
+ [FILTER_USNIC_IP] = {
+ .item_info = enic_items_v2,
+ },
+ [FILTER_DPDK_1] = {
+ .item_info = enic_items_v3,
+ },
+};
+
+/** Supported actions for older NICs */
+static const enum rte_flow_action_type enic_supported_actions_v1[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Supported actions for newer NICs */
+static const enum rte_flow_action_type enic_supported_actions_v2[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Action capabilities indexed by NIC version information */
+static const struct enic_action_cap enic_action_cap[] = {
+ [FILTER_ACTION_RQ_STEERING_FLAG] = {
+ .actions = enic_supported_actions_v1,
+ .copy_fn = enic_copy_action_v1,
+ },
+ [FILTER_ACTION_V2_ALL] = {
+ .actions = enic_supported_actions_v2,
+ .copy_fn = enic_copy_action_v2,
+ },
+};
+
+static int
+mask_exact_match(const u8 *supported, const u8 *supplied,
+ unsigned int size)
+{
+ unsigned int i;
+ for (i = 0; i < size; i++) {
+ if (supported[i] != supplied[i])
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Copy IPv4 item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct ipv4_hdr supported_mask = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+
+ /* This is an exact match filter, both fields must be set */
+ if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
+ FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the supplied mask exactly matches capability */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)item->mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "IPv4 exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_addr = spec->hdr.src_addr;
+ enic_5tup->dst_addr = spec->hdr.dst_addr;
+
+ return 0;
+}
+
+/**
+ * Copy UDP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_udp_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct udp_hdr supported_mask = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+
+ /* This is an exact match filter, both ports must be set */
+ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+ FLOW_LOG(ERR, "UDP exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the supplied mask exactly matches capability */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)item->mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "UDP exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_port = spec->hdr.src_port;
+ enic_5tup->dst_port = spec->hdr.dst_port;
+ enic_5tup->protocol = PROTO_UDP;
+
+ return 0;
+}
+
+/**
+ * Copy TCP item into version 1 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Should always be 0 for version 1.
+ */
+static int
+enic_copy_item_tcp_v1(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
+ struct tcp_hdr supported_mask = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ };
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+
+ /* This is an exact match filter, both ports must be set */
+ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
+ FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr");
+ return ENOTSUP;
+ }
+
+ /* check that the supplied mask exactly matches capability */
+ if (!mask_exact_match((const u8 *)&supported_mask,
+ (const u8 *)item->mask, sizeof(*mask))) {
+ FLOW_LOG(ERR, "TCP exact match mask");
+ return ENOTSUP;
+ }
+
+ enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic_5tup->src_port = spec->hdr.src_port;
+ enic_5tup->dst_port = spec->hdr.dst_port;
+ enic_5tup->protocol = PROTO_TCP;
+
+ return 0;
+}
+
+/**
+ * Copy ETH item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_eth_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ struct ether_hdr enic_spec;
+ struct ether_hdr enic_mask;
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+
+ memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
+ ETHER_ADDR_LEN);
+
+ memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
+ ETHER_ADDR_LEN);
+ enic_spec.ether_type = spec->type;
+ enic_mask.ether_type = mask->type;
+
+ if (*inner_ofst == 0) {
+ /* outer header */
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
+ sizeof(struct ether_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
+ sizeof(struct ether_hdr));
+ } else {
+ /* inner header */
+ if ((*inner_ofst + sizeof(struct ether_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ /* Offset into L5 where inner Ethernet header goes */
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ &enic_mask, sizeof(struct ether_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ &enic_spec, sizeof(struct ether_hdr));
+ *inner_ofst += sizeof(struct ether_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy VLAN item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_vlan_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ /* Don't support filtering in tpid */
+ if (mask) {
+ if (mask->tpid != 0)
+ return ENOTSUP;
+ } else {
+ mask = &rte_flow_item_vlan_mask;
+ RTE_ASSERT(mask->tpid == 0);
+ }
+
+ if (*inner_ofst == 0) {
+ /* Outer header. Use the vlan mask/val fields */
+ gp->mask_vlan = mask->tci;
+ gp->val_vlan = spec->tci;
+ } else {
+ /* Inner header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct vlan_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct vlan_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct vlan_hdr));
+ *inner_ofst += sizeof(struct vlan_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy IPv4 item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst == 0) {
+ /* Match IPv4 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV4;
+ gp->val_flags |= FILTER_GENERIC_1_IPV4;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv4_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv4_hdr));
+ } else {
+ /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct ipv4_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct ipv4_hdr));
+ *inner_ofst += sizeof(struct ipv4_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy IPv6 item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match IPv6 */
+ gp->mask_flags |= FILTER_GENERIC_1_IPV6;
+ gp->val_flags |= FILTER_GENERIC_1_IPV6;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
+ sizeof(struct ipv6_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
+ sizeof(struct ipv6_hdr));
+ } else {
+ /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct ipv6_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct ipv6_hdr));
+ *inner_ofst += sizeof(struct ipv6_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy UDP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_udp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match UDP */
+ gp->mask_flags |= FILTER_GENERIC_1_UDP;
+ gp->val_flags |= FILTER_GENERIC_1_UDP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct udp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct udp_hdr));
+ } else {
+		/* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct udp_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct udp_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct udp_hdr));
+ *inner_ofst += sizeof(struct udp_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy TCP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * If zero, this is an outer header. If non-zero, this is the offset into L5
+ * where the header begins.
+ */
+static int
+enic_copy_item_tcp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Match TCP */
+ gp->mask_flags |= FILTER_GENERIC_1_TCP;
+ gp->val_flags |= FILTER_GENERIC_1_TCP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ return ENOTSUP;
+
+ if (*inner_ofst == 0) {
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct tcp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct tcp_hdr));
+ } else {
+		/* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
+ if ((*inner_ofst + sizeof(struct tcp_hdr)) >
+ FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
+ mask, sizeof(struct tcp_hdr));
+ memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
+ spec, sizeof(struct tcp_hdr));
+ *inner_ofst += sizeof(struct tcp_hdr);
+ }
+ return 0;
+}
+
+/**
+ * Copy SCTP item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. Don't support inner SCTP filtering.
+ */
+static int
+enic_copy_item_sctp_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_sctp *spec = item->spec;
+ const struct rte_flow_item_sctp *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return ENOTSUP;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_sctp_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
+ sizeof(struct sctp_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
+ sizeof(struct sctp_hdr));
+ return 0;
+}
+
+/**
+ * Copy VXLAN item into version 2 NIC filter.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param enic_filter[out]
+ * Partially filled in NIC filter structure.
+ * @param inner_ofst[in]
+ * Must be 0. VxLAN headers always start at the beginning of L5.
+ */
+static int
+enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
+ struct filter_v2 *enic_filter, u8 *inner_ofst)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ if (*inner_ofst)
+ return EINVAL;
+
+ /* Match all if no spec */
+ if (!spec)
+ return 0;
+
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
+ sizeof(struct vxlan_hdr));
+ memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
+ sizeof(struct vxlan_hdr));
+
+ *inner_ofst = sizeof(struct vxlan_hdr);
+ return 0;
+}
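+
+/*
+ * Note on inner headers: once a VXLAN item has set *inner_ofst to
+ * sizeof(struct vxlan_hdr), the ETH, VLAN, IPv4, IPv6, UDP and TCP copy
+ * routines above place any following items into the FILTER_GENERIC_1_L5
+ * key at *inner_ofst (advancing it past each copied header) instead of
+ * into the L2/L3/L4 layers.
+ */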
+
+/**
+ * Return 1 if current item is valid on top of the previous one.
+ *
+ * @param prev_item[in]
+ * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
+ * is the first item.
+ * @param item_info[in]
+ * Info about this item, like valid previous items.
+ * @param is_first_item[in]
+ * True if this is the first item in the pattern.
+ */
+static int
+item_stacking_valid(enum rte_flow_item_type prev_item,
+ const struct enic_items *item_info, u8 is_first_item)
+{
+ enum rte_flow_item_type const *allowed_items = item_info->prev_items;
+
+ FLOW_TRACE();
+
+ for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
+ if (prev_item == *allowed_items)
+ return 1;
+ }
+
+ /* This is the first item in the stack. Check if that's cool */
+ if (is_first_item && item_info->valid_start_item)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Build the internal enic filter structure from the provided pattern. The
+ * pattern is validated as the items are copied.
+ *
+ * @param pattern[in]
+ * @param items_info[in]
+ * Info about this NIC's item support, like valid previous items.
+ * @param enic_filter[out]
+ * NIC-specific filter derived from the pattern.
+ * @param error[out]
+ */
+static int
+enic_copy_filter(const struct rte_flow_item pattern[],
+ const struct enic_items *items_info,
+ struct filter_v2 *enic_filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ const struct rte_flow_item *item = pattern;
+ u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
+ enum rte_flow_item_type prev_item;
+ const struct enic_items *item_info;
+
+ u8 is_first_item = 1;
+
+ FLOW_TRACE();
+
+ prev_item = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ /* Get info about how to validate and copy the item. If NULL
+ * is returned the nic does not support the item.
+ */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+
+ item_info = &items_info[item->type];
+
+ /* check to see if item stacking is valid */
+ if (!item_stacking_valid(prev_item, item_info, is_first_item))
+ goto stacking_error;
+
+ ret = item_info->copy_item(item, enic_filter, &inner_ofst);
+ if (ret)
+ goto item_not_supported;
+ prev_item = item->type;
+ is_first_item = 0;
+ }
+ return 0;
+
+item_not_supported:
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "enic type error");
+ return -rte_errno;
+
+stacking_error:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "stacking error");
+ return -rte_errno;
+}
+
+/**
+ * Build the internal version 1 NIC action structure from the provided
+ * actions. The actions are validated as they are copied.
+ *
+ * @param actions[in]
+ * @param enic_action[out]
+ * NIC-specific actions derived from the actions.
+ */
+static int
+enic_copy_action_v1(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action)
+{
+ FLOW_TRACE();
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE: {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ enic_action->rq_idx =
+ enic_rte_rq_idx_to_sop_idx(queue->index);
+ break;
+ }
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+ enic_action->type = FILTER_ACTION_RQ_STEERING;
+ return 0;
+}
+
+/**
+ * Build the internal version 2 NIC action structure from the provided
+ * actions. The actions are validated as they are copied.
+ *
+ * @param actions[in]
+ * @param enic_action[out]
+ * NIC-specific actions derived from the actions.
+ */
+static int
+enic_copy_action_v2(const struct rte_flow_action actions[],
+ struct filter_action_v2 *enic_action)
+{
+ FLOW_TRACE();
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE: {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ enic_action->rq_idx =
+ enic_rte_rq_idx_to_sop_idx(queue->index);
+ enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK: {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
+			 * in the range of allowed mark ids.
+ */
+ if (mark->id >= ENIC_MAGIC_FILTER_ID)
+ return EINVAL;
+ enic_action->filter_id = mark->id;
+ enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_FLAG: {
+ enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
+ enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+ enic_action->type = FILTER_ACTION_V2;
+ return 0;
+}
+
+/** Check if the action is supported */
+static int
+enic_match_action(const struct rte_flow_action *action,
+ const enum rte_flow_action_type *supported_actions)
+{
+ for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
+ supported_actions++) {
+ if (action->type == *supported_actions)
+ return 1;
+ }
+ return 0;
+}
+
+/** Get the NIC filter capabilities structure */
+static const struct enic_filter_cap *
+enic_get_filter_cap(struct enic *enic)
+{
+ if (enic->flow_filter_mode)
+ return &enic_filter_cap[enic->flow_filter_mode];
+
+ return NULL;
+}
+
+/** Get the actions for this NIC version. */
+static const struct enic_action_cap *
+enic_get_action_cap(struct enic *enic)
+{
+ static const struct enic_action_cap *ea;
+
+ if (enic->filter_tags)
+ ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
+ else
+ ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
+ return ea;
+}
+
+/* Debug function to dump internal NIC action structure. */
+static void
+enic_dump_actions(const struct filter_action_v2 *ea)
+{
+ if (ea->type == FILTER_ACTION_RQ_STEERING) {
+ FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
+ } else if (ea->type == FILTER_ACTION_V2) {
+ FLOW_LOG(INFO, "Actions(V2)\n");
+ if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
+ FLOW_LOG(INFO, "\tqueue: %u\n",
+ enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
+ if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
+ FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
+ }
+}
+
+/* Debug function to dump internal NIC filter structure. */
+static void
+enic_dump_filter(const struct filter_v2 *filt)
+{
+ const struct filter_generic_1 *gp;
+ int i, j, mbyte;
+ char buf[128], *bp;
+ char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
+ char l4csum[16], ipfrag[16];
+
+ switch (filt->type) {
+ case FILTER_IPV4_5TUPLE:
+ FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ /* FIXME: this should be a loop */
+ gp = &filt->u.generic_1;
+ FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
+ gp->val_vlan, gp->mask_vlan);
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
+ sprintf(ip4, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPV4)
+ ? "ip4(y)" : "ip4(n)");
+ else
+ sprintf(ip4, "%s ", "ip4(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
+ sprintf(ip6, "%s ",
+				(gp->val_flags & FILTER_GENERIC_1_IPV6)
+ ? "ip6(y)" : "ip6(n)");
+ else
+ sprintf(ip6, "%s ", "ip6(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_UDP)
+ sprintf(udp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_UDP)
+ ? "udp(y)" : "udp(n)");
+ else
+ sprintf(udp, "%s ", "udp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_TCP)
+ sprintf(tcp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_TCP)
+ ? "tcp(y)" : "tcp(n)");
+ else
+ sprintf(tcp, "%s ", "tcp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+ sprintf(tcpudp, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
+ ? "tcpudp(y)" : "tcpudp(n)");
+ else
+ sprintf(tcpudp, "%s ", "tcpudp(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
+ sprintf(ip4csum, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
+ ? "ip4csum(y)" : "ip4csum(n)");
+ else
+ sprintf(ip4csum, "%s ", "ip4csum(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
+ sprintf(l4csum, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
+ ? "l4csum(y)" : "l4csum(n)");
+ else
+ sprintf(l4csum, "%s ", "l4csum(x)");
+
+ if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
+ sprintf(ipfrag, "%s ",
+ (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
+ ? "ipfrag(y)" : "ipfrag(n)");
+ else
+ sprintf(ipfrag, "%s ", "ipfrag(x)");
+ FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
+ tcp, tcpudp, ip4csum, l4csum, ipfrag);
+
+ for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
+ mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
+ while (mbyte && !gp->layer[i].mask[mbyte])
+ mbyte--;
+ if (mbyte == 0)
+ continue;
+
+ bp = buf;
+ for (j = 0; j <= mbyte; j++) {
+ sprintf(bp, "%02x",
+ gp->layer[i].mask[j]);
+ bp += 2;
+ }
+ *bp = '\0';
+ FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
+ bp = buf;
+ for (j = 0; j <= mbyte; j++) {
+ sprintf(bp, "%02x",
+ gp->layer[i].val[j]);
+ bp += 2;
+ }
+ *bp = '\0';
+ FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
+ }
+ break;
+ default:
+ FLOW_LOG(INFO, "FILTER UNKNOWN\n");
+ break;
+ }
+}
+
+/* Debug function to dump internal NIC flow structures. */
+static void
+enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
+{
+ enic_dump_filter(filt);
+ enic_dump_actions(ea);
+}
+
+
+/**
+ * Internal flow parse/validate function.
+ *
+ * @param dev[in]
+ * This device pointer.
+ * @param pattern[in]
+ * @param actions[in]
+ * @param error[out]
+ * @param enic_filter[out]
+ * Internal NIC filter structure pointer.
+ * @param enic_action[out]
+ * Internal NIC action structure pointer.
+ */
+static int
+enic_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct filter_v2 *enic_filter,
+ struct filter_action_v2 *enic_action)
+{
+ unsigned int ret = 0;
+ struct enic *enic = pmd_priv(dev);
+ const struct enic_filter_cap *enic_filter_cap;
+ const struct enic_action_cap *enic_action_cap;
+ const struct rte_flow_action *action;
+
+ FLOW_TRACE();
+
+ memset(enic_filter, 0, sizeof(*enic_filter));
+ memset(enic_action, 0, sizeof(*enic_action));
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No pattern specified");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "No action specified");
+ return -rte_errno;
+ }
+
+ if (attrs) {
+ if (attrs->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "priority groups are not supported");
+ return -rte_errno;
+ } else if (attrs->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported");
+ return -rte_errno;
+ } else if (attrs->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ } else if (!attrs->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "No attribute specified");
+ return -rte_errno;
+ }
+
+ /* Verify Actions. */
+ enic_action_cap = enic_get_action_cap(enic);
+ for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
+ action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ else if (!enic_match_action(action, enic_action_cap->actions))
+ break;
+ }
+ if (action->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "Invalid action.");
+ return -rte_errno;
+ }
+ ret = enic_action_cap->copy_fn(actions, enic_action);
+ if (ret) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unsupported action.");
+ return -rte_errno;
+ }
+
+ /* Verify Flow items. If copying the filter from flow format to enic
+ * format fails, the flow is not supported
+ */
+ enic_filter_cap = enic_get_filter_cap(enic);
+ if (enic_filter_cap == NULL) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Flow API not available");
+ return -rte_errno;
+ }
+ enic_filter->type = enic->flow_filter_mode;
+ ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
+ enic_filter, error);
+ return ret;
+}
+
+/**
+ * Push filter/action to the NIC.
+ *
+ * @param enic[in]
+ * Device structure pointer.
+ * @param enic_filter[in]
+ * Internal NIC filter structure pointer.
+ * @param enic_action[in]
+ * Internal NIC action structure pointer.
+ * @param error[out]
+ */
+static struct rte_flow *
+enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
+ struct filter_action_v2 *enic_action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ int ret;
+ u16 entry;
+
+ FLOW_TRACE();
+
+ flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+
+ /* entry[in] is the queue id, entry[out] is the filter Id for delete */
+ entry = enic_action->rq_idx;
+ ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
+ enic_action);
+ if (!ret) {
+ flow->enic_filter_id = entry;
+ flow->enic_filter = *enic_filter;
+ } else {
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "vnic_dev_classifier error");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+/**
+ * Remove filter/action from the NIC.
+ *
+ * @param enic[in]
+ * Device structure pointer.
+ * @param filter_id[in]
+ * Id of NIC filter.
+ * @param error[out]
+ */
+static int
+enic_flow_del_filter(struct enic *enic, u16 filter_id,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ FLOW_TRACE();
+
+ ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
+	if (ret)
+ rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "vnic_dev_classifier failed");
+ return ret;
+}
+
+/*
+ * The following functions are callbacks for Generic flow API.
+ */
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct filter_v2 enic_filter;
+ struct filter_action_v2 enic_action;
+ int ret;
+
+ FLOW_TRACE();
+
+ ret = enic_flow_parse(dev, attrs, pattern, actions, error,
+ &enic_filter, &enic_action);
+ if (!ret)
+ enic_dump_flow(&enic_action, &enic_filter);
+ return ret;
+}
+
+/**
+ * Create a flow supported by the NIC.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+static struct rte_flow *
+enic_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attrs,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct filter_v2 enic_filter;
+ struct filter_action_v2 enic_action;
+ struct rte_flow *flow;
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
+ &enic_action);
+ if (ret < 0)
+ return NULL;
+
+ rte_spinlock_lock(&enic->flows_lock);
+ flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
+ error);
+ if (flow)
+ LIST_INSERT_HEAD(&enic->flows, flow, next);
+ rte_spinlock_unlock(&enic->flows_lock);
+
+ return flow;
+}
+
+/**
+ * Destroy a flow supported by the NIC.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ __rte_unused struct rte_flow_error *error)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ rte_spinlock_lock(&enic->flows_lock);
+ enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ LIST_REMOVE(flow, next);
+ rte_spinlock_unlock(&enic->flows_lock);
+ return 0;
+}
+
+/**
+ * Flush all flows on the device.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+static int
+enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ struct enic *enic = pmd_priv(dev);
+
+ FLOW_TRACE();
+
+ rte_spinlock_lock(&enic->flows_lock);
+
+ while (!LIST_EMPTY(&enic->flows)) {
+ flow = LIST_FIRST(&enic->flows);
+ enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ LIST_REMOVE(flow, next);
+ }
+ rte_spinlock_unlock(&enic->flows_lock);
+ return 0;
+}
+
+/**
+ * Flow callback registration.
+ *
+ * @see rte_flow_ops
+ */
+const struct rte_flow_ops enic_flow_ops = {
+ .validate = enic_flow_validate,
+ .create = enic_flow_create,
+ .destroy = enic_flow_destroy,
+ .flush = enic_flow_flush,
+};
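+
+/*
+ * Illustrative sketch (not driver code): an application reaches the
+ * callbacks above through the generic rte_flow API. For example, to steer
+ * TCP-over-IPv4 traffic to queue 1 and tag it with a MARK id that is
+ * reported in mbuf->hash.fdir.hi on receive (the port, queue and mark
+ * values below are hypothetical):
+ *
+ *	struct rte_flow_attr attr = { .ingress = 1 };
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action_queue queue = { .index = 1 };
+ *	struct rte_flow_action_mark mark = { .id = 42 };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *	struct rte_flow_error err;
+ *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
+ *						actions, &err);
+ */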
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d0262418..40dbec7f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -430,7 +430,7 @@ enic_intr_handler(void *arg)
vnic_intr_return_all_credits(&enic->intr);
enic_link_update(enic);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
enic_log_q_error(enic);
}
@@ -1225,7 +1225,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
}
}
- /* replace Rx funciton with a no-op to avoid getting stale pkts */
+ /* replace Rx function with a no-op to avoid getting stale pkts */
eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
rte_mb();
@@ -1314,6 +1314,9 @@ static int enic_dev_init(struct enic *enic)
vnic_dev_set_reset_flag(enic->vdev, 0);
+ LIST_INIT(&enic->flows);
+ rte_spinlock_init(&enic->flows_lock);
+
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 867bd25c..e4b80d49 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -104,6 +104,21 @@ int enic_get_vnic_config(struct enic *enic)
dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
? "" : "not "));
+ err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
+ &enic->filter_tags);
+ if (err) {
+ dev_err(enic_get_dev(enic),
+ "Error getting filter modes, %d\n", err);
+ return err;
+ }
+
+ dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n",
+ ((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
+ ((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
+ ((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
+ "NONE"))),
+ ((enic->filter_tags) ? "" : "not "));
+
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index ba0cfd01..a39172f1 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -253,8 +253,20 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
}
mbuf->vlan_tci = vlan_tci;
- /* RSS flag */
- if (enic_cq_rx_desc_rss_type(cqrd)) {
+ if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
+ struct cq_enet_rq_clsf_desc *clsf_cqd;
+ uint16_t filter_id;
+ clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
+ filter_id = clsf_cqd->filter_id;
+ if (filter_id) {
+ pkt_flags |= PKT_RX_FDIR;
+ if (filter_id != ENIC_MAGIC_FILTER_ID) {
+ mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+ pkt_flags |= PKT_RX_FDIR_ID;
+ }
+ }
+ } else if (enic_cq_rx_desc_rss_type(cqrd)) {
+ /* RSS flag */
pkt_flags |= PKT_RX_RSS_HASH;
mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
}
@@ -491,7 +503,8 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
tail_idx = enic_ring_incr(desc_count, tail_idx);
}
- rte_mempool_put_bulk(pool, (void **)free, nb_free);
+ if (nb_free > 0)
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
wq->tail_idx = tail_idx;
wq->ring.desc_avail += nb_to_free;
diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile
new file mode 100644
index 00000000..d516d362
--- /dev/null
+++ b/drivers/net/failsafe/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright 2017 6WIND S.A.
+# Copyright 2017 Mellanox.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of 6WIND S.A. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name
+LIB = librte_pmd_failsafe.a
+
+EXPORT_MAP := rte_pmd_failsafe_version.map
+
+LIBABIVER := 1
+
+# Sources are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_args.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_eal.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ether.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_flow.c
+
+# No exported include files
+
+# Basic CFLAGS:
+CFLAGS += -std=gnu99 -Wextra
+CFLAGS += -O3
+CFLAGS += -I.
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += -D_XOPEN_SOURCE=700
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-strict-prototypes
+CFLAGS += -pedantic
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
new file mode 100644
index 00000000..6006bef8
--- /dev/null
+++ b/drivers/net/failsafe/failsafe.c
@@ -0,0 +1,299 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_alarm.h>
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_vdev.h>
+
+#include "failsafe_private.h"
+
+const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
+static const struct rte_eth_link eth_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_UP,
+ .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+};
+
+static int
+fs_sub_device_alloc(struct rte_eth_dev *dev,
+ const char *params)
+{
+ uint8_t nb_subs;
+ int ret;
+
+ ret = failsafe_args_count_subdevice(dev, params);
+ if (ret)
+ return ret;
+ if (PRIV(dev)->subs_tail > FAILSAFE_MAX_ETHPORTS) {
+ ERROR("Cannot allocate more than %d ports",
+ FAILSAFE_MAX_ETHPORTS);
+ return -ENOSPC;
+ }
+ nb_subs = PRIV(dev)->subs_tail;
+ PRIV(dev)->subs = rte_zmalloc(NULL,
+ sizeof(struct sub_device) * nb_subs,
+ RTE_CACHE_LINE_SIZE);
+ if (PRIV(dev)->subs == NULL) {
+ ERROR("Could not allocate sub_devices");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void
+fs_sub_device_free(struct rte_eth_dev *dev)
+{
+ rte_free(PRIV(dev)->subs);
+}
+
+static void fs_hotplug_alarm(void *arg);
+
+int
+failsafe_hotplug_alarm_install(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev == NULL)
+ return -EINVAL;
+ if (PRIV(dev)->pending_alarm)
+ return 0;
+ ret = rte_eal_alarm_set(hotplug_poll * 1000,
+ fs_hotplug_alarm,
+ dev);
+ if (ret) {
+ ERROR("Could not set up plug-in event detection");
+ return ret;
+ }
+ PRIV(dev)->pending_alarm = 1;
+ return 0;
+}
+
+int
+failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev)
+{
+ int ret = 0;
+
+ if (PRIV(dev)->pending_alarm) {
+ rte_errno = 0;
+ rte_eal_alarm_cancel(fs_hotplug_alarm, dev);
+ if (rte_errno) {
+ ERROR("rte_eal_alarm_cancel failed (errno: %s)",
+ strerror(rte_errno));
+ ret = -rte_errno;
+ } else {
+ PRIV(dev)->pending_alarm = 0;
+ }
+ }
+ return ret;
+}
+
+static void
+fs_hotplug_alarm(void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct sub_device *sdev;
+ int ret;
+ uint8_t i;
+
+ if (!PRIV(dev)->pending_alarm)
+ return;
+ PRIV(dev)->pending_alarm = 0;
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state != PRIV(dev)->state)
+ break;
+	/* if we have a non-probed device */
+ if (i != PRIV(dev)->subs_tail) {
+ ret = failsafe_eth_dev_state_sync(dev);
+ if (ret)
+ ERROR("Unable to synchronize sub_device state");
+ }
+ failsafe_dev_remove(dev);
+ ret = failsafe_hotplug_alarm_install(dev);
+ if (ret)
+ ERROR("Unable to set up next alarm");
+}
+
+static int
+fs_eth_dev_create(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *dev;
+ struct ether_addr *mac;
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ const char *params;
+ unsigned int socket_id;
+ uint8_t i;
+ int ret;
+
+ dev = NULL;
+ priv = NULL;
+ socket_id = rte_socket_id();
+ INFO("Creating fail-safe device on NUMA socket %u", socket_id);
+ params = rte_vdev_device_args(vdev);
+ if (params == NULL) {
+ ERROR("This PMD requires sub-devices, none provided");
+ return -1;
+ }
+ dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
+ if (dev == NULL) {
+ ERROR("Unable to allocate rte_eth_dev");
+ return -1;
+ }
+ priv = PRIV(dev);
+ priv->dev = dev;
+ dev->dev_ops = &failsafe_ops;
+ dev->data->mac_addrs = &PRIV(dev)->mac_addrs[0];
+ dev->data->dev_link = eth_link;
+ PRIV(dev)->nb_mac_addr = 1;
+ TAILQ_INIT(&PRIV(dev)->flow_list);
+ dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst;
+ dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst;
+ ret = fs_sub_device_alloc(dev, params);
+ if (ret) {
+ ERROR("Could not allocate sub_devices");
+ goto free_dev;
+ }
+ ret = failsafe_args_parse(dev, params);
+ if (ret)
+ goto free_subs;
+ ret = failsafe_eal_init(dev);
+ if (ret)
+ goto free_args;
+ ret = failsafe_hotplug_alarm_install(dev);
+ if (ret) {
+ ERROR("Could not set up plug-in event detection");
+ goto free_args;
+ }
+ mac = &dev->data->mac_addrs[0];
+ if (mac_from_arg) {
+ /*
+ * If MAC address was provided as a parameter,
+ * apply to all probed slaves.
+ */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
+ mac);
+ if (ret) {
+ ERROR("Failed to set default MAC address");
+ goto free_args;
+ }
+ }
+ } else {
+ /*
+ * Use the ether_addr from first probed
+ * device, either preferred or fallback.
+ */
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state >= DEV_PROBED) {
+ ether_addr_copy(&ETH(sdev)->data->mac_addrs[0],
+ mac);
+ break;
+ }
+ /*
+ * If no device has been probed and no ether_addr
+ * has been provided on the command line, use a random
+ * valid one.
+ * It will be applied during future slave state syncs to
+ * probed slaves.
+ */
+ if (i == priv->subs_tail)
+ eth_random_addr(&mac->addr_bytes[0]);
+ }
+ INFO("MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5]);
+ dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ return 0;
+free_args:
+ failsafe_args_free(dev);
+free_subs:
+ fs_sub_device_free(dev);
+free_dev:
+ rte_free(PRIV(dev));
+ rte_eth_dev_release_port(dev);
+ return -1;
+}
+
+static int
+fs_rte_eth_free(const char *name)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ dev = rte_eth_dev_allocated(name);
+ if (dev == NULL)
+ return -ENODEV;
+ ret = failsafe_eal_uninit(dev);
+ if (ret)
+ ERROR("Error while uninitializing sub-EAL");
+ failsafe_args_free(dev);
+ fs_sub_device_free(dev);
+ rte_free(PRIV(dev));
+ rte_eth_dev_release_port(dev);
+ return ret;
+}
+
+static int
+rte_pmd_failsafe_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s",
+ name);
+ return fs_eth_dev_create(vdev);
+}
+
+static int
+rte_pmd_failsafe_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ INFO("Uninitializing " FAILSAFE_DRIVER_NAME " for %s", name);
+ return fs_rte_eth_free(name);
+}
+
+static struct rte_vdev_driver failsafe_drv = {
+ .probe = rte_pmd_failsafe_probe,
+ .remove = rte_pmd_failsafe_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING);
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
new file mode 100644
index 00000000..1f22416f
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -0,0 +1,474 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <errno.h>
+
+#include <rte_debug.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+
+#include "failsafe_private.h"
+
+#define DEVARGS_MAXLEN 4096
+
+/* Callback used when a new device is found in devargs */
+typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params,
+ uint8_t head);
+
+uint64_t hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
+int mac_from_arg = 0;
+
+const char *pmd_failsafe_init_parameters[] = {
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
+ PMD_FAILSAFE_MAC_KVARG,
+ NULL,
+};
+
+/*
+ * input: text.
+ * output: 0 if text[0] != '(',
+ *         0 if there is no matching ')',
+ *         n: distance to the matching ')' otherwise
+ */
+static size_t
+closing_paren(const char *text)
+{
+ int nb_open = 0;
+ size_t i = 0;
+
+ while (text[i] != '\0') {
+ if (text[i] == '(')
+ nb_open++;
+ if (text[i] == ')')
+ nb_open--;
+ if (nb_open == 0)
+ return i;
+ i++;
+ }
+ return 0;
+}
+
+static int
+fs_parse_device(struct sub_device *sdev, char *args)
+{
+ struct rte_devargs *d;
+ int ret;
+
+ d = &sdev->devargs;
+ DEBUG("%s", args);
+ ret = rte_eal_devargs_parse(args, d);
+ if (ret) {
+ DEBUG("devargs parsing failed with code %d", ret);
+ return ret;
+ }
+ sdev->bus = d->bus;
+ sdev->state = DEV_PARSED;
+ return 0;
+}
+
+static void
+fs_sanitize_cmdline(char *args)
+{
+ char *nl;
+
+ nl = strrchr(args, '\n');
+ if (nl)
+ nl[0] = '\0';
+}
+
+static int
+fs_execute_cmd(struct sub_device *sdev, char *cmdline)
+{
+ FILE *fp;
+ /* store possible newline as well */
+ char output[DEVARGS_MAXLEN + 1];
+ size_t len;
+ int old_err;
+ int ret, pclose_ret;
+
+ RTE_ASSERT(cmdline != NULL || sdev->cmdline != NULL);
+ if (sdev->cmdline == NULL) {
+ size_t i;
+
+ len = strlen(cmdline) + 1;
+ sdev->cmdline = calloc(1, len);
+ if (sdev->cmdline == NULL) {
+ ERROR("Command line allocation failed");
+ return -ENOMEM;
+ }
+ snprintf(sdev->cmdline, len, "%s", cmdline);
+ /* Replace all commas in the command line by spaces */
+ for (i = 0; i < len; i++)
+ if (sdev->cmdline[i] == ',')
+ sdev->cmdline[i] = ' ';
+ }
+ DEBUG("'%s'", sdev->cmdline);
+ old_err = errno;
+ fp = popen(sdev->cmdline, "r");
+ if (fp == NULL) {
+ ret = errno;
+ ERROR("popen: %s", strerror(errno));
+ errno = old_err;
+ return ret;
+ }
+ /* We only read one line */
+ if (fgets(output, sizeof(output) - 1, fp) == NULL) {
+ DEBUG("Could not read command output");
+ ret = -ENODEV;
+ goto ret_pclose;
+ }
+ fs_sanitize_cmdline(output);
+ if (output[0] == '\0') {
+ ret = -ENODEV;
+ goto ret_pclose;
+ }
+ ret = fs_parse_device(sdev, output);
+ if (ret) {
+ ERROR("Parsing device '%s' failed", output);
+ goto ret_pclose;
+ }
+ret_pclose:
+ pclose_ret = pclose(fp);
+ if (pclose_ret) {
+ pclose_ret = errno;
+ ERROR("pclose: %s", strerror(errno));
+ errno = old_err;
+ return pclose_ret;
+ }
+ return ret;
+}
+
+static int
+fs_parse_device_param(struct rte_eth_dev *dev, const char *param,
+ uint8_t head)
+{
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ char *args = NULL;
+ size_t a, b;
+ int ret;
+
+ priv = PRIV(dev);
+ a = 0;
+ b = 0;
+ ret = 0;
+ while (param[b] != '(' &&
+ param[b] != '\0')
+ b++;
+ a = b;
+ b += closing_paren(&param[b]);
+ if (a == b) {
+ ERROR("Dangling parenthesis");
+ return -EINVAL;
+ }
+ a += 1;
+ args = strndup(&param[a], b - a);
+ if (args == NULL) {
+ ERROR("Not enough memory for parameter parsing");
+ return -ENOMEM;
+ }
+ sdev = &priv->subs[head];
+ if (strncmp(param, "dev", 3) == 0) {
+ ret = fs_parse_device(sdev, args);
+ if (ret)
+ goto free_args;
+ } else if (strncmp(param, "exec", 4) == 0) {
+ ret = fs_execute_cmd(sdev, args);
+ if (ret == -ENODEV) {
+ DEBUG("Reading device info from command line failed");
+ ret = 0;
+ }
+ if (ret)
+ goto free_args;
+ } else {
+ ERROR("Unrecognized device type: %.*s", (int)b, param);
+ return -EINVAL;
+ }
+free_args:
+ free(args);
+ return ret;
+}
+
+static int
+fs_parse_sub_devices(parse_cb *cb,
+ struct rte_eth_dev *dev, const char *params)
+{
+ size_t a, b;
+ uint8_t head;
+ int ret;
+
+ a = 0;
+ head = 0;
+ ret = 0;
+ while (params[a] != '\0') {
+ b = a;
+ while (params[b] != '(' &&
+ params[b] != ',' &&
+ params[b] != '\0')
+ b++;
+ if (b == a) {
+ ERROR("Invalid parameter");
+ return -EINVAL;
+ }
+ if (params[b] == ',') {
+ a = b + 1;
+ continue;
+ }
+ if (params[b] == '(') {
+ size_t start = b;
+
+ b += closing_paren(&params[b]);
+ if (b == start) {
+ ERROR("Dangling parenthesis");
+ return -EINVAL;
+ }
+ ret = (*cb)(dev, &params[a], head);
+ if (ret)
+ return ret;
+ head += 1;
+ b += 1;
+ if (params[b] == '\0')
+ return 0;
+ }
+ a = b + 1;
+ }
+ return 0;
+}
+
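+/*
+ * Strip the dev(...) and exec(...) sub-device definitions from the devargs
+ * string, leaving only the fail-safe PMD's own kvargs for rte_kvargs_parse().
+ * For example (hypothetical arguments),
+ * "dev(0000:00:02.0),mac=de:ad:be:ef:01:02" is rewritten in place to
+ * "mac=de:ad:be:ef:01:02".
+ */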
+static int
+fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN])
+{
+ char buffer[DEVARGS_MAXLEN] = {0};
+ size_t a, b;
+ int i;
+
+ a = 0;
+ i = 0;
+ while (params[a] != '\0') {
+ b = a;
+ while (params[b] != '(' &&
+ params[b] != ',' &&
+ params[b] != '\0')
+ b++;
+ if (b == a) {
+ ERROR("Invalid parameter");
+ return -EINVAL;
+ }
+ if (params[b] == ',' || params[b] == '\0')
+ i += snprintf(&buffer[i], b - a + 1, "%s", &params[a]);
+ if (params[b] == '(') {
+ size_t start = b;
+ b += closing_paren(&params[b]);
+ if (b == start)
+ return -EINVAL;
+ b += 1;
+ if (params[b] == '\0')
+ goto out;
+ }
+ a = b + 1;
+ }
+out:
+ snprintf(params, DEVARGS_MAXLEN, "%s", buffer);
+ return 0;
+}
+
+static int
+fs_get_u64_arg(const char *key __rte_unused,
+ const char *value, void *out)
+{
+ uint64_t *u64 = out;
+ char *endptr = NULL;
+
+ if ((value == NULL) || (out == NULL))
+ return -EINVAL;
+ errno = 0;
+ *u64 = strtoull(value, &endptr, 0);
+ if (errno != 0)
+ return -errno;
+ if (endptr == value)
+ return -1;
+ return 0;
+}
+
+static int
+fs_get_mac_addr_arg(const char *key __rte_unused,
+ const char *value, void *out)
+{
+ struct ether_addr *ea = out;
+ int ret;
+
+ if ((value == NULL) || (out == NULL))
+ return -EINVAL;
+ ret = sscanf(value, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &ea->addr_bytes[0], &ea->addr_bytes[1],
+ &ea->addr_bytes[2], &ea->addr_bytes[3],
+ &ea->addr_bytes[4], &ea->addr_bytes[5]);
+ return ret != ETHER_ADDR_LEN;
+}
+
+int
+failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
+{
+ struct fs_priv *priv;
+ char mut_params[DEVARGS_MAXLEN] = "";
+ struct rte_kvargs *kvlist = NULL;
+ unsigned int arg_count;
+ size_t n;
+ int ret;
+
+ priv = PRIV(dev);
+ ret = 0;
+ priv->subs_tx = FAILSAFE_MAX_ETHPORTS;
+ /* default parameters */
+ n = snprintf(mut_params, sizeof(mut_params), "%s", params);
+ if (n >= sizeof(mut_params)) {
+ ERROR("Parameter string too long (>=%zu)",
+ sizeof(mut_params));
+ return -ENOMEM;
+ }
+ ret = fs_parse_sub_devices(fs_parse_device_param,
+ dev, params);
+ if (ret < 0)
+ return ret;
+ ret = fs_remove_sub_devices_definition(mut_params);
+ if (ret < 0)
+ return ret;
+ if (strnlen(mut_params, sizeof(mut_params)) > 0) {
+ kvlist = rte_kvargs_parse(mut_params,
+ pmd_failsafe_init_parameters);
+ if (kvlist == NULL) {
+ ERROR("Error parsing parameters, usage:\n"
+ PMD_FAILSAFE_PARAM_STRING);
+ return -1;
+ }
+ /* PLUG_IN event poll timer */
+ arg_count = rte_kvargs_count(kvlist,
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG);
+ if (arg_count == 1) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
+ &fs_get_u64_arg, &hotplug_poll);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+ /* MAC addr */
+ arg_count = rte_kvargs_count(kvlist,
+ PMD_FAILSAFE_MAC_KVARG);
+ if (arg_count > 0) {
+ ret = rte_kvargs_process(kvlist,
+ PMD_FAILSAFE_MAC_KVARG,
+ &fs_get_mac_addr_arg,
+ &dev->data->mac_addrs[0]);
+ if (ret < 0)
+ goto free_kvlist;
+ mac_from_arg = 1;
+ }
+ }
+ PRIV(dev)->state = DEV_PARSED;
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+void
+failsafe_args_free(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ rte_free(sdev->cmdline);
+ sdev->cmdline = NULL;
+ free(sdev->devargs.args);
+ sdev->devargs.args = NULL;
+ }
+}
+
+static int
+fs_count_device(struct rte_eth_dev *dev, const char *param,
+ uint8_t head __rte_unused)
+{
+ size_t b = 0;
+
+ while (param[b] != '(' &&
+ param[b] != '\0')
+ b++;
+ if (strncmp(param, "dev", b) != 0 &&
+ strncmp(param, "exec", b) != 0) {
+ ERROR("Unrecognized device type: %.*s", (int)b, param);
+ return -EINVAL;
+ }
+ PRIV(dev)->subs_tail += 1;
+ return 0;
+}
+
+int
+failsafe_args_count_subdevice(struct rte_eth_dev *dev,
+ const char *params)
+{
+ return fs_parse_sub_devices(fs_count_device,
+ dev, params);
+}
+
+static int
+fs_parse_sub_device(struct sub_device *sdev)
+{
+ struct rte_devargs *da;
+ char devstr[DEVARGS_MAXLEN] = "";
+
+ da = &sdev->devargs;
+ snprintf(devstr, sizeof(devstr), "%s,%s", da->name, da->args);
+ return fs_parse_device(sdev, devstr);
+}
+
+int
+failsafe_args_parse_subs(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret = 0;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state >= DEV_PARSED)
+ continue;
+ if (sdev->cmdline)
+ ret = fs_execute_cmd(sdev, sdev->cmdline);
+ else
+ ret = fs_parse_sub_device(sdev);
+ if (ret == 0)
+ sdev->state = DEV_PARSED;
+ }
+ return 0;
+}
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
new file mode 100644
index 00000000..c8f4318e
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -0,0 +1,118 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "failsafe_private.h"
+
+static int
+fs_bus_init(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ struct rte_devargs *da;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_PARSED)
+ continue;
+ da = &sdev->devargs;
+ ret = rte_eal_hotplug_add(da->bus->name,
+ da->name,
+ da->args);
+ if (ret) {
+ ERROR("sub_device %d probe failed %s%s%s", i,
+ rte_errno ? "(" : "",
+ rte_errno ? strerror(rte_errno) : "",
+ rte_errno ? ")" : "");
+ continue;
+ }
+ ETH(sdev) = rte_eth_dev_allocated(da->name);
+ if (ETH(sdev) == NULL) {
+ ERROR("sub_device %d init went wrong", i);
+ return -ENODEV;
+ }
+ SUB_ID(sdev) = i;
+ sdev->fs_dev = dev;
+ sdev->dev = ETH(sdev)->device;
+ ETH(sdev)->state = RTE_ETH_DEV_DEFERRED;
+ sdev->state = DEV_PROBED;
+ }
+ return 0;
+}
+
+int
+failsafe_eal_init(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = fs_bus_init(dev);
+ if (ret)
+ return ret;
+ if (PRIV(dev)->state < DEV_PROBED)
+ PRIV(dev)->state = DEV_PROBED;
+ fs_switch_dev(dev, NULL);
+ return 0;
+}
+
+static int
+fs_bus_uninit(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev = NULL;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ ret = rte_eal_hotplug_remove(sdev->bus->name,
+ sdev->dev->name);
+ if (ret) {
+ ERROR("Failed to remove requested device %s",
+ sdev->dev->name);
+ continue;
+ }
+ sdev->state = DEV_PROBED - 1;
+ }
+ return 0;
+}
+
+int
+failsafe_eal_uninit(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = fs_bus_uninit(dev);
+ if (ret)
+ return ret;
+ PRIV(dev)->state = DEV_PROBED - 1;
+ return 0;
+}
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
new file mode 100644
index 00000000..a3a8cce9
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -0,0 +1,437 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "failsafe_private.h"
+
+/** Print a message describing a flow error. */
+static int
+fs_flow_complain(struct rte_flow_error *error)
+{
+ static const char *const errstrlist[] = {
+ [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
+ [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
+ [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
+ [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
+ [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
+ [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
+ [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
+ [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
+ };
+ const char *errstr;
+ char buf[32];
+ int err = rte_errno;
+
+ if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
+ !errstrlist[error->type])
+ errstr = "unknown type";
+ else
+ errstr = errstrlist[error->type];
+ ERROR("Caught error type %d (%s): %s%s\n",
+ error->type, errstr,
+ error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
+ error->cause), buf) : "",
+ error->message ? error->message : "(no stated reason)");
+ return -err;
+}
+
+static int
+eth_dev_flow_isolate_set(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_flow_error ferror;
+ int ret;
+
+ if (!PRIV(dev)->flow_isolated) {
+ DEBUG("Flow isolation already disabled");
+ } else {
+ DEBUG("Enabling flow isolation");
+ ret = rte_flow_isolate(PORT_ID(sdev),
+ PRIV(dev)->flow_isolated,
+ &ferror);
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_eth_dev *edev;
+ struct rte_vlan_filter_conf *vfc1;
+ struct rte_vlan_filter_conf *vfc2;
+ struct rte_flow *flow;
+ struct rte_flow_error ferror;
+ uint32_t i;
+ int ret;
+
+ edev = ETH(sdev);
+ /* RX queue setup */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct rxq *rxq;
+
+ rxq = dev->data->rx_queues[i];
+ ret = rte_eth_rx_queue_setup(PORT_ID(sdev), i,
+ rxq->info.nb_desc, rxq->socket_id,
+ &rxq->info.conf, rxq->info.mp);
+ if (ret) {
+ ERROR("rx_queue_setup failed");
+ return ret;
+ }
+ }
+ /* TX queue setup */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct txq *txq;
+
+ txq = dev->data->tx_queues[i];
+ ret = rte_eth_tx_queue_setup(PORT_ID(sdev), i,
+ txq->info.nb_desc, txq->socket_id,
+ &txq->info.conf);
+ if (ret) {
+ ERROR("tx_queue_setup failed");
+ return ret;
+ }
+ }
+ /* dev_link.link_status */
+ if (dev->data->dev_link.link_status !=
+ edev->data->dev_link.link_status) {
+ DEBUG("Configuring link_status");
+ if (dev->data->dev_link.link_status)
+ ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
+ else
+ ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
+ if (ret) {
+ ERROR("Failed to apply link_status");
+ return ret;
+ }
+ } else {
+ DEBUG("link_status already set");
+ }
+ /* promiscuous */
+ if (dev->data->promiscuous != edev->data->promiscuous) {
+ DEBUG("Configuring promiscuous");
+ if (dev->data->promiscuous)
+ rte_eth_promiscuous_enable(PORT_ID(sdev));
+ else
+ rte_eth_promiscuous_disable(PORT_ID(sdev));
+ } else {
+ DEBUG("promiscuous already set");
+ }
+ /* all_multicast */
+ if (dev->data->all_multicast != edev->data->all_multicast) {
+ DEBUG("Configuring all_multicast");
+ if (dev->data->all_multicast)
+ rte_eth_allmulticast_enable(PORT_ID(sdev));
+ else
+ rte_eth_allmulticast_disable(PORT_ID(sdev));
+ } else {
+ DEBUG("all_multicast already set");
+ }
+ /* MTU */
+ if (dev->data->mtu != edev->data->mtu) {
+ DEBUG("Configuring MTU");
+ ret = rte_eth_dev_set_mtu(PORT_ID(sdev), dev->data->mtu);
+ if (ret) {
+ ERROR("Failed to apply MTU");
+ return ret;
+ }
+ } else {
+ DEBUG("MTU already set");
+ }
+ /* default MAC */
+ DEBUG("Configuring default MAC address");
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
+ &dev->data->mac_addrs[0]);
+ if (ret) {
+ ERROR("Setting default MAC address failed");
+ return ret;
+ }
+ /* additional MAC */
+ if (PRIV(dev)->nb_mac_addr > 1)
+ DEBUG("Configure additional MAC address%s",
+ (PRIV(dev)->nb_mac_addr > 2 ? "es" : ""));
+ for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) {
+ struct ether_addr *ea;
+
+ ea = &dev->data->mac_addrs[i];
+ ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
+ PRIV(dev)->mac_addr_pool[i]);
+ if (ret) {
+ char ea_fmt[ETHER_ADDR_FMT_SIZE];
+
+ ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
+ ERROR("Adding MAC address %s failed", ea_fmt);
+ }
+ }
+ /* VLAN filter */
+ vfc1 = &dev->data->vlan_filter_conf;
+ vfc2 = &edev->data->vlan_filter_conf;
+ if (memcmp(vfc1, vfc2, sizeof(struct rte_vlan_filter_conf))) {
+ uint64_t vbit;
+ uint64_t ids;
+ size_t i;
+ uint16_t vlan_id;
+
+ DEBUG("Configuring VLAN filter");
+ for (i = 0; i < RTE_DIM(vfc1->ids); i++) {
+ if (vfc1->ids[i] == 0)
+ continue;
+ ids = vfc1->ids[i];
+ while (ids) {
+ vlan_id = 64 * i;
+				/* mask of the trailing zeroes (bits below the lowest set bit) */
+				vbit = ~ids & (ids - 1);
+				/* clear the least significant set bit */
+ ids ^= (ids ^ (ids - 1)) ^ vbit;
+ for (; vbit; vlan_id++)
+ vbit >>= 1;
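+				/*
+				 * For example, with i = 0 and ids = 0x18
+				 * (VLAN IDs 3 and 4), the first pass yields
+				 * vbit = 0x07, hence vlan_id = 3, and leaves
+				 * ids = 0x10 for the next iteration.
+				 */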
+ ret = rte_eth_dev_vlan_filter(
+ PORT_ID(sdev), vlan_id, 1);
+ if (ret) {
+ ERROR("Failed to apply VLAN filter %hu",
+ vlan_id);
+ return ret;
+ }
+ }
+ }
+ } else {
+ DEBUG("VLAN filter already set");
+ }
+ /* rte_flow */
+ if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) {
+ DEBUG("rte_flow already set");
+ } else {
+ DEBUG("Resetting rte_flow configuration");
+ ret = rte_flow_flush(PORT_ID(sdev), &ferror);
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ i = 0;
+ rte_errno = 0;
+ DEBUG("Configuring rte_flow");
+ TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) {
+ DEBUG("Creating flow #%" PRIu32, i++);
+ flow->flows[SUB_ID(sdev)] =
+ rte_flow_create(PORT_ID(sdev),
+ &flow->fd->attr,
+ flow->fd->items,
+ flow->fd->actions,
+ &ferror);
+ ret = rte_errno;
+ if (ret)
+ break;
+ }
+ if (ret) {
+ fs_flow_complain(&ferror);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void
+fs_dev_remove(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ switch (sdev->state) {
+ case DEV_STARTED:
+ rte_eth_dev_stop(PORT_ID(sdev));
+ sdev->state = DEV_ACTIVE;
+ /* fallthrough */
+ case DEV_ACTIVE:
+ rte_eth_dev_close(PORT_ID(sdev));
+ sdev->state = DEV_PROBED;
+ /* fallthrough */
+ case DEV_PROBED:
+ ret = rte_eal_hotplug_remove(sdev->bus->name,
+ sdev->dev->name);
+ if (ret) {
+ ERROR("Bus detach failed for sub_device %u",
+ SUB_ID(sdev));
+ } else {
+ ETH(sdev)->state = RTE_ETH_DEV_UNUSED;
+ }
+ sdev->state = DEV_PARSED;
+ /* fallthrough */
+ case DEV_PARSED:
+ case DEV_UNDEFINED:
+ sdev->state = DEV_UNDEFINED;
+ /* the end */
+ break;
+ }
+ failsafe_hotplug_alarm_install(sdev->fs_dev);
+}
+
+static inline int
+fs_rxtx_clean(struct sub_device *sdev)
+{
+ uint16_t i;
+
+ for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++)
+ if (FS_ATOMIC_RX(sdev, i))
+ return 0;
+ for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++)
+ if (FS_ATOMIC_TX(sdev, i))
+ return 0;
+ return 1;
+}
+
+void
+failsafe_dev_remove(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ if (sdev->remove && fs_rxtx_clean(sdev))
+ fs_dev_remove(sdev);
+}
+
+int
+failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint32_t inactive;
+ int ret;
+ uint8_t i;
+
+ if (PRIV(dev)->state < DEV_PARSED)
+ return 0;
+
+ ret = failsafe_args_parse_subs(dev);
+ if (ret)
+ goto err_remove;
+
+ if (PRIV(dev)->state < DEV_PROBED)
+ return 0;
+ ret = failsafe_eal_init(dev);
+ if (ret)
+ goto err_remove;
+ if (PRIV(dev)->state < DEV_ACTIVE)
+ return 0;
+ inactive = 0;
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state == DEV_PROBED) {
+ inactive |= UINT32_C(1) << i;
+ ret = eth_dev_flow_isolate_set(dev, sdev);
+ if (ret) {
+				ERROR("Could not apply flow isolation to sub_device %d",
+ i);
+ goto err_remove;
+ }
+ }
+ }
+ ret = dev->dev_ops->dev_configure(dev);
+ if (ret)
+ goto err_remove;
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (inactive & (UINT32_C(1) << i)) {
+ ret = fs_eth_dev_conf_apply(dev, sdev);
+ if (ret) {
+ ERROR("Could not apply configuration to sub_device %d",
+ i);
+ goto err_remove;
+ }
+ }
+ }
+ /*
+ * If new devices have been configured, check if
+ * the link state has changed.
+ */
+ if (inactive)
+ dev->dev_ops->link_update(dev, 1);
+ if (PRIV(dev)->state < DEV_STARTED)
+ return 0;
+ ret = dev->dev_ops->dev_start(dev);
+ if (ret)
+ goto err_remove;
+ return 0;
+err_remove:
+ FOREACH_SUBDEV(sdev, i, dev)
+ if (sdev->state != PRIV(dev)->state)
+ sdev->remove = 1;
+ return ret;
+}
+
+int
+failsafe_eth_rmv_event_callback(uint8_t port_id __rte_unused,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct sub_device *sdev = cb_arg;
+
+	/* Switch the tx_dev as soon as possible. */
+ fs_switch_dev(sdev->fs_dev, sdev);
+ /* Use safe bursts in any case. */
+ set_burst_fn(sdev->fs_dev, 1);
+	/*
+	 * Asynchronous removal: the sub-PMD cannot be torn down from
+	 * within its own callback context, as it would have to unregister
+	 * this very callback. Only flag the sub_device here; the actual
+	 * removal is performed later, outside of this context.
+	 */
+ sdev->remove = 1;
+ return 0;
+}
+
+int
+failsafe_eth_lsc_event_callback(uint8_t port_id __rte_unused,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ int ret;
+
+ ret = dev->dev_ops->link_update(dev, 0);
+ /* We must pass on the LSC event */
+ if (ret)
+ return _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ else
+ return 0;
+}
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
new file mode 100644
index 00000000..153ceeed
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -0,0 +1,244 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "failsafe_private.h"
+
+static struct rte_flow *
+fs_flow_allocate(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow *flow;
+ size_t fdsz;
+
+ fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
+ flow = rte_zmalloc(NULL,
+ sizeof(struct rte_flow) + fdsz,
+ RTE_CACHE_LINE_SIZE);
+ if (flow == NULL) {
+ ERROR("Could not allocate new flow");
+ return NULL;
+ }
+ flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
+ if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
+ ERROR("Failed to copy flow description");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+static void
+fs_flow_release(struct rte_flow **flow)
+{
+ rte_free(*flow);
+ *flow = NULL;
+}
+
+static int
+fs_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_flow_validate on sub_device %d", i);
+ ret = rte_flow_validate(PORT_ID(sdev),
+ attr, patterns, actions, error);
+ if (ret) {
+ ERROR("Operation rte_flow_validate failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+fs_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ struct rte_flow *flow;
+ uint8_t i;
+
+	flow = fs_flow_allocate(attr, patterns, actions);
+	if (flow == NULL)
+		return NULL;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ flow->flows[i] = rte_flow_create(PORT_ID(sdev),
+ attr, patterns, actions, error);
+ if (flow->flows[i] == NULL) {
+ ERROR("Failed to create flow on sub_device %d",
+ i);
+ goto err;
+ }
+ }
+ TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
+ return flow;
+err:
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (flow->flows[i] != NULL)
+ rte_flow_destroy(PORT_ID(sdev),
+ flow->flows[i], error);
+ }
+ fs_flow_release(&flow);
+ return NULL;
+}
+
+static int
+fs_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ if (flow == NULL) {
+ ERROR("Invalid flow");
+ return -EINVAL;
+ }
+ ret = 0;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ int local_ret;
+
+ if (flow->flows[i] == NULL)
+ continue;
+ local_ret = rte_flow_destroy(PORT_ID(sdev),
+ flow->flows[i], error);
+ if (local_ret) {
+ ERROR("Failed to destroy flow on sub_device %d: %d",
+ i, local_ret);
+ if (ret == 0)
+ ret = local_ret;
+ }
+ }
+ TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
+ fs_flow_release(&flow);
+ return ret;
+}
+
+static int
+fs_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ struct rte_flow *flow;
+ void *tmp;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_flow_flush on sub_device %d", i);
+ ret = rte_flow_flush(PORT_ID(sdev), error);
+ if (ret) {
+ ERROR("Operation rte_flow_flush failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ TAILQ_FOREACH_SAFE(flow, &PRIV(dev)->flow_list, next, tmp) {
+ TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
+ fs_flow_release(&flow);
+ }
+ return 0;
+}
+
+static int
+fs_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ enum rte_flow_action_type type,
+ void *arg,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev != NULL) {
+ return rte_flow_query(PORT_ID(sdev),
+ flow->flows[SUB_ID(sdev)], type, arg, error);
+ }
+ WARN("No active sub_device to query about its flow");
+ return -1;
+}
+
+static int
+fs_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state < DEV_PROBED)
+ continue;
+ DEBUG("Calling rte_flow_isolate on sub_device %d", i);
+ if (PRIV(dev)->flow_isolated != sdev->flow_isolated)
+			WARN("flow isolation mode of sub_device %d is in an incoherent state",
+ i);
+ ret = rte_flow_isolate(PORT_ID(sdev), set, error);
+ if (ret) {
+ ERROR("Operation rte_flow_isolate failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ sdev->flow_isolated = set;
+ }
+ PRIV(dev)->flow_isolated = set;
+ return 0;
+}
+
+const struct rte_flow_ops fs_flow_ops = {
+ .validate = fs_flow_validate,
+ .create = fs_flow_create,
+ .destroy = fs_flow_destroy,
+ .flush = fs_flow_flush,
+ .query = fs_flow_query,
+ .isolate = fs_flow_isolate,
+};
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
new file mode 100644
index 00000000..ff9ad155
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -0,0 +1,867 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+
+#include "failsafe_private.h"
+
+static struct rte_eth_dev_info default_infos = {
+ /* Max possible number of elements */
+ .max_rx_pktlen = UINT32_MAX,
+ .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
+ .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
+ .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
+ .max_hash_mac_addrs = UINT32_MAX,
+ .max_vfs = UINT16_MAX,
+ .max_vmdq_pools = UINT16_MAX,
+ .rx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ .nb_seg_max = UINT16_MAX,
+ .nb_mtu_seg_max = UINT16_MAX,
+ },
+ .tx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ .nb_seg_max = UINT16_MAX,
+ .nb_mtu_seg_max = UINT16_MAX,
+ },
+ /*
+ * Set of capabilities that can be verified upon
+ * configuring a sub-device.
+ */
+ .rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO,
+ .tx_offload_capa = 0x0,
+ .flow_type_rss_offloads = 0x0,
+};
+
+/**
+ * Check whether a specific offloading capability
+ * is supported by a sub_device.
+ *
+ * @return
+ * 0: all requested capabilities are supported by the sub_device
+ *   positive value: at least this offload flag is not supported by the sub_device
+ */
+static int
+fs_port_offload_validate(struct rte_eth_dev *dev,
+ struct sub_device *sdev)
+{
+ struct rte_eth_dev_info infos = {0};
+ struct rte_eth_conf *cf;
+ uint32_t cap;
+
+ cf = &dev->data->dev_conf;
+ SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
+ /* RX capabilities */
+ cap = infos.rx_offload_capa;
+ if (cf->rxmode.hw_vlan_strip &&
+ ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
+ WARN("VLAN stripping offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ if (cf->rxmode.hw_ip_checksum &&
+ ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM)) !=
+ (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))) {
+ WARN("IP checksum offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ }
+ if (cf->rxmode.enable_lro &&
+ ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
+ WARN("TCP LRO offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_TCP_LRO;
+ }
+ if (cf->rxmode.hw_vlan_extend &&
+ ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
+ WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
+ SUB_ID(sdev));
+ return DEV_RX_OFFLOAD_QINQ_STRIP;
+ }
+ /* TX capabilities */
+ /* Nothing to do, no tx capa supported */
+ return 0;
+}
+
+/*
+ * Disable the dev_conf flag related to an offload capability flag
+ * within an ethdev configuration.
+ */
+static int
+fs_port_disable_offload(struct rte_eth_conf *cf,
+ uint32_t ol_cap)
+{
+ switch (ol_cap) {
+ case DEV_RX_OFFLOAD_VLAN_STRIP:
+ INFO("Disabling VLAN stripping offload");
+ cf->rxmode.hw_vlan_strip = 0;
+ break;
+ case DEV_RX_OFFLOAD_IPV4_CKSUM:
+ case DEV_RX_OFFLOAD_UDP_CKSUM:
+ case DEV_RX_OFFLOAD_TCP_CKSUM:
+ case (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM):
+ INFO("Disabling IP checksum offload");
+ cf->rxmode.hw_ip_checksum = 0;
+ break;
+ case DEV_RX_OFFLOAD_TCP_LRO:
+ INFO("Disabling TCP LRO offload");
+ cf->rxmode.enable_lro = 0;
+ break;
+ case DEV_RX_OFFLOAD_QINQ_STRIP:
+ INFO("Disabling stacked VLAN stripping offload");
+ cf->rxmode.hw_vlan_extend = 0;
+ break;
+ default:
+ DEBUG("Unable to disable offload capability: %" PRIx32,
+ ol_cap);
+ return -1;
+ }
+ return 0;
+}
+
+static int
+fs_dev_configure(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int capa_flag;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_PROBED)
+ continue;
+ DEBUG("Checking capabilities for sub_device %d", i);
+ while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
+ /*
+ * Refuse to change configuration if multiple devices
+ * are present and we already have configured at least
+ * some of them.
+ */
+ if (PRIV(dev)->state >= DEV_ACTIVE &&
+ PRIV(dev)->subs_tail > 1) {
+ ERROR("device already configured, cannot fix live configuration");
+ return -1;
+ }
+ ret = fs_port_disable_offload(&dev->data->dev_conf,
+ capa_flag);
+ if (ret) {
+ ERROR("Unable to disable offload capability");
+ return ret;
+ }
+ }
+ }
+ FOREACH_SUBDEV(sdev, i, dev) {
+ int rmv_interrupt = 0;
+ int lsc_interrupt = 0;
+ int lsc_enabled;
+
+ if (sdev->state != DEV_PROBED)
+ continue;
+
+ rmv_interrupt = ETH(sdev)->data->dev_flags &
+ RTE_ETH_DEV_INTR_RMV;
+ if (rmv_interrupt) {
+ DEBUG("Enabling RMV interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.rmv = 1;
+ } else {
+ DEBUG("sub_device %d does not support RMV event", i);
+ }
+ lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
+ lsc_interrupt = lsc_enabled &&
+ (ETH(sdev)->data->dev_flags &
+ RTE_ETH_DEV_INTR_LSC);
+ if (lsc_interrupt) {
+ DEBUG("Enabling LSC interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.lsc = 1;
+ } else if (lsc_enabled && !lsc_interrupt) {
+ DEBUG("Disabling LSC interrupts for sub_device %d", i);
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ }
+ DEBUG("Configuring sub-device %d", i);
+ sdev->remove = 0;
+ ret = rte_eth_dev_configure(PORT_ID(sdev),
+ dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues,
+ &dev->data->dev_conf);
+ if (ret) {
+ ERROR("Could not configure sub_device %d", i);
+ return ret;
+ }
+ if (rmv_interrupt) {
+ ret = rte_eth_dev_callback_register(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_RMV,
+ failsafe_eth_rmv_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to register RMV callback for sub_device %d",
+ SUB_ID(sdev));
+ }
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ if (lsc_interrupt) {
+ ret = rte_eth_dev_callback_register(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_LSC,
+ failsafe_eth_lsc_event_callback,
+ dev);
+ if (ret)
+ WARN("Failed to register LSC callback for sub_device %d",
+ SUB_ID(sdev));
+ }
+ dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
+ sdev->state = DEV_ACTIVE;
+ }
+ if (PRIV(dev)->state < DEV_ACTIVE)
+ PRIV(dev)->state = DEV_ACTIVE;
+ return 0;
+}
+
+static int
+fs_dev_start(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV(sdev, i, dev) {
+ if (sdev->state != DEV_ACTIVE)
+ continue;
+ DEBUG("Starting sub_device %d", i);
+ ret = rte_eth_dev_start(PORT_ID(sdev));
+ if (ret)
+ return ret;
+ sdev->state = DEV_STARTED;
+ }
+ if (PRIV(dev)->state < DEV_STARTED)
+ PRIV(dev)->state = DEV_STARTED;
+ fs_switch_dev(dev, NULL);
+ return 0;
+}
+
+static void
+fs_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ PRIV(dev)->state = DEV_STARTED - 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
+ rte_eth_dev_stop(PORT_ID(sdev));
+ sdev->state = DEV_STARTED - 1;
+ }
+}
+
+static int
+fs_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
+ ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
+ if (ret) {
+ ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
+ ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
+ if (ret) {
+ ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void fs_dev_free_queues(struct rte_eth_dev *dev);
+
+static void
+fs_dev_close(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ failsafe_hotplug_alarm_cancel(dev);
+ if (PRIV(dev)->state == DEV_STARTED)
+ dev->dev_ops->dev_stop(dev);
+ PRIV(dev)->state = DEV_ACTIVE - 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Closing sub_device %d", i);
+ rte_eth_dev_close(PORT_ID(sdev));
+ sdev->state = DEV_ACTIVE - 1;
+ }
+ fs_dev_free_queues(dev);
+}
+
+static void
+fs_rx_queue_release(void *queue)
+{
+ struct rte_eth_dev *dev;
+ struct sub_device *sdev;
+ uint8_t i;
+ struct rxq *rxq;
+
+ if (queue == NULL)
+ return;
+ rxq = queue;
+ dev = rxq->priv->dev;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ SUBOPS(sdev, rx_queue_release)
+ (ETH(sdev)->data->rx_queues[rxq->qid]);
+ dev->data->rx_queues[rxq->qid] = NULL;
+ rte_free(rxq);
+}
+
+static int
+fs_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ uint8_t i;
+ int ret;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ if (rxq != NULL) {
+ fs_rx_queue_release(rxq);
+ dev->data->rx_queues[rx_queue_id] = NULL;
+ }
+ rxq = rte_zmalloc(NULL,
+ sizeof(*rxq) +
+ sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -ENOMEM;
+ FOREACH_SUBDEV(sdev, i, dev)
+ rte_atomic64_init(&rxq->refcnt[i]);
+ rxq->qid = rx_queue_id;
+ rxq->socket_id = socket_id;
+ rxq->info.mp = mb_pool;
+ rxq->info.conf = *rx_conf;
+ rxq->info.nb_desc = nb_rx_desc;
+ rxq->priv = PRIV(dev);
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
+ rx_queue_id,
+ nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if (ret) {
+ ERROR("RX queue setup failed for sub_device %d", i);
+ goto free_rxq;
+ }
+ }
+ return 0;
+free_rxq:
+ fs_rx_queue_release(rxq);
+ return ret;
+}
+
+static void
+fs_tx_queue_release(void *queue)
+{
+ struct rte_eth_dev *dev;
+ struct sub_device *sdev;
+ uint8_t i;
+ struct txq *txq;
+
+ if (queue == NULL)
+ return;
+ txq = queue;
+ dev = txq->priv->dev;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ SUBOPS(sdev, tx_queue_release)
+ (ETH(sdev)->data->tx_queues[txq->qid]);
+ dev->data->tx_queues[txq->qid] = NULL;
+ rte_free(txq);
+}
+
+static int
+fs_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ uint8_t i;
+ int ret;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ if (txq != NULL) {
+ fs_tx_queue_release(txq);
+ dev->data->tx_queues[tx_queue_id] = NULL;
+ }
+ txq = rte_zmalloc("ethdev TX queue",
+ sizeof(*txq) +
+ sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -ENOMEM;
+ FOREACH_SUBDEV(sdev, i, dev)
+ rte_atomic64_init(&txq->refcnt[i]);
+ txq->qid = tx_queue_id;
+ txq->socket_id = socket_id;
+ txq->info.conf = *tx_conf;
+ txq->info.nb_desc = nb_tx_desc;
+ txq->priv = PRIV(dev);
+ dev->data->tx_queues[tx_queue_id] = txq;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
+ tx_queue_id,
+ nb_tx_desc, socket_id,
+ tx_conf);
+ if (ret) {
+ ERROR("TX queue setup failed for sub_device %d", i);
+ goto free_txq;
+ }
+ }
+ return 0;
+free_txq:
+ fs_tx_queue_release(txq);
+ return ret;
+}
+
+static void
+fs_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ fs_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ fs_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+static void
+fs_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_promiscuous_enable(PORT_ID(sdev));
+}
+
+static void
+fs_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_promiscuous_disable(PORT_ID(sdev));
+}
+
+static void
+fs_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_allmulticast_enable(PORT_ID(sdev));
+}
+
+static void
+fs_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_allmulticast_disable(PORT_ID(sdev));
+}
+
+static int
+fs_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling link_update on sub_device %d", i);
+ ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
+ if (ret && ret != -1) {
+ ERROR("Link update failed for sub_device %d with error %d",
+ i, ret);
+ return ret;
+ }
+ }
+ if (TX_SUBDEV(dev)) {
+ struct rte_eth_link *l1;
+ struct rte_eth_link *l2;
+
+ l1 = &dev->data->dev_link;
+ l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
+ if (memcmp(l1, l2, sizeof(*l1))) {
+ *l1 = *l2;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static void
+fs_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ if (TX_SUBDEV(dev) == NULL)
+ return;
+ rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
+}
+
+static void
+fs_stats_reset(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_stats_reset(PORT_ID(sdev));
+}
+
+/**
+ * Fail-safe dev_infos_get rules:
+ *
+ * No sub_device:
+ *    Numeric limits:
+ * Use the maximum possible values for any field, so as not
+ * to impede any further configuration effort.
+ * Capabilities:
+ * Limits capabilities to those that are understood by the
+ * fail-safe PMD. This understanding stems from the fail-safe
+ * being capable of verifying that the related capability is
+ * expressed within the device configuration (struct rte_eth_conf).
+ *
+ * At least one probed sub_device:
+ *    Numeric limits:
+ *      Uses values from the active probed sub_device.
+ * The rationale here is that if any sub_device is less capable
+ * (for example concerning the number of queues) than the active
+ * sub_device, then its subsequent configuration will fail.
+ * It is impossible to foresee this failure when the failing sub_device
+ * is supposed to be plugged-in later on, so the configuration process
+ * is the single point of failure and error reporting.
+ * Capabilities:
+ * Uses a logical AND of RX capabilities among
+ * all sub_devices and the default capabilities.
+ * Uses a logical AND of TX capabilities among
+ * the active probed sub_device and the default capabilities.
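+ *
+ *      For example, if one probed sub_device advertises
+ *      DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_TCP_LRO while another
+ *      advertises only DEV_RX_OFFLOAD_VLAN_STRIP, the fail-safe reports
+ *      DEV_RX_OFFLOAD_VLAN_STRIP as its RX offload capability.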
+ *
+ */
+static void
+fs_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *infos)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL) {
+ DEBUG("No probed device, using default infos");
+ rte_memcpy(&PRIV(dev)->infos, &default_infos,
+ sizeof(default_infos));
+ } else {
+ uint32_t rx_offload_capa;
+
+ rx_offload_capa = default_infos.rx_offload_capa;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ rte_eth_dev_info_get(PORT_ID(sdev),
+ &PRIV(dev)->infos);
+ rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
+ }
+ sdev = TX_SUBDEV(dev);
+ rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
+ PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
+ PRIV(dev)->infos.tx_offload_capa &=
+ default_infos.tx_offload_capa;
+ PRIV(dev)->infos.flow_type_rss_offloads &=
+ default_infos.flow_type_rss_offloads;
+ }
+ rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
+}
+
+static const uint32_t *
+fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ struct sub_device *sdev;
+ struct rte_eth_dev *edev;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL)
+ return NULL;
+ edev = ETH(sdev);
+ /* ENOTSUP: counts as no supported ptypes */
+ if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
+ return NULL;
+ /*
+	 * The API does not allow a clean AND of all ptypes, and it is
+	 * incomplete by design; we do not really care about having the
+	 * best possible value in this context.
+ * We just return the ptypes of the device of highest
+ * priority, usually the PREFERRED device.
+ */
+ return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
+}
+
+static int
+fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
+ ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
+ i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
+ ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int
+fs_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct sub_device *sdev;
+
+ sdev = TX_SUBDEV(dev);
+ if (sdev == NULL)
+ return 0;
+ if (SUBOPS(sdev, flow_ctrl_get) == NULL)
+ return -ENOTSUP;
+ return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
+}
+
+static int
+fs_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
+ ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void
+fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ /* No check: already done within the rte_eth_dev_mac_addr_remove
+ * call for the fail-safe device.
+ */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
+ &dev->data->mac_addrs[index]);
+ PRIV(dev)->mac_addr_pool[index] = 0;
+}
+
+static int
+fs_mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t vmdq)
+{
+ struct sub_device *sdev;
+ int ret;
+ uint8_t i;
+
+ RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
+ PRIu8 " with error %d", i, ret);
+ return ret;
+ }
+ }
+ if (index >= PRIV(dev)->nb_mac_addr) {
+ DEBUG("Growing mac_addrs array");
+		PRIV(dev)->nb_mac_addr = index + 1;
+ }
+ PRIV(dev)->mac_addr_pool[index] = vmdq;
+ return 0;
+}
+
+static void
+fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+}
+
+static int
+fs_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type type,
+ enum rte_filter_op op,
+ void *arg)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ if (type == RTE_ETH_FILTER_GENERIC &&
+ op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &fs_flow_ops;
+ return 0;
+ }
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
+ ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
+ " with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+const struct eth_dev_ops failsafe_ops = {
+ .dev_configure = fs_dev_configure,
+ .dev_start = fs_dev_start,
+ .dev_stop = fs_dev_stop,
+ .dev_set_link_down = fs_dev_set_link_down,
+ .dev_set_link_up = fs_dev_set_link_up,
+ .dev_close = fs_dev_close,
+ .promiscuous_enable = fs_promiscuous_enable,
+ .promiscuous_disable = fs_promiscuous_disable,
+ .allmulticast_enable = fs_allmulticast_enable,
+ .allmulticast_disable = fs_allmulticast_disable,
+ .link_update = fs_link_update,
+ .stats_get = fs_stats_get,
+ .stats_reset = fs_stats_reset,
+ .dev_infos_get = fs_dev_infos_get,
+ .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
+ .mtu_set = fs_mtu_set,
+ .vlan_filter_set = fs_vlan_filter_set,
+ .rx_queue_setup = fs_rx_queue_setup,
+ .tx_queue_setup = fs_tx_queue_setup,
+ .rx_queue_release = fs_rx_queue_release,
+ .tx_queue_release = fs_tx_queue_release,
+ .flow_ctrl_get = fs_flow_ctrl_get,
+ .flow_ctrl_set = fs_flow_ctrl_set,
+ .mac_addr_remove = fs_mac_addr_remove,
+ .mac_addr_add = fs_mac_addr_add,
+ .mac_addr_set = fs_mac_addr_set,
+ .filter_ctrl = fs_filter_ctrl,
+};
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
new file mode 100644
index 00000000..0361cf43
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -0,0 +1,359 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
+#define _RTE_ETH_FAILSAFE_PRIVATE_H_
+
+#include <sys/queue.h>
+
+#include <rte_atomic.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_devargs.h>
+
+#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
+
+#define PMD_FAILSAFE_MAC_KVARG "mac"
+#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll"
+#define PMD_FAILSAFE_PARAM_STRING \
+ "dev(<ifc>)," \
+ "exec(<shell command>)," \
+ "mac=mac_addr," \
+ "hotplug_poll=u64" \
+ ""
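+
+/*
+ * Illustrative devargs string (assumed syntax, derived from the
+ * parameters above):
+ *   --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(0000:84:00.0),dev(net_ring0)'
+ */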
+
+#define FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS 2000
+
+#define FAILSAFE_MAX_ETHPORTS 2
+#define FAILSAFE_MAX_ETHADDR 128
+
+/* TYPES */
+
+struct rxq {
+ struct fs_priv *priv;
+ uint16_t qid;
+ /* id of last sub_device polled */
+ uint8_t last_polled;
+ unsigned int socket_id;
+ struct rte_eth_rxq_info info;
+ rte_atomic64_t refcnt[];
+};
+
+struct txq {
+ struct fs_priv *priv;
+ uint16_t qid;
+ unsigned int socket_id;
+ struct rte_eth_txq_info info;
+ rte_atomic64_t refcnt[];
+};
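+
+/*
+ * The refcnt[] flexible array members above hold one counter per
+ * sub_device; fs_rx_queue_setup() and fs_tx_queue_setup() allocate
+ * them together with the queue structure.
+ */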
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ /* sub_flows */
+ struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
+ /* flow description for synchronization */
+ struct rte_flow_desc *fd;
+};
+
+enum dev_state {
+ DEV_UNDEFINED,
+ DEV_PARSED,
+ DEV_PROBED,
+ DEV_ACTIVE,
+ DEV_STARTED,
+};
+
+struct sub_device {
+ /* Exhaustive DPDK device description */
+ struct rte_devargs devargs;
+ struct rte_bus *bus;
+ struct rte_device *dev;
+ struct rte_eth_dev *edev;
+ uint8_t sid;
+ /* Device state machine */
+ enum dev_state state;
+	/* Some devices are defined by a command line */
+ char *cmdline;
+ /* fail-safe device backreference */
+ struct rte_eth_dev *fs_dev;
+ /* flag calling for recollection */
+ volatile unsigned int remove:1;
+ /* flow isolation state */
+ int flow_isolated:1;
+};
+
+struct fs_priv {
+ struct rte_eth_dev *dev;
+ /*
+ * Set of sub_devices.
+	 * subs[0] is the preferred device;
+	 * any other is just another slave.
+ */
+ struct sub_device *subs;
+ uint8_t subs_head; /* if head == tail, no subs */
+ uint8_t subs_tail; /* first invalid */
+ uint8_t subs_tx; /* current emitting device */
+ uint8_t current_probed;
+ /* flow mapping */
+ TAILQ_HEAD(sub_flows, rte_flow) flow_list;
+ /* current number of mac_addr slots allocated. */
+ uint32_t nb_mac_addr;
+ struct ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
+ uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
+ /* current capabilities */
+ struct rte_eth_dev_info infos;
+ /*
+ * Fail-safe state machine.
+ * This level will be tracking state of the EAL and eth
+	 * This level tracks the state of the EAL and ethdev layer at
+	 * large, as defined by the user application.
+	 * It then steers the sub_devices toward the same
+	 * synchronized state.
+ enum dev_state state;
+ unsigned int pending_alarm:1; /* An alarm is pending */
+ /* flow isolation state */
+ int flow_isolated:1;
+};
+
+/* MISC */
+
+int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);
+int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);
+
+/* RX / TX */
+
+void set_burst_fn(struct rte_eth_dev *dev, int force_safe);
+
+uint16_t failsafe_rx_burst(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t failsafe_tx_burst(void *txq,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+uint16_t failsafe_rx_burst_fast(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t failsafe_tx_burst_fast(void *txq,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+/* ARGS */
+
+int failsafe_args_parse(struct rte_eth_dev *dev, const char *params);
+void failsafe_args_free(struct rte_eth_dev *dev);
+int failsafe_args_count_subdevice(struct rte_eth_dev *dev, const char *params);
+int failsafe_args_parse_subs(struct rte_eth_dev *dev);
+
+/* EAL */
+
+int failsafe_eal_init(struct rte_eth_dev *dev);
+int failsafe_eal_uninit(struct rte_eth_dev *dev);
+
+/* ETH_DEV */
+
+int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
+void failsafe_dev_remove(struct rte_eth_dev *dev);
+int failsafe_eth_rmv_event_callback(uint8_t port_id,
+ enum rte_eth_event_type type,
+ void *arg, void *out);
+int failsafe_eth_lsc_event_callback(uint8_t port_id,
+ enum rte_eth_event_type event,
+ void *cb_arg, void *out);
+
+/* GLOBALS */
+
+extern const char pmd_failsafe_driver_name[];
+extern const struct eth_dev_ops failsafe_ops;
+extern const struct rte_flow_ops fs_flow_ops;
+extern uint64_t hotplug_poll;
+extern int mac_from_arg;
+
+/* HELPERS */
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define PRIV(dev) \
+ ((struct fs_priv *)(dev)->data->dev_private)
+
+/* sdev: (struct sub_device *) */
+#define ETH(sdev) \
+ ((sdev)->edev)
+
+/* sdev: (struct sub_device *) */
+#define PORT_ID(sdev) \
+ (ETH(sdev)->data->port_id)
+
+/* sdev: (struct sub_device *) */
+#define SUB_ID(sdev) \
+ ((sdev)->sid)
+
+/**
+ * Stateful iterator construct over fail-safe sub-devices:
+ * s: (struct sub_device *), iterator
+ * i: (uint8_t), increment
+ * dev: (struct rte_eth_dev *), fail-safe ethdev
+ * state: (enum dev_state), minimum acceptable device state
+ */
+#define FOREACH_SUBDEV_STATE(s, i, dev, state) \
+ for (i = fs_find_next((dev), 0, state); \
+ i < PRIV(dev)->subs_tail && (s = &PRIV(dev)->subs[i]); \
+ i = fs_find_next((dev), i + 1, state))
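+
+/*
+ * Typical use, as seen throughout this driver:
+ *
+ *	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED)
+ *		rte_eth_dev_stop(PORT_ID(sdev));
+ */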
+
+/**
+ * Iterator construct over fail-safe sub-devices:
+ * s: (struct sub_device *), iterator
+ * i: (uint8_t), increment
+ * dev: (struct rte_eth_dev *), fail-safe ethdev
+ */
+#define FOREACH_SUBDEV(s, i, dev) \
+ FOREACH_SUBDEV_STATE(s, i, dev, DEV_UNDEFINED)
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define PREFERRED_SUBDEV(dev) \
+ (&PRIV(dev)->subs[0])
+
+/* dev: (struct rte_eth_dev *) fail-safe device */
+#define TX_SUBDEV(dev) \
+ (PRIV(dev)->subs_tx >= PRIV(dev)->subs_tail ? NULL \
+ : (PRIV(dev)->subs[PRIV(dev)->subs_tx].state < DEV_PROBED ? NULL \
+ : &PRIV(dev)->subs[PRIV(dev)->subs_tx]))
+
+/**
+ * s: (struct sub_device *)
+ * ops: (struct eth_dev_ops) member
+ */
+#define SUBOPS(s, ops) \
+ (ETH(s)->dev_ops->ops)
+
+/**
+ * Atomic guard
+ */
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_P(a) \
+ rte_atomic64_add(&(a), 1)
+
+/**
+ * a: (rte_atomic64_t)
+ */
+#define FS_ATOMIC_V(a) \
+ rte_atomic64_sub(&(a), 1)
+
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_RX(s, i) \
+ rte_atomic64_read( \
+ &((struct rxq *)((s)->fs_dev->data->rx_queues[i]))->refcnt[(s)->sid] \
+ )
+/**
+ * s: (struct sub_device *)
+ * i: uint16_t qid
+ */
+#define FS_ATOMIC_TX(s, i) \
+ rte_atomic64_read( \
+ &((struct txq *)((s)->fs_dev->data->tx_queues[i]))->refcnt[(s)->sid] \
+ )
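+
+/*
+ * Usage pattern: the RX/TX burst functions bracket each sub_device
+ * burst with FS_ATOMIC_P()/FS_ATOMIC_V() on the queue refcnt, while
+ * fs_rxtx_clean() reads FS_ATOMIC_RX()/FS_ATOMIC_TX() to detect when
+ * a removed sub_device is no longer in use and can be detached.
+ */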
+
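+/*
+ * LOG_() appends '\n' as the last variadic argument so that LOG__()
+ * always has at least one argument to consume for the trailing "%c",
+ * avoiding the need for the GNU ##__VA_ARGS__ extension when a
+ * message has no additional arguments.
+ */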
+#define LOG__(level, m, ...) \
+ RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__)
+#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
+#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__)
+#define INFO(...) LOG_(INFO, __VA_ARGS__)
+#define WARN(...) LOG_(WARNING, __VA_ARGS__)
+#define ERROR(...) LOG_(ERR, __VA_ARGS__)
+
+/* inlined functions */
+
+static inline uint8_t
+fs_find_next(struct rte_eth_dev *dev, uint8_t sid,
+ enum dev_state min_state)
+{
+ while (sid < PRIV(dev)->subs_tail) {
+ if (PRIV(dev)->subs[sid].state >= min_state)
+ break;
+ sid++;
+ }
+ if (sid >= PRIV(dev)->subs_tail)
+ return PRIV(dev)->subs_tail;
+ return sid;
+}
+
+/*
+ * Switch the emitting (TX) device.
+ * If banned is set, that sub_device must not be considered for
+ * the role of emitting device: the preferred sub_device is used
+ * whenever it has reached the required state, otherwise the first
+ * other sub_device having reached that state is picked.
+ */
+static inline void
+fs_switch_dev(struct rte_eth_dev *dev,
+ struct sub_device *banned)
+{
+ struct sub_device *txd;
+ enum dev_state req_state;
+
+ req_state = PRIV(dev)->state;
+ txd = TX_SUBDEV(dev);
+ if (PREFERRED_SUBDEV(dev)->state >= req_state &&
+ PREFERRED_SUBDEV(dev) != banned) {
+ if (txd != PREFERRED_SUBDEV(dev) &&
+ (txd == NULL ||
+ (req_state == DEV_STARTED) ||
+ (txd && txd->state < DEV_STARTED))) {
+ DEBUG("Switching tx_dev to preferred sub_device");
+ PRIV(dev)->subs_tx = 0;
+ }
+ } else if ((txd && txd->state < req_state) ||
+ txd == NULL ||
+ txd == banned) {
+ struct sub_device *sdev;
+ uint8_t i;
+
+ /* Using acceptable device */
+ FOREACH_SUBDEV_STATE(sdev, i, dev, req_state) {
+ if (sdev == banned)
+ continue;
+ DEBUG("Switching tx_dev to sub_device %d",
+ i);
+ PRIV(dev)->subs_tx = i;
+ break;
+ }
+ } else if (txd && txd->state < req_state) {
+ DEBUG("No device ready, deactivating tx_dev");
+ PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
+ } else {
+ return;
+ }
+ set_burst_fn(dev, 0);
+ rte_wmb();
+}
+
+#endif /* _RTE_ETH_FAILSAFE_PRIVATE_H_ */
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
new file mode 100644
index 00000000..73114215
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -0,0 +1,203 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+#include "failsafe_private.h"
+
+static inline int
+fs_rx_unsafe(struct sub_device *sdev)
+{
+ return (ETH(sdev) == NULL) ||
+ (ETH(sdev)->rx_pkt_burst == NULL) ||
+ (sdev->state != DEV_STARTED);
+}
+
+static inline int
+fs_tx_unsafe(struct sub_device *sdev)
+{
+ return (sdev == NULL) ||
+ (ETH(sdev) == NULL) ||
+ (ETH(sdev)->tx_pkt_burst == NULL) ||
+ (sdev->state != DEV_STARTED);
+}
+
+void
+set_burst_fn(struct rte_eth_dev *dev, int force_safe)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int need_safe;
+ int safe_set;
+
+ need_safe = force_safe;
+ FOREACH_SUBDEV(sdev, i, dev)
+ need_safe |= fs_rx_unsafe(sdev);
+ safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
+ if (need_safe && !safe_set) {
+ DEBUG("Using safe RX bursts%s",
+ (force_safe ? " (forced)" : ""));
+ dev->rx_pkt_burst = &failsafe_rx_burst;
+ } else if (!need_safe && safe_set) {
+ DEBUG("Using fast RX bursts");
+ dev->rx_pkt_burst = &failsafe_rx_burst_fast;
+ }
+ need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
+ safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
+ if (need_safe && !safe_set) {
+ DEBUG("Using safe TX bursts%s",
+ (force_safe ? " (forced)" : ""));
+ dev->tx_pkt_burst = &failsafe_tx_burst;
+ } else if (!need_safe && safe_set) {
+ DEBUG("Using fast TX bursts");
+ dev->tx_pkt_burst = &failsafe_tx_burst_fast;
+ }
+ rte_wmb();
+}
+
+uint16_t
+failsafe_rx_burst(void *queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ void *sub_rxq;
+ uint16_t nb_rx;
+ uint8_t nb_polled, nb_subs;
+ uint8_t i;
+
+ rxq = queue;
+ priv = rxq->priv;
+ nb_subs = priv->subs_tail - priv->subs_head;
+ nb_polled = 0;
+ for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
+ i++;
+ if (i == priv->subs_tail)
+ i = priv->subs_head;
+ sdev = &priv->subs[i];
+ if (unlikely(fs_rx_unsafe(sdev)))
+ continue;
+ sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
+ nb_rx = ETH(sdev)->
+ rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ if (nb_rx) {
+ rxq->last_polled = i;
+ return nb_rx;
+ }
+ }
+ return 0;
+}
+
+uint16_t
+failsafe_rx_burst_fast(void *queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct fs_priv *priv;
+ struct sub_device *sdev;
+ struct rxq *rxq;
+ void *sub_rxq;
+ uint16_t nb_rx;
+ uint8_t nb_polled, nb_subs;
+ uint8_t i;
+
+ rxq = queue;
+ priv = rxq->priv;
+ nb_subs = priv->subs_tail - priv->subs_head;
+ nb_polled = 0;
+ for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
+ i++;
+ if (i == priv->subs_tail)
+ i = priv->subs_head;
+ sdev = &priv->subs[i];
+ RTE_ASSERT(!fs_rx_unsafe(sdev));
+ sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
+ FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
+ nb_rx = ETH(sdev)->
+ rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
+ FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
+ if (nb_rx) {
+ rxq->last_polled = i;
+ return nb_rx;
+ }
+ }
+ return 0;
+}
+
+uint16_t
+failsafe_tx_burst(void *queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ void *sub_txq;
+ uint16_t nb_tx;
+
+ txq = queue;
+ sdev = TX_SUBDEV(txq->priv->dev);
+ if (unlikely(fs_tx_unsafe(sdev)))
+ return 0;
+ sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
+}
+
+uint16_t
+failsafe_tx_burst_fast(void *queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sub_device *sdev;
+ struct txq *txq;
+ void *sub_txq;
+ uint16_t nb_tx;
+
+ txq = queue;
+ sdev = TX_SUBDEV(txq->priv->dev);
+ RTE_ASSERT(!fs_tx_unsafe(sdev));
+ sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
+ FS_ATOMIC_P(txq->refcnt[sdev->sid]);
+ nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
+ FS_ATOMIC_V(txq->refcnt[sdev->sid]);
+ return nb_tx;
+}
diff --git a/drivers/net/failsafe/rte_pmd_failsafe_version.map b/drivers/net/failsafe/rte_pmd_failsafe_version.map
new file mode 100644
index 00000000..b6d2840b
--- /dev/null
+++ b/drivers/net/failsafe/rte_pmd_failsafe_version.map
@@ -0,0 +1,4 @@
+DPDK_17.08 {
+
+ local: *;
+};
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7363defa..e60d3a36 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -84,6 +84,7 @@ static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -712,7 +713,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i, ret;
struct fm10k_rx_queue *rxq;
@@ -1166,6 +1167,8 @@ fm10k_dev_start(struct rte_eth_dev *dev)
if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+ fm10k_link_update(dev, 0);
+
return 0;
}
@@ -1173,7 +1176,7 @@ static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i;
@@ -1393,7 +1396,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
PMD_INIT_FUNC_TRACE();
@@ -1588,7 +1591,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static void
-fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask)
+fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
@@ -2345,7 +2348,7 @@ static int
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
/* Enable ITR */
if (hw->mac.type == fm10k_mac_pf)
@@ -2362,7 +2365,7 @@ static int
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
/* Disable ITR */
if (hw->mac.type == fm10k_mac_pf)
@@ -2378,7 +2381,7 @@ static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
uint32_t intr_vector, vec;
uint16_t queue_id;
@@ -2882,7 +2885,7 @@ static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int diag, i;
struct fm10k_macvlan_filter_info *macvlan;
@@ -3069,7 +3072,7 @@ static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
PMD_INIT_FUNC_TRACE();
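The fm10k hunks above repeatedly replace RTE_DEV_TO_PCI(dev->device) with the ethdev-aware RTE_ETH_DEV_TO_PCI(dev). A minimal sketch of the resulting access pattern, assuming only the 17.08 public headers; the helper name is illustrative and not part of the driver:

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>	/* RTE_ETH_DEV_TO_PCI() */
#include <rte_interrupts.h>

/* Illustrative helper: fetch the PCI interrupt handle backing an ethdev,
 * the pattern each fm10k hunk above now uses. */
static struct rte_intr_handle *
example_eth_intr_handle(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

	return &pdev->intr_handle;
}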
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 411bc445..d23bfe9b 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -331,6 +331,8 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
*(uint64_t *)p1 = rxq->mbuf_initializer;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -448,6 +450,19 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
0xFF, 0xFF, /* skip high 16 bits pkt_type */
0xFF, 0xFF /* Skip pkt_type field in shuffle operation */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
@@ -738,7 +753,7 @@ vtx(volatile struct fm10k_tx_desc *txdp,
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
{
struct rte_mbuf **txep;
@@ -794,7 +809,7 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
return txq->rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct rte_mbuf **txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
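The RTE_BUILD_BUG_ON() lines added above pin the exact rte_mbuf field offsets that the SSE shuffle mask depends on. The same guard expressed as a standalone sketch, with the offsets taken from the assertions above and C11 _Static_assert standing in for RTE_BUILD_BUG_ON purely for illustration:

#include <stddef.h>
#include <rte_mbuf.h>

/* Same layout guards as the RTE_BUILD_BUG_ON() lines above: if a future
 * rte_mbuf change moves these fields, the build fails here instead of the
 * vector RX path silently mis-shuffling descriptors. */
_Static_assert(offsetof(struct rte_mbuf, buf_physaddr) ==
	       offsetof(struct rte_mbuf, buf_addr) + 8,
	       "buf_physaddr must directly follow buf_addr");
_Static_assert(offsetof(struct rte_mbuf, data_len) ==
	       offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8,
	       "data_len offset assumed by the descriptor shuffle mask");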
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 56f210d6..55c79a60 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -109,11 +109,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
-
-# vector PMD driver needs SSE4.1 support
-ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
-CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1
-endif
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
index 0da9f674..59e76c21 100644
--- a/drivers/net/i40e/base/README
+++ b/drivers/net/i40e/base/README
@@ -34,7 +34,7 @@ Intel® I40E driver
==================
This directory contains source code of FreeBSD i40e driver of version
-cid-i40e.2017.03.21.tar.gz released by the team which develops
+cid-i40e.2017.06.23.tar.gz released by the team which develops
basic drivers for any i40e NIC. The directory of base/ contains the
original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index a60292a3..8cc8c5ec 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -682,6 +682,18 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 7)))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ }
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
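The adminq-init hunk above derives capability flags from the negotiated firmware API version: 1.7 or newer means 802.1ad frames are received rather than dropped, and on XL710 it also enables AQ-based PHY register access. A hedged restatement of that version test as a helper; the function name is illustrative, the fields come from struct i40e_hw:

#include <stdbool.h>
#include "i40e_type.h"

/* Illustrative restatement of the capability test above: firmware API 1.7
 * (or any major version above 1) can receive 802.1ad-tagged frames. */
static bool
example_fw_receives_802_1ad(struct i40e_hw *hw)
{
	return (hw->aq.api_maj_ver > 1) ||
	       (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver >= 7);
}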
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 09f5bf5c..c36da2a3 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -41,7 +41,15 @@ POSSIBILITY OF SUCH DAMAGE.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
@@ -245,6 +253,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -777,7 +787,22 @@ struct i40e_aqc_set_switch_config {
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
- u8 reserved[12];
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
@@ -1829,6 +1854,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1847,7 +1874,11 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_MAX
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
@@ -1904,6 +1935,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2033,19 +2066,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7 loopback field keeps power class info as well */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 power_desc;
+ union {
+ struct {
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -2128,6 +2173,22 @@ struct i40e_aqc_run_phy_activity {
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_addres;
+ u8 reserved1[2];
+ u32 reg_address;
+ u32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
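Since firmware API 1.7 the loopback byte in the get-link-status response also carries the module power class, which is why i40e_aq_get_link_info() below starts masking it with I40E_AQ_LOOPBACK_MASK. A short decode sketch using only the masks defined in this hunk; the helper and parameter names are illustrative:

#include "i40e_type.h"	/* pulls in i40e_adminq_cmd.h for the masks */

/* Illustrative decode of the overloaded loopback byte (API >= 1.7). */
static void
example_decode_loopback_byte(u8 lb, u8 *loopback, u8 *power_class)
{
	*loopback = lb & I40E_AQ_LOOPBACK_MASK;
	*power_class = (lb & I40E_AQ_PWR_CLASS_MASK_LB) >>
		       I40E_AQ_PWR_CLASS_SHIFT_LB;
}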
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 03e94bc8..900d379c 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -34,7 +34,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/**
@@ -93,6 +93,7 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
#endif
@@ -329,13 +330,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = LE16_TO_CPU(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
+ u16 len;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
+ len = LE16_TO_CPU(aq_desc->datalen);
+
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
@@ -1295,6 +1298,8 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1377,6 +1382,8 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
+
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
@@ -1384,6 +1391,12 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Core reset upcoming.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
i40e_msec_delay(1);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
@@ -1689,8 +1702,15 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = I40E_ERR_UNKNOWN_PHY;
if (report_init) {
- hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
- hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ } else {
+ hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
}
return status;
@@ -1952,7 +1972,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
- hw_link_info->loopback = resp->loopback;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
@@ -1983,6 +2003,12 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= 7) {
+ hw->phy.phy_types = LE32_TO_CPU(*(__le32 *)resp->link_type);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
@@ -2679,7 +2705,11 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = CPU_TO_LE16(flags);
scfg->valid_flags = CPU_TO_LE16(valid_flags);
-
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
+ scfg->first_tag = CPU_TO_LE16(hw->first_tag);
+ scfg->second_tag = CPU_TO_LE16(hw->second_tag);
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -2825,6 +2855,10 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
@@ -5523,7 +5557,7 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
- i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
cmd->etype = CPU_TO_LE16(ethtype);
@@ -6654,24 +6688,38 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 temp_addr;
u8 port_num;
u32 i;
-
- temp_addr = I40E_PHY_LED_PROV_REG_1;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- temp_addr++) {
- status = i40e_read_phy_register_clause45(hw,
+ u32 reg_val_aq;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status)
+ return status;
+ *val = (u16)reg_val_aq;
+ } else {
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status =
+ i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
temp_addr, phy_addr,
&reg_val);
- if (status)
- return status;
- *val = reg_val;
- if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
- *led_addr = temp_addr;
- break;
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
}
}
return status;
@@ -6689,51 +6737,115 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
enum i40e_status_code status = I40E_SUCCESS;
- u16 led_ctl = 0;
- u16 led_reg = 0;
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
u8 phy_addr = 0;
u8 port_num;
u32 i;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &led_reg, NULL);
+ } else {
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)&led_reg);
+ }
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_reg, NULL);
+ } else {
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_reg);
+ }
if (status)
return status;
}
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &led_reg, NULL);
+ } else {
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)&led_reg);
+ }
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_reg, NULL);
+ } else {
+ status =
+ i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_reg);
+ }
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_ctl, NULL);
+ } else {
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_ctl);
+ }
}
return status;
restore_config:
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ led_ctl, NULL);
+ } else {
+ status =
+ i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)led_ctl);
+ }
return status;
}
#endif /* PF_DRIVER */
@@ -6863,6 +6975,76 @@ do_retry:
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
+
+/**
+ * i40e_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = reg_addr;
+ cmd->reg_value = reg_val;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = reg_addr;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = cmd->reg_value;
+
+ return status;
+}
+
#ifdef VF_DRIVER
/**
@@ -6879,7 +7061,7 @@ do_retry:
* completion before returning.
**/
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@@ -6918,9 +7100,9 @@ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg)
+ struct virtchnl_vf_resource *msg)
{
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@@ -6930,19 +7112,17 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.iwarp = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
i40e_memcpy(hw->mac.perm_addr,
vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
- I40E_ETH_LENGTH_OF_ADDRESS,
+ ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
}
vsi_res++;
@@ -6959,7 +7139,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
{
- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
@@ -7261,6 +7441,165 @@ i40e_find_segment_in_package(u32 segment_type,
return NULL;
}
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+STATIC enum
+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= CPU_TO_LE16(aq->flags);
+ i40e_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status != I40E_SUCCESS) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ i40e_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+STATIC enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && (i == dev_cnt)) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
/**
* i40e_write_profile
* @hw: pointer to the hardware structure
@@ -7276,52 +7615,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
+ struct i40e_profile_aq_section *ddp_aq;
u32 section_size = 0;
u32 offset = 0, info = 0;
+ u32 sec_off;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
- dev_cnt = profile->device_table_count;
+ I40E_SECTION_TABLE(profile, sec_tbl);
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
}
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
+ return status;
+}
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
continue;
section_size = sec->section.size +
sizeof(struct i40e_profile_section_header);
- /* Write profile */
+ /* Write roll-back MMIO section */
status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
break;
}
}
@@ -7359,9 +7745,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
pinfo->track_id = track_id;
pinfo->version = profile->version;
pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+ i40e_memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE,
+ I40E_NONDMA_TO_NONDMA);
status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
+ track_id, &offset, &info, NULL);
return status;
}
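The new i40e_aq_get_phy_register()/i40e_aq_set_phy_register() wrappers above let the driver reach external PHY registers through the admin queue instead of clause-45 MDIO reads. A usage sketch mirroring the call i40e_led_get_phy() now makes when I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE is set; the wrapper function itself is illustrative:

#include "i40e_prototype.h"

/* Illustrative: read the LED provisioning register over the admin queue,
 * with the same arguments i40e_led_get_phy() passes above. */
static enum i40e_status_code
example_read_led_prov_reg(struct i40e_hw *hw, u32 *val)
{
	return i40e_aq_get_phy_register(hw,
					I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					I40E_PHY_COM_REG_PAGE,
					I40E_PHY_LED_PROV_REG_1,
					val, NULL);
}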
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 4546689a..f4a87842 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -54,6 +54,7 @@ POSSIBILITY OF SUCH DAMAGE.
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#endif /* VF_DRIVER */
#ifdef X722_A0_SUPPORT
#define I40E_DEV_ID_X722_A0 0x374C
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index e8965024..a1e78300 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -749,12 +749,18 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
DEBUGFUNC("i40e_validate_nvm_checksum");
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ /* acquire_nvm provides exclusive NVM lock to synchronize access across
+ * PFs. X710 uses i40e_read_nvm_word_srctl which polls for done bit
+ * twice (first time to be able to write address to I40E_GLNVM_SRCTL
+ * register, second to read data from I40E_GLNVM_SRDATA. One PF can see
+ * done bit and try to write address, while another one will interpret
+ * it as a good time to read data. It will cause invalid data to be
+ * read.
+ */
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- i40e_release_nvm(hw);
+ i40e_release_nvm(hw);
if (ret_code != I40E_SUCCESS)
goto i40e_validate_nvm_checksum_exit;
} else {
@@ -899,6 +905,11 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated
+ */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -934,6 +945,7 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
return status;
}
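The checksum hunk above explains the cross-PF race on I40E_GLNVM_SRCTL and therefore takes the NVM resource lock unconditionally. The resulting acquire/work/release pattern, sketched with existing shared-code calls; the helper and its use of i40e_read_nvm_word() are illustrative:

#include "i40e_prototype.h"

/* Illustrative: NVM access guarded by the resource lock, mirroring the
 * acquire/compute/release sequence the hunk above enforces. */
static enum i40e_status_code
example_locked_nvm_read(struct i40e_hw *hw, u16 offset, u16 *data)
{
	enum i40e_status_code ret;

	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret != I40E_SUCCESS)
		return ret;		/* another PF holds the resource */

	ret = i40e_read_nvm_word(hw, offset, data);
	i40e_release_nvm(hw);		/* release even if the read failed */

	return ret;
}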
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 4bd589e7..acb2023f 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -36,7 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -504,10 +504,10 @@ void i40e_destroy_spinlock(struct i40e_spinlock *sp);
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -533,6 +533,15 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
@@ -566,19 +575,27 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index b150fbdf..a482ab90 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -2805,7 +2805,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 84d57576..dca725af 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -92,7 +92,9 @@ POSSIBILITY OF SUCH DAMAGE.
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
/* Data type manipulation macros. */
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
@@ -197,10 +199,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0005
-
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -269,6 +267,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 req_fec_info;
u8 fec_info;
u8 ext_info;
u8 loopback;
@@ -439,10 +438,10 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -701,8 +700,15 @@ struct i40e_hw {
u16 wol_proxy_vsi_seid;
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
u64 flags;
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
@@ -1912,8 +1918,10 @@ struct i40e_generic_seg_header {
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY 0
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
u32 track_id;
- char name[I40E_DDP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1940,15 +1948,36 @@ struct i40e_profile_section_header {
struct {
#define SECTION_TYPE_INFO 0x00000010
#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
#define SECTION_TYPE_NOTE 0x80000000
#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
u32 type;
u32 offset;
u32 size;
} section;
};
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
struct i40e_profile_info {
u32 track_id;
struct i40e_ddp_version version;
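Together with the common-code changes earlier in this patch, the DDP section types and structures above give a load/rollback flow for profile packages. A hedged sketch of a rollback caller; the package buffer is assumed to be already read into memory, and the helper name is illustrative:

#include "i40e_prototype.h"

/* Illustrative DDP rollback flow: locate the i40e profile segment in a
 * package already loaded into memory, then let the firmware undo it. */
static enum i40e_status_code
example_ddp_rollback(struct i40e_hw *hw, struct i40e_package_header *pkg,
		     u32 track_id)
{
	struct i40e_profile_segment *profile;

	profile = (struct i40e_profile_segment *)
		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (profile == NULL)
		return I40E_ERR_PARAM;	/* no i40e segment in this package */

	/* i40e_rollback_profile() replays only SECTION_TYPE_RB_MMIO
	 * sections, in reverse order, as implemented in i40e_common.c. */
	return i40e_rollback_profile(hw, profile, track_id);
}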
diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h
deleted file mode 100644
index 7a24c0f1..00000000
--- a/drivers/net/i40e/base/i40e_virtchnl.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
-#ifdef I40E_SOL_VF_SUPPORT
- I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
-#endif
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum i40e_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-#ifdef I40E_SOL_VF_SUPPORT
-/* I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
- * VF sends this message to get the default MTU and list of additional ethernet
- * addresses it is allowed to use.
- * PF responds with an indirect message containing
- * i40e_virtchnl_addnl_solaris_config with zero or more
- * i40e_virtchnl_ether_addr structures.
- *
- * It is expected that this operation will only ever be needed for Solaris VFs
- * running under a Solaris PF.
- */
-struct i40e_virtchnl_addnl_solaris_config {
- u16 default_mtu;
- struct i40e_virtchnl_ether_addr_list al;
-};
-
-#endif
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the rss fields in
- * the vf resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
-#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h
new file mode 100644
index 00000000..f00dd360
--- /dev/null
+++ b/drivers/net/i40e/base/virtchnl.h
@@ -0,0 +1,772 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+ VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
+#ifdef VIRTCHNL_IWARP
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+#endif
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
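For illustration only (not part of the patch), the check above expands to a compile-time assertion: if struct virtchnl_msg is not exactly 20 bytes the divisor becomes zero and the build fails.

    /* Illustrative expansion of VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg) */
    enum virtchnl_static_assert_enum_virtchnl_msg {
            virtchnl_static_assert_virtchnl_msg =
                    (20) / ((sizeof(struct virtchnl_msg) == (20)) ? 1 : 0)
    };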
+
+/* Message descriptions and data structures.*/
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
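As a small illustration (not part of the patch; vf_ver is an assumed local variable), a PF host can use these macros to decide whether a GET_VF_RESOURCES request carries a capability bitmap, matching the length check performed later in this header.

    /* Illustration only: 1.1 VFs send a u32 capability bitmap, 1.0 VFs send nothing. */
    u16 expected_len = VF_IS_V11(&vf_ver) ? sizeof(u32) : 0;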
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF offload flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0x00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0x00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
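As a brief illustration (not part of the patch), a VF driver that received this structure would test the negotiated capability bits before relying on a feature; the helper name below is hypothetical.

    /* Hypothetical helper, for illustration only. */
    static inline bool vf_has_rss_pf(const struct virtchnl_vf_resource *res)
    {
            /* RSS through the PF is usable only if the flag was negotiated. */
            return (res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) != 0;
    }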
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support; otherwise it will respond with the
+ * number requested.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
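For illustration only (values assumed), enabling the first four queue pairs of a VSI would be expressed with the bitmaps like this; the structure is then sent as the external buffer of a VIRTCHNL_OP_ENABLE_QUEUES message.

    /* Illustration: enable Rx/Tx queue pairs 0-3 on an assumed VSI id. */
    struct virtchnl_queue_select qs = {
            .vsi_id    = 3,     /* assumed */
            .pad       = 0,
            .rx_queues = 0x0F,  /* bitmap: queues 0-3 */
            .tx_queues = 0x0F,
    };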
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * virtchnl_addnl_solaris_config with zero or more
+ * virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct virtchnl_ether_addr_list al;
+};
+
+#endif
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct eth_stats in an external buffer.
+ */
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
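A sizing note, shown as a sketch (key_len is an assumed variable): because the trailing key[1]/lut[1] members already reserve one byte, the total message length for these variable-length structures subtracts one, exactly as the validation helper later in this file does.

    /* Illustration: buffer length for an RSS key message carrying key_len bytes. */
    u16 msglen = sizeof(struct virtchnl_rss_key) + key_len - 1;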
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
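As an illustration only (values assumed), a PF reporting a link-up event at 10G would fill the structure like this before sending a VIRTCHNL_OP_EVENT message.

    /* Illustration: link-change event, assumed values. */
    struct virtchnl_pf_event pfe = {
            .event = VIRTCHNL_EVENT_LINK_CHANGE,
            .severity = PF_EVENT_SEVERITY_INFO,
    };
    pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
    pfe.event_data.link_event.link_status = true;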
+
+#ifdef VIRTCHNL_IWARP
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+#endif
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+#ifdef VIRTCHNL_IWARP
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+#endif
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+ /* few more checks */
+ if ((valid_len != msglen) || (err_msg_format))
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
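For illustration only (not part of the patch), a PF host backend would run the validator above on every message received from a VF before dispatching it; handle_msg() and the function name below are assumptions, not APIs introduced by this change.

    /* Hypothetical receive path, for illustration. */
    static int pf_process_vf_msg(struct virtchnl_version_info *vf_ver,
                                 u32 v_opcode, u8 *msg, u16 msglen)
    {
            int ret = virtchnl_vc_validate_vf_msg(vf_ver, v_opcode, msg, msglen);

            if (ret)        /* VIRTCHNL_ERR_PARAM or VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH */
                    return ret;     /* report the error back in v_retval */
            return handle_msg(v_opcode, msg, msglen);       /* assumed dispatcher */
    }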
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fd7d3475..5f26e24a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -74,7 +74,7 @@
/* Maximun number of capability elements */
#define I40E_MAX_CAP_ELE_NUM 128
-/* Wait count and inteval */
+/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT 1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000
@@ -515,6 +515,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.get_eeprom = i40e_get_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
+ .tm_ops_get = i40e_tm_ops_get,
};
/* store statistics names and its offset in stats structure */
@@ -879,7 +880,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -919,7 +920,7 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
/* Initialize ethertype filter rule list and hash */
TAILQ_INIT(&ethertype_rule->ethertype_list);
snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
- "ethertype_%s", dev->data->name);
+ "ethertype_%s", dev->device->name);
ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
if (!ethertype_rule->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
@@ -964,7 +965,7 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
/* Initialize tunnel filter rule list and hash */
TAILQ_INIT(&tunnel_rule->tunnel_list);
snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
- "tunnel_%s", dev->data->name);
+ "tunnel_%s", dev->device->name);
tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
if (!tunnel_rule->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
@@ -1009,7 +1010,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
/* Initialize flow director filter rule list and hash */
TAILQ_INIT(&fdir_info->fdir_list);
snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
- "fdir_%s", dev->data->name);
+ "fdir_%s", dev->device->name);
fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
if (!fdir_info->hash_table) {
PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
@@ -1061,7 +1062,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return 0;
}
i40e_set_default_ptype_table(dev);
- pci_dev = I40E_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
@@ -1142,11 +1143,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
config_floating_veb(dev);
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
- ret = i40e_dev_sync_phy_type(hw);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
- goto err_sync_phy_type;
- }
+ i40e_dev_sync_phy_type(hw);
+
/*
* On X710, performance number is far from the expectation on recent
* firmware versions. The fix for this issue may not be integrated in
@@ -1298,6 +1296,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
+ /* initialize Traffic Manager configuration */
+ i40e_tm_conf_init(dev);
+
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
@@ -1331,7 +1332,6 @@ err_msix_pool_init:
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
-err_sync_phy_type:
(void)i40e_shutdown_adminq(hw);
return ret;
@@ -1414,7 +1414,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = I40E_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
@@ -1461,6 +1461,9 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_flow);
}
+ /* Remove all Traffic Manager configuration */
+ i40e_tm_conf_uninit(dev);
+
return 0;
}
@@ -1470,9 +1473,14 @@ i40e_dev_configure(struct rte_eth_dev *dev)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int i, ret;
+ ret = i40e_dev_sync_phy_type(hw);
+ if (ret)
+ return ret;
+
/* Initialize to TRUE. If any of Rx queues doesn't meet the
* bulk allocation or vector Rx preconditions we will reset it.
*/
@@ -1547,7 +1555,7 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1661,7 +1669,7 @@ void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
@@ -1733,7 +1741,7 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
@@ -1765,7 +1773,7 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1806,11 +1814,15 @@ i40e_parse_link_speeds(uint16_t link_speeds)
static int
i40e_phy_conf_link(struct i40e_hw *hw,
uint8_t abilities,
- uint8_t force_speed)
+ uint8_t force_speed,
+ bool is_up)
{
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
+ enum i40e_aq_phy_type cnt;
+ uint32_t phy_type_mask = 0;
+
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
@@ -1828,6 +1840,10 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (status)
return ret;
+ /* If link already up, no need to set up again */
+ if (is_up && phy_ab.phy_type != 0)
+ return I40E_SUCCESS;
+
memset(&phy_conf, 0, sizeof(phy_conf));
/* bits 0-2 use the values from get_phy_abilities_resp */
@@ -1838,13 +1854,21 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (abilities & I40E_AQ_PHY_AN_ENABLED)
phy_conf.link_speed = advt;
else
- phy_conf.link_speed = force_speed;
+ phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
phy_conf.abilities = abilities;
+
+
+ /* To enable link, phy_type mask needs to include each type */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ phy_type_mask |= 1 << cnt;
+
/* use get_phy_abilities_resp value for the rest */
- phy_conf.phy_type = phy_ab.phy_type;
- phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+ phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
+ phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
+ I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
+ I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
@@ -1876,13 +1900,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_AN_ENABLED;
abilities |= I40E_AQ_PHY_LINK_ENABLED;
- /* Skip changing speed on 40G interfaces, FW does not support */
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
- speed = I40E_LINK_SPEED_UNKNOWN;
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- }
-
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, true);
}
static int
@@ -1892,7 +1910,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
struct i40e_vsi *vsi;
@@ -2008,7 +2026,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO,
"lsc won't enable because of no intr multiplex");
- } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ } else {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL |
@@ -2016,7 +2034,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
@@ -2025,6 +2043,11 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_filter_restore(pf);
+ if (pf->tm_conf.root && !pf->tm_conf.committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
return I40E_SUCCESS;
err_up:
@@ -2038,12 +2061,15 @@ static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
+ if (hw->adapter_stopped == 1)
+ return;
/* Disable all queues */
i40e_dev_switch_queues(pf, FALSE);
@@ -2085,6 +2111,11 @@ i40e_dev_stop(struct rte_eth_dev *dev)
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ /* reset hierarchy commit */
+ pf->tm_conf.committed = false;
+
+ hw->adapter_stopped = 1;
}
static void
@@ -2092,7 +2123,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -2100,7 +2131,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
i40e_dev_stop(dev);
- hw->adapter_stopped = 1;
i40e_dev_free_queues(dev);
/* Disable interrupt */
@@ -2225,7 +2255,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, false);
}
int
@@ -2352,9 +2382,6 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
vsi->offset_loaded, &oes->tx_broadcast,
&nes->tx_broadcast);
- /* exclude CRC bytes */
- nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
- nes->tx_broadcast) * ETHER_CRC_LEN;
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
@@ -2390,14 +2417,35 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
I40E_GLV_GORCL(hw->port),
pf->offset_loaded,
- &pf->internal_rx_bytes_offset,
- &pf->internal_rx_bytes);
+ &pf->internal_stats_offset.rx_bytes,
+ &pf->internal_stats.rx_bytes);
i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
I40E_GLV_GOTCL(hw->port),
pf->offset_loaded,
- &pf->internal_tx_bytes_offset,
- &pf->internal_tx_bytes);
+ &pf->internal_stats_offset.tx_bytes,
+ &pf->internal_stats.tx_bytes);
+ /* Get total internal rx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
+ I40E_GLV_UPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_unicast,
+ &pf->internal_stats.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
+ I40E_GLV_MPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_multicast,
+ &pf->internal_stats.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
+ I40E_GLV_BPRCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.rx_broadcast,
+ &pf->internal_stats.rx_broadcast);
+
+ /* exclude CRC size */
+ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
+ pf->internal_stats.rx_multicast +
+ pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
@@ -2420,7 +2468,17 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+
+ /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
+ * I40E_GLPRT_GORCH[H/L], so there is a small window that causes a
+ * negative value.
+ */
+ if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
+ ns->eth.rx_bytes = 0;
+ /* exclude internal rx bytes */
+ else
+ ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
@@ -2448,7 +2506,14 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+
+ /* exclude internal tx bytes */
+ if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
+ ns->eth.tx_bytes = 0;
+ else
+ ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -2869,7 +2934,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
@@ -2973,71 +3038,93 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static int
-i40e_vlan_tpid_set(struct rte_eth_dev *dev,
- enum rte_vlan_type vlan_type,
- uint16_t tpid)
+i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid, int qinq)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint64_t reg_r = 0, reg_w = 0;
- uint16_t reg_id = 0;
- int ret = 0;
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint64_t reg_w = 0;
+ uint16_t reg_id = 3;
+ int ret;
- switch (vlan_type) {
- case ETH_VLAN_TYPE_OUTER:
- if (qinq)
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
reg_id = 2;
- else
- reg_id = 3;
- break;
- case ETH_VLAN_TYPE_INNER:
- if (qinq)
- reg_id = 3;
- else {
- ret = -EINVAL;
- PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.");
- return ret;
- }
- break;
- default:
- ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
- return ret;
}
+
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
"Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
reg_id);
- ret = -EIO;
- return ret;
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
- reg_id, reg_r);
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
if (reg_r == reg_w) {
- ret = 0;
PMD_DRV_LOG(DEBUG, "No need to write");
- return ret;
+ return 0;
}
ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
- ret = -EIO;
PMD_DRV_LOG(ERR,
- "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_id);
- return ret;
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
+ return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_w, reg_id);
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
+
+ return 0;
+}
+
+static int
+i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int ret = 0;
+
+ if ((vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) ||
+ (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported vlan type.");
+ return -EINVAL;
+ }
+ /* 802.1ad frames ability is added in NVM API 1.7 */
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (qinq) {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->first_tag = rte_cpu_to_le_16(tpid);
+ else if (vlan_type == ETH_VLAN_TYPE_INNER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ } else {
+ if (vlan_type == ETH_VLAN_TYPE_OUTER)
+ hw->second_tag = rte_cpu_to_le_16(tpid);
+ }
+ ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Set switch config failed aq_err: %d",
+ hw->aq.asq_last_status);
+ ret = -EIO;
+ }
+ } else
+ /* If NVM API < 1.7, keep the register setting */
+ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
+ tpid, qinq);
return ret;
}
@@ -3066,7 +3153,7 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_EXTEND_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
i40e_vsi_config_double_vlan(vsi, TRUE);
- /* Set global registers with default ether type value */
+ /* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
@@ -3788,7 +3875,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
@@ -4284,6 +4371,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
if (enabled_tcmap & (1 << i))
total_tc++;
+ if (total_tc == 0)
+ total_tc = 1;
vsi->enabled_tc = enabled_tcmap;
/* Number of queues per enabled TC */
@@ -5216,10 +5305,8 @@ i40e_pf_setup(struct i40e_pf *pf)
pf->offset_loaded = FALSE;
memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
- pf->internal_rx_bytes = 0;
- pf->internal_tx_bytes = 0;
- pf->internal_rx_bytes_offset = 0;
- pf->internal_tx_bytes_offset = 0;
+ memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
+ memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
ret = i40e_pf_get_switch_config(pf);
if (ret != I40E_SUCCESS) {
@@ -5734,16 +5821,16 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
index = abs_vf_id / I40E_UINT32_BIT_SIZE;
offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
- /* VFR event occured */
+ /* VFR event occurred */
if (val & (0x1 << offset)) {
int ret;
/* Clear the event first */
I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
(0x1 << offset));
- PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
+ PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
/**
- * Only notify a VF reset event occured,
+ * Only notify a VF reset event occurred,
* don't trigger another SW reset
*/
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
@@ -5804,7 +5891,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_dev_link_update(dev, 0);
if (!ret)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
break;
default:
PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -7460,7 +7547,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
/*
* If both VMDQ and RSS enabled, not all of PF queues are configured.
- * It's necessary to calulate the actual PF queues that are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
*/
if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
num = i40e_pf_calc_configured_queues_num(pf);
@@ -8136,7 +8223,7 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
/**
* Validate if the input set is allowed for a specific PCTYPE
*/
-static int
+int
i40e_validate_input_set(enum i40e_filter_pctype pctype,
enum rte_filter_type filter, uint64_t inset)
{
@@ -8311,7 +8398,7 @@ i40e_parse_input_set(uint64_t *inset,
* Translate the input set from bit masks to register aware bit masks
* and vice versa
*/
-static uint64_t
+uint64_t
i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
{
uint64_t val = 0;
@@ -8396,7 +8483,7 @@ i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
return val;
}
-static int
+int
i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
{
uint8_t i, idx = 0;
@@ -8444,7 +8531,7 @@ i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
return idx;
}
-static void
+void
i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
@@ -9012,7 +9099,7 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
@@ -9150,8 +9237,9 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
*/
/* For both X710 and XL710 */
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
@@ -9172,13 +9260,22 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw)
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
int ret = -ENOTSUP;
+ int retries = 0;
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
NULL);
- if (status)
- return ret;
-
+ while (status) {
+ PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
+ status);
+ retries++;
+ rte_delay_us(100000);
+ if (retries < 5)
+ status = i40e_aq_get_phy_capabilities(hw, false,
+ true, &phy_ab, NULL);
+ else
+ return ret;
+ }
return 0;
}
@@ -9203,8 +9300,12 @@ i40e_configure_registers(struct i40e_hw *hw)
reg_table[i].val =
I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
else /* For X710/XL710/XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ if (hw->aq.fw_maj_ver < 6)
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+ else
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
}
if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
@@ -10460,7 +10561,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
@@ -10494,7 +10595,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -10738,8 +10839,7 @@ i40e_filter_restore(struct i40e_pf *pf)
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- if (strcmp(dev->data->drv_name,
- drv->driver.name))
+ if (strcmp(dev->device->driver->name, drv->driver.name))
return false;
return true;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b0d963c1..48abc05a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -39,6 +39,7 @@
#include <rte_kvargs.h>
#include <rte_hash.h>
#include <rte_flow_driver.h>
+#include <rte_tm_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -102,7 +103,7 @@
/* Linux PF host with virtchnl version 1.1 */
#define PF_IS_V11(vf) \
- (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
+ (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \
((vf)->version_minor == 1))
/* index flex payload per layer */
@@ -251,6 +252,15 @@ enum i40e_flxpld_layer_idx {
I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+
/**
* The overhead from MTU to max frame size.
* Considering QinQ packet, the VLAN tag needs to be counted twice.
@@ -405,7 +415,7 @@ enum I40E_VF_STATE {
struct i40e_pf_vf {
struct i40e_pf *pf;
struct i40e_vsi *vsi;
- enum I40E_VF_STATE state; /* The number of queue pairs availiable */
+ enum I40E_VF_STATE state; /* The number of queue pairs available */
uint16_t vf_idx; /* VF index in pf->vfs */
uint16_t lan_nb_qps; /* Actual queues allocated */
uint16_t reset_cnt; /* Total vf reset times */
@@ -431,6 +441,25 @@ struct i40e_vmdq_info {
struct i40e_vsi *vsi;
};
+#define I40E_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
+#define I40E_MAX_FLX_SOURCE_OFF 480
+#define NONUSE_FLX_PIT_DEST_OFF 63
+#define NONUSE_FLX_PIT_FSIZE 1
+#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
+#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
+ (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
+ (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
+ ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
+ NONUSE_FLX_PIT_DEST_OFF : \
+ ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
+#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
+#define I40E_FDIR_IPv6_TC_OFFSET 20
+
/*
* Structure to store flex pit for flow diretor.
*/
@@ -442,6 +471,7 @@ struct i40e_fdir_flex_pit {
struct i40e_fdir_flex_mask {
uint8_t word_mask; /**< Bit i enables word i of flexible payload */
+ uint8_t nb_bitmask;
struct {
uint8_t offset;
uint16_t mask;
@@ -479,6 +509,12 @@ struct i40e_fdir_info {
struct i40e_fdir_filter_list fdir_list;
struct i40e_fdir_filter **hash_map;
struct rte_hash *hash_table;
+
+ /* Mark if flex pit and mask is set */
+ bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER];
+ bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX];
+
+ bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */
};
/* Ethertype filter number HW supports */
@@ -625,6 +661,67 @@ struct rte_flow {
TAILQ_HEAD(i40e_flow_list, rte_flow);
+/* Struct to store Traffic Manager shaper profile. */
+struct i40e_tm_shaper_profile {
+ TAILQ_ENTRY(i40e_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(i40e_shaper_profile_list, i40e_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum i40e_tm_node_type {
+ I40E_TM_NODE_TYPE_PORT,
+ I40E_TM_NODE_TYPE_TC,
+ I40E_TM_NODE_TYPE_QUEUE,
+ I40E_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct i40e_tm_node {
+ TAILQ_ENTRY(i40e_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ struct i40e_tm_node *parent;
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(i40e_tm_node_list, i40e_tm_node);
+
+/* Struct to store all the Traffic Manager configuration. */
+struct i40e_tm_conf {
+ struct i40e_shaper_profile_list shaper_profile_list;
+ struct i40e_tm_node *root; /* root node - port */
+ struct i40e_tm_node_list tc_list; /* node list for all the TCs */
+ struct i40e_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check if APP can change the TM node
+ * configuration.
+ * When it's true, it means the configuration has been applied to
+ * HW and the APP should not change it.
+ * As we don't support on-the-fly configuration, when starting
+ * the port, APP should call the hierarchy_commit API to set this
+ * flag to true. When stopping the port, this flag should be set
+ * to false.
+ */
+ bool committed;
+};
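Illustrative call order only, assuming the rte_tm API shipped with this release and an assumed port_id; the driver warns at start time if the hierarchy has not been committed.

    /* Sketch: configure the TM hierarchy, commit it, then start the port. */
    struct rte_tm_error error;

    /* ... rte_tm_shaper_profile_add() / rte_tm_node_add() calls ... */
    if (rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &error) != 0)
            rte_exit(EXIT_FAILURE, "TM commit failed: %s\n", error.message);
    rte_eth_dev_start(port_id);     /* the driver's committed flag is now set */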
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -639,11 +736,9 @@ struct i40e_pf {
struct i40e_hw_port_stats stats_offset;
struct i40e_hw_port_stats stats;
- /* internal packet byte count, it should be excluded from the total */
- uint64_t internal_rx_bytes;
- uint64_t internal_tx_bytes;
- uint64_t internal_rx_bytes_offset;
- uint64_t internal_tx_bytes_offset;
+ /* internal packet statistics, it should be excluded from the total */
+ struct i40e_eth_stats internal_stats_offset;
+ struct i40e_eth_stats internal_stats;
bool offset_loaded;
struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
@@ -690,6 +785,7 @@ struct i40e_pf {
struct i40e_flow_list flow_list;
bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
bool qinq_replace_flag; /* QINQ filter replace is done */
+ struct i40e_tm_conf tm_conf;
};
enum pending_msg {
@@ -742,7 +838,7 @@ struct i40e_vf {
/* Event from pf */
bool dev_closed;
bool link_up;
- enum i40e_aq_link_speed link_speed;
+ enum virtchnl_link_speed link_speed;
bool vf_reset;
volatile uint32_t pend_cmd; /* pending command not finished yet */
int32_t cmd_retval; /* return value of the cmd response from PF */
@@ -750,8 +846,8 @@ struct i40e_vf {
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
/* VSI info */
- struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */
- struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ struct virtchnl_vf_resource *vf_res; /* All VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
struct i40e_vsi vsi;
uint64_t flags;
};
@@ -822,8 +918,7 @@ int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr);
void i40e_update_vsi_stats(struct i40e_vsi *vsi);
void i40e_pf_disable_irq0(struct i40e_hw *hw);
void i40e_pf_enable_irq0(struct i40e_hw *hw);
-int i40e_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi);
void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
@@ -898,6 +993,17 @@ int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
int total);
bool is_i40e_supported(struct rte_eth_dev *dev);
+int i40e_validate_input_set(enum i40e_filter_pctype pctype,
+ enum rte_filter_type filter, uint64_t inset);
+int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask,
+ uint8_t nb_elem);
+uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input);
+void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
+
+int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void i40e_tm_conf_init(struct rte_eth_dev *dev);
+void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 6e5839dd..f6d82934 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -56,7 +56,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -77,7 +76,7 @@
#define MAX_RESET_WAIT_CNT 20
struct i40evf_arq_msg_info {
- enum i40e_virtchnl_ops ops;
+ enum virtchnl_ops ops;
enum i40e_status_code result;
uint16_t buf_len;
uint16_t msg_len;
@@ -85,7 +84,7 @@ struct i40evf_arq_msg_info {
};
struct vf_cmd_info {
- enum i40e_virtchnl_ops ops;
+ enum virtchnl_ops ops;
uint8_t *in_args;
uint32_t in_args_size;
uint8_t *out_buffer;
@@ -108,7 +107,7 @@ static void i40evf_dev_stop(struct rte_eth_dev *dev);
static void i40evf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
@@ -159,7 +158,7 @@ static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
-static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
+static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
uint8_t *msg,
uint16_t msglen);
@@ -244,7 +243,7 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops opcode;
+ enum virtchnl_ops opcode;
enum i40e_status_code retval;
int ret;
enum i40evf_aq_result result = I40EVF_MSG_NON;
@@ -259,16 +258,16 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
return result;
}
- opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
/* pf sys event */
- if (opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)event.msg_buf;
+ if (opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event.msg_buf;
result = I40EVF_MSG_SYS;
switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
vf->link_up =
vpe->event_data.link_event.link_status;
vf->link_speed =
@@ -277,12 +276,12 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
PMD_DRV_LOG(INFO, "Link status update:%s",
vf->link_up ? "up" : "down");
break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
vf->vf_reset = true;
vf->pend_msg |= PFMSG_RESET_IMPENDING;
PMD_DRV_LOG(INFO, "vf is reseting");
break;
- case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
vf->dev_closed = true;
vf->pend_msg |= PFMSG_DRIVER_CLOSE;
PMD_DRV_LOG(INFO, "PF driver closed");
@@ -312,17 +311,17 @@ static inline void
_clear_cmd(struct i40e_vf *vf)
{
rte_wmb();
- vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
}
/*
* Check whether there is a pending cmd in execution. If none, set the new command.
*/
static inline int
-_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
+_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
{
int ret = rte_atomic32_cmpset(&vf->pend_cmd,
- I40E_VIRTCHNL_OP_UNKNOWN, ops);
+ VIRTCHNL_OP_UNKNOWN, ops);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
@@ -347,7 +346,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
info.msg = args->out_buffer;
info.buf_len = args->out_size;
- info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
+ info.ops = VIRTCHNL_OP_UNKNOWN;
info.result = I40E_SUCCESS;
err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
@@ -359,12 +358,12 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
}
switch (args->ops) {
- case I40E_VIRTCHNL_OP_RESET_VF:
+ case VIRTCHNL_OP_RESET_VF:
/*no need to process in this function */
err = 0;
break;
- case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
/* for init adminq commands, need to poll the response */
err = -1;
do {
@@ -385,13 +384,18 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
/* for other adminq commands at runtime, wait for the cmd done flag */
err = -1;
do {
- if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
err = 0;
break;
}
rte_delay_ms(ASQ_DELAY_MS);
/* If no msg was read, or only a sys event was read, continue */
} while (i++ < MAX_TRY_TIMES);
+ /* If no response is received, clear the command */
+ if (i >= MAX_TRY_TIMES) {
+ PMD_DRV_LOG(WARNING, "No response for %d", args->ops);
+ _clear_cmd(vf);
+ }
break;
}
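The added timeout handling above clears the pending command when the PF never answers, so the next command is not blocked forever. A standalone sketch of that bounded-poll-then-clear pattern, with assumed names standing in for the driver's vf->pend_cmd and rte_delay_ms():

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRY_TIMES	10	/* assumed retry count for the sketch */
#define OP_UNKNOWN	0	/* stands in for VIRTCHNL_OP_UNKNOWN */

static bool poll_for_completion(volatile int *pend_cmd)
{
	int i;

	for (i = 0; i < MAX_TRY_TIMES; i++) {
		if (*pend_cmd == OP_UNKNOWN)
			return true;	/* PF answered, command slot freed */
		/* the real driver sleeps here: rte_delay_ms(ASQ_DELAY_MS) */
	}
	/* No response: clear the slot ourselves so later commands can run. */
	fprintf(stderr, "no response for command %d, clearing it\n", *pend_cmd);
	*pend_cmd = OP_UNKNOWN;
	return false;
}

int main(void)
{
	volatile int pend_cmd = 42;	/* pretend a command is outstanding */

	if (!poll_for_completion(&pend_cmd))
		printf("command abandoned, pend_cmd = %d\n", pend_cmd);
	return 0;
}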
@@ -404,15 +408,15 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
static int
i40evf_check_api_version(struct rte_eth_dev *dev)
{
- struct i40e_virtchnl_version_info version, *pver;
+ struct virtchnl_version_info version, *pver;
int err;
struct vf_cmd_info args;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- version.major = I40E_VIRTCHNL_VERSION_MAJOR;
- version.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
- args.ops = I40E_VIRTCHNL_OP_VERSION;
+ args.ops = VIRTCHNL_OP_VERSION;
args.in_args = (uint8_t *)&version;
args.in_args_size = sizeof(version);
args.out_buffer = vf->aq_resp;
@@ -424,19 +428,19 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
return err;
}
- pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
+ pver = (struct virtchnl_version_info *)args.out_buffer;
vf->version_major = pver->major;
vf->version_minor = pver->minor;
if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
- else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
+ else if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
PMD_DRV_LOG(INFO, "Peer is Linux PF host");
else {
PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
vf->version_major, vf->version_minor,
- I40E_VIRTCHNL_VERSION_MAJOR,
- I40E_VIRTCHNL_VERSION_MINOR);
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
return -1;
}
@@ -452,15 +456,15 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
struct vf_cmd_info args;
uint32_t caps, len;
- args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
if (PF_IS_V11(vf)) {
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
- I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
} else {
@@ -474,8 +478,8 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
return err;
}
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
(void)rte_memcpy(vf->vf_res, args.out_buffer,
RTE_MIN(args.out_size, len));
@@ -492,18 +496,18 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_promisc_info promisc;
+ struct virtchnl_promisc_info promisc;
promisc.flags = 0;
promisc.vsi_id = vf->vsi_res->vsi_id;
if (enable_unicast)
- promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
if (enable_multicast)
- promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
- args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
args.in_args = (uint8_t *)&promisc;
args.in_args_size = sizeof(promisc);
args.out_buffer = vf->aq_resp;
@@ -517,30 +521,46 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
return err;
}
-/* Configure vlan and double vlan offload. Use flag to specify which part to configure */
static int
-i40evf_config_vlan_offload(struct rte_eth_dev *dev,
- bool enable_vlan_strip)
+i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_vlan_offload_info offload;
-
- offload.vsi_id = vf->vsi_res->vsi_id;
- offload.enable_vlan_strip = enable_vlan_strip;
+ int ret;
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
- args.in_args = (uint8_t *)&offload;
- args.in_args_size = sizeof(offload);
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
- PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
+ return ret;
+}
- return err;
+static int
+i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");
+
+ return ret;
}
static int
@@ -550,7 +570,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err;
struct vf_cmd_info args;
- struct i40e_virtchnl_pvid_info tpid_info;
+ struct virtchnl_pvid_info tpid_info;
if (info == NULL) {
PMD_DRV_LOG(ERR, "invalid parameters");
@@ -561,7 +581,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
tpid_info.vsi_id = vf->vsi_res->vsi_id;
(void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
+ args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
args.in_args = (uint8_t *)&tpid_info;
args.in_args_size = sizeof(tpid_info);
args.out_buffer = vf->aq_resp;
@@ -575,7 +595,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
}
static void
-i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
+i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
uint16_t vsi_id,
uint16_t queue_id,
uint16_t nb_txq,
@@ -590,7 +610,7 @@ i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
}
static void
-i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
+i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
uint16_t vsi_id,
uint16_t queue_id,
uint16_t nb_rxq,
@@ -618,8 +638,8 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
- struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct virtchnl_vsi_queue_config_info *vc_vqci;
+ struct virtchnl_queue_pair_info *vc_qpi;
struct vf_cmd_info args;
uint16_t i, nb_qp = vf->num_queue_pairs;
const uint32_t size =
@@ -628,7 +648,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
int ret;
memset(buff, 0, sizeof(buff));
- vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
+ vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
vc_vqci->vsi_id = vf->vsi_res->vsi_id;
vc_vqci->num_queue_pairs = nb_qp;
@@ -640,7 +660,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
vf->max_pkt_len, rxq[i]);
}
memset(&args, 0, sizeof(args));
- args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
args.in_args = (uint8_t *)vc_vqci;
args.in_args_size = size;
args.out_buffer = vf->aq_resp;
@@ -648,7 +668,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+ "VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
@@ -662,8 +682,8 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
- struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct virtchnl_vsi_queue_config_ext_info *vc_vqcei;
+ struct virtchnl_queue_pair_ext_info *vc_qpei;
struct vf_cmd_info args;
uint16_t i, nb_qp = vf->num_queue_pairs;
const uint32_t size =
@@ -672,7 +692,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
int ret;
memset(buff, 0, sizeof(buff));
- vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
+ vc_vqcei = (struct virtchnl_vsi_queue_config_ext_info *)buff;
vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
vc_vqcei->num_queue_pairs = nb_qp;
vc_qpei = vc_vqcei->qpair;
@@ -693,7 +713,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
}
memset(&args, 0, sizeof(args));
args.ops =
- (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
+ (enum virtchnl_ops)VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
args.in_args = (uint8_t *)vc_vqcei;
args.in_args_size = size;
args.out_buffer = vf->aq_resp;
@@ -701,7 +721,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
+ "VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
@@ -724,10 +744,10 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct vf_cmd_info args;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
- sizeof(struct i40e_virtchnl_vector_map)];
- struct i40e_virtchnl_irq_map_info *map_info;
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
+ sizeof(struct virtchnl_vector_map)];
+ struct virtchnl_irq_map_info *map_info;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vector_id;
int i, err;
@@ -741,7 +761,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
vector_id = I40E_MISC_VEC_ID;
}
- map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
+ map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
map_info->num_vectors = 1;
map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
@@ -756,7 +776,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
intr_handle->intr_vec[i] = vector_id;
}
- args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
args.in_args = (u8 *)cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -773,7 +793,7 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
bool on)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_queue_select queue_select;
+ struct virtchnl_queue_select queue_select;
int err;
struct vf_cmd_info args;
memset(&queue_select, 0, sizeof(queue_select));
@@ -785,9 +805,9 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
queue_select.tx_queues |= 1 << qid;
if (on)
- args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
else
- args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
args.in_args = (u8 *)&queue_select;
args.in_args_size = sizeof(queue_select);
args.out_buffer = vf->aq_resp;
@@ -861,10 +881,10 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
__rte_unused uint32_t index,
__rte_unused uint32_t pool)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
- sizeof(struct i40e_virtchnl_ether_addr)];
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
@@ -876,13 +896,13 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
return I40E_ERR_INVALID_MAC_ADDR;
}
- list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
- args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
args.in_args = cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -891,6 +911,8 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_ADD_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num++;
return err;
}
@@ -899,10 +921,10 @@ static void
i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
- sizeof(struct i40e_virtchnl_ether_addr)];
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
+ sizeof(struct virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
@@ -914,13 +936,13 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
return;
}
- list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
- args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
args.in_args = cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -929,6 +951,8 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_DEL_ETHER_ADDRESS");
+ else
+ vf->vsi.mac_num--;
return;
}
@@ -947,13 +971,13 @@ static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_queue_select q_stats;
+ struct virtchnl_queue_select q_stats;
int err;
struct vf_cmd_info args;
memset(&q_stats, 0, sizeof(q_stats));
q_stats.vsi_id = vf->vsi_res->vsi_id;
- args.ops = I40E_VIRTCHNL_OP_GET_STATS;
+ args.ops = VIRTCHNL_OP_GET_STATS;
args.in_args = (u8 *)&q_stats;
args.in_args_size = sizeof(q_stats);
args.out_buffer = vf->aq_resp;
@@ -1050,18 +1074,18 @@ static int
i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vlan_filter_list *vlan_list;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
sizeof(uint16_t)];
int err;
struct vf_cmd_info args;
- vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
vlan_list->vsi_id = vf->vsi_res->vsi_id;
vlan_list->num_elements = 1;
vlan_list->vlan_id[0] = vlanid;
- args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
+ args.ops = VIRTCHNL_OP_ADD_VLAN;
args.in_args = (u8 *)&cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -1077,18 +1101,18 @@ static int
i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vlan_filter_list *vlan_list;
- uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ struct virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
sizeof(uint16_t)];
int err;
struct vf_cmd_info args;
- vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
vlan_list->vsi_id = vf->vsi_res->vsi_id;
vlan_list->num_elements = 1;
vlan_list->vlan_id[0] = vlanid;
- args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
+ args.ops = VIRTCHNL_OP_DEL_VLAN;
args.in_args = (u8 *)&cmd_buffer;
args.in_args_size = sizeof(cmd_buffer);
args.out_buffer = vf->aq_resp;
@@ -1178,7 +1202,7 @@ i40evf_reset_vf(struct i40e_hw *hw)
reset = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
- if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
+ if (VIRTCHNL_VFR_COMPLETED == reset || VIRTCHNL_VFR_VFACTIVE == reset)
break;
else
rte_delay_ms(50);
@@ -1242,8 +1266,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
PMD_INIT_LOG(ERR, "check_api version failed");
goto err_aq;
}
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
if (!vf->vf_res) {
PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
@@ -1257,7 +1281,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vf->vf_res->num_vsis; i++) {
- if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
vf->vsi_res = &vf->vf_res->vsi_res[i];
}
@@ -1269,7 +1293,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
if (hw->mac.type == I40E_MAC_X722_VF)
vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
vf->vsi.vsi_id = vf->vsi_res->vsi_id;
- vf->vsi.type = vf->vsi_res->vsi_type;
+ vf->vsi.type = (enum i40e_vsi_type)vf->vsi_res->vsi_type;
vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -1318,25 +1342,25 @@ i40evf_uninit_vf(struct rte_eth_dev *dev)
}
static void
-i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
- uint8_t *msg,
- __rte_unused uint16_t msglen)
+i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
+ __rte_unused uint16_t msglen)
{
- struct i40e_virtchnl_pf_event *pf_msg =
- (struct i40e_virtchnl_pf_event *)msg;
+ struct virtchnl_pf_event *pf_msg =
+ (struct virtchnl_pf_event *)msg;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
switch (pf_msg->event) {
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
break;
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
- case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
@@ -1352,7 +1376,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info info;
uint16_t pending, aq_opc;
- enum i40e_virtchnl_ops msg_opc;
+ enum virtchnl_ops msg_opc;
enum i40e_status_code msg_ret;
int ret;
@@ -1377,13 +1401,13 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* cookie_high of struct i40e_aq_desc, while the return error code
* is stored in cookie_low, which is done by
* i40e_aq_send_msg_to_vf in the PF driver. */
- msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
info.desc.cookie_high);
msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
info.desc.cookie_low);
switch (aq_opc) {
case i40e_aqc_opc_send_msg_to_vf:
- if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
+ if (msg_opc == VIRTCHNL_OP_EVENT)
/* process event*/
i40evf_handle_pf_event(dev, info.msg_buf,
info.msg_len);
@@ -1460,7 +1484,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
struct i40e_hw *hw
= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1592,8 +1616,8 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
*/
if (!conf->rxmode.hw_strip_crc) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) {
+ if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
/* Peer is running non-DPDK PF driver. */
PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
return -EINVAL;
@@ -1621,22 +1645,15 @@ i40evf_init_vlan(struct rte_eth_dev *dev)
static void
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
- bool enable_vlan_strip = 0;
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- /* Linux pf host doesn't support vlan offload yet */
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
- /* Vlan stripping setting */
- if (mask & ETH_VLAN_STRIP_MASK) {
- /* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
- enable_vlan_strip = 1;
- else
- enable_vlan_strip = 0;
-
- i40evf_config_vlan_offload(dev, enable_vlan_strip);
- }
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.hw_vlan_strip)
+ i40evf_enable_vlan_strip(dev);
+ else
+ i40evf_disable_vlan_strip(dev);
}
}
@@ -1884,7 +1901,7 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
@@ -1917,7 +1934,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
@@ -1944,7 +1961,7 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
@@ -1979,7 +1996,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -2001,7 +2018,7 @@ i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static void
i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
{
- struct i40e_virtchnl_ether_addr_list *list;
+ struct virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
int err, i, j;
int next_begin = 0;
@@ -2012,11 +2029,11 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
do {
j = 0;
- len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ len = sizeof(struct virtchnl_ether_addr_list);
for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
continue;
- len += sizeof(struct i40e_virtchnl_ether_addr);
+ len += sizeof(struct virtchnl_ether_addr);
if (len >= I40E_AQ_BUF_SZ) {
next_begin = i + 1;
break;
@@ -2043,17 +2060,23 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = j;
- args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS :
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
args.in_args = (uint8_t *)list;
args.in_args_size = len;
args.out_buffer = vf->aq_resp;
args.out_size = I40E_AQ_BUF_SZ;
err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
+ if (err) {
PMD_DRV_LOG(ERR, "fail to execute command %s",
add ? "OP_ADD_ETHER_ADDRESS" :
"OP_DEL_ETHER_ADDRESS");
+ } else {
+ if (add)
+ vf->vsi.mac_num++;
+ else
+ vf->vsi.mac_num--;
+ }
rte_free(list);
begin = next_begin;
} while (begin < I40E_NUM_MACADDR_MAX);
@@ -2064,7 +2087,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
@@ -2130,11 +2153,14 @@ err_queue:
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev);
PMD_INIT_FUNC_TRACE();
+ if (hw->adapter_stopped == 1)
+ return;
i40evf_stop_queues(dev);
i40evf_disable_queues_intr(dev);
i40e_dev_clear_queues(dev);
@@ -2147,6 +2173,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
}
/* remove all mac addrs */
i40evf_add_del_all_mac_addr(dev, FALSE);
+ hw->adapter_stopped = 1;
}
@@ -2261,7 +2288,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2330,11 +2357,10 @@ static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
- hw->adapter_stopped = 1;
i40e_dev_free_queues(dev);
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 28cc554f..8013add4 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -67,15 +67,13 @@
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
-#define I40E_FDIR_IPv6_TC_OFFSET 20
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
#define I40E_FDIR_UDP_DEFAULT_LEN 400
-/* Wait count and interval for fdir filter programming */
-#define I40E_FDIR_WAIT_COUNT 10
-#define I40E_FDIR_WAIT_INTERVAL_US 1000
+/* Wait time for fdir filter programming */
+#define I40E_FDIR_MAX_WAIT_US 10000
/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY 50
@@ -84,21 +82,6 @@
#define I40E_COUNTER_PF 2
/* Statistic counter index for one pf */
#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
-#define I40E_MAX_FLX_SOURCE_OFF 480
-#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
-
-#define NONUSE_FLX_PIT_DEST_OFF 63
-#define NONUSE_FLX_PIT_FSIZE 1
-#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
- (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
- I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
- (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
- I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
- ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
- NONUSE_FLX_PIT_DEST_OFF : \
- ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
- I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
- I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
#define I40E_FDIR_FLOWS ( \
(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
@@ -113,8 +96,6 @@
(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
(1 << RTE_ETH_FLOW_L2_PAYLOAD))
-#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
-
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
@@ -257,7 +238,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->data->drv_name,
+ eth_dev->device->driver->name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -300,8 +281,12 @@ i40e_fdir_teardown(struct i40e_pf *pf)
vsi = pf->fdir.fdir_vsi;
if (!vsi)
return;
- i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
- i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
i40e_dev_rx_queue_release(pf->fdir.rxq);
pf->fdir.rxq = NULL;
i40e_dev_tx_queue_release(pf->fdir.txq);
@@ -378,8 +363,6 @@ i40e_init_flx_pld(struct i40e_pf *pf)
}
}
-#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
-
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
if ((flex_pit2).src_offset < \
(flex_pit1).src_offset + (flex_pit1).size) { \
@@ -1295,28 +1278,27 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
/* Update the tx tail register */
rte_wmb();
I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
-
- for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
- rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
+ for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
if ((txdp->cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
break;
+ rte_delay_us(1);
}
- if (i >= I40E_FDIR_WAIT_COUNT) {
+ if (i >= I40E_FDIR_MAX_WAIT_US) {
PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
" time out to get DD on tx queue.");
return -ETIMEDOUT;
}
/* wait up to 10 ms in total for the programming status */
- rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
- if (i40e_check_fdir_programming_status(rxq) < 0) {
- PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
- " programming status reported.");
- return -ENOSYS;
+ for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if (i40e_check_fdir_programming_status(rxq) >= 0)
+ return 0;
+ rte_delay_us(1);
}
-
- return 0;
+ PMD_DRV_LOG(ERR,
+ "Failed to program FDIR filter: programming status reported.");
+ return -ETIMEDOUT;
}
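The rework above replaces the old 10 x 1000 us polling with a single 10000 us budget polled in 1 us steps, shared between the descriptor-done check and the programming-status check. A standalone sketch of that shared-budget structure, with the hardware checks stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define MAX_WAIT_US	10000	/* local copy of I40E_FDIR_MAX_WAIT_US */

/* Stand-ins for the hardware checks; always "ready" here for illustration. */
static bool desc_done(void)      { return true; }
static bool prog_status_ok(void) { return true; }

static int program_filter(void)
{
	int i;

	for (i = 0; i < MAX_WAIT_US; i++) {
		if (desc_done())
			break;
		/* rte_delay_us(1) in the real driver */
	}
	if (i >= MAX_WAIT_US)
		return -1;		/* timed out: DD bit never set */

	for (; i < MAX_WAIT_US; i++) {	/* same counter, same 10 ms budget */
		if (prog_status_ok())
			return 0;
		/* rte_delay_us(1) in the real driver */
	}
	return -1;			/* timed out: no programming status */
}

int main(void)
{
	printf("program_filter() = %d\n", program_filter());
	return 0;
}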
/*
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 24e1c658..b92719a3 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -52,8 +52,7 @@
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
-#define I40E_IPV4_TC_SHIFT 4
-#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
#define I40E_IPV6_FRAG_HEADER 44
#define I40E_TENANT_ARRAY_NUM 3
#define I40E_TCI_MASK 0xFFFF
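The new I40E_IPV6_TC_MASK above shifts by I40E_FDIR_IPv6_TC_OFFSET (20), which is where the 8-bit traffic class sits inside the 32-bit IPv6 version/TC/flow-label word (version in bits 31-28, TC in bits 27-20, flow label in bits 19-0). A small standalone check of that layout, using local copies of the constants:

#include <stdio.h>
#include <stdint.h>

#define IPV6_TC_OFFSET	20				/* I40E_FDIR_IPv6_TC_OFFSET */
#define IPV6_TC_MASK	(0xFF << IPV6_TC_OFFSET)	/* bits 27..20 -> 0x0ff00000 */

int main(void)
{
	uint32_t vtc_flow = 0x60000000;		/* version 6, TC 0, flow label 0 */
	uint8_t tc = 0x2E;			/* arbitrary example traffic class */

	vtc_flow |= (uint32_t)tc << IPV6_TC_OFFSET;
	printf("mask     = 0x%08x\n", (uint32_t)IPV6_TC_MASK);
	printf("vtc_flow = 0x%08x, extracted tc = 0x%02x\n",
	       vtc_flow, (vtc_flow & IPV6_TC_MASK) >> IPV6_TC_OFFSET);
	return 0;
}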
@@ -114,6 +113,12 @@ static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -135,7 +140,7 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
struct rte_flow_error *error,
union i40e_filter_t *filter);
static int
-i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_tunnel_filter_conf *filter);
@@ -158,102 +163,1295 @@ static enum rte_flow_item_type pattern_ethertype[] = {
/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
RTE_FLOW_ITEM_TYPE_END,
};
-static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_RAW,
+ RTE_FLOW_ITEM_TYPE_VF,
RTE_FLOW_ITEM_TYPE_END,
};
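The static arrays above enumerate every item sequence the FDIR path now recognises. As a rough application-side illustration (not part of this patch) of how one of the plain patterns, ETH/IPV4/UDP, could be exercised through the rte_flow API -- the DPDK 17.08 prototypes are assumed, and port id, UDP port and queue index are made up:

#include <string.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch: steer IPv4/UDP packets with destination port 5000 to queue 3.
 * ETH and IPv4 items carry no spec/mask, which the relaxed parser in this
 * patch accepts; the field that must match (UDP dst port) is fully masked. */
static struct rte_flow *
fdir_udp_example(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec, udp_mask;
	struct rte_flow_action_queue queue = { .index = 3 };

	memset(&udp_spec, 0, sizeof(udp_spec));
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_spec.hdr.dst_port = rte_cpu_to_be_16(5000);
	udp_mask.hdr.dst_port = 0xffff;	/* fields to match: all ones */

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

The *_raw_* and *_vf variants follow the same shape, with RAW or VF items appended before END.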
@@ -296,7 +1494,40 @@ static enum rte_flow_item_type pattern_vxlan_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_nvgre_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_mpls_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
@@ -329,7 +1560,6 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched QINQ */
static enum rte_flow_item_type pattern_qinq_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
@@ -340,28 +1570,163 @@ static enum rte_flow_item_type pattern_qinq_1[] = {
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
- /* FDIR */
+ /* FDIR - support default flow type without flexible payload */
+ { pattern_ethertype, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
- { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* FDIR - support default flow type with flexible payload */
+ { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support single vlan input set */
+ { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
+ /* FDIR - support VF item */
+ { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
/* VXLAN */
{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* NVGRE */
+ { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
/* MPLSoUDP & MPLSoGRE */
{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
@@ -452,10 +1817,10 @@ i40e_match_pattern(enum rte_flow_item_type *item_array,
/* Find if there's parse filter function matched */
static parse_filter_t
-i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
{
parse_filter_t parse_filter = NULL;
- uint8_t i = 0;
+ uint8_t i = *idx;
for (; i < RTE_DIM(i40e_supported_patterns); i++) {
if (i40e_match_pattern(i40e_supported_patterns[i].items,
@@ -465,6 +1830,8 @@ i40e_find_parse_filter_func(struct rte_flow_item *pattern)
}
}
+ *idx = ++i;
+
return parse_filter;
}
@@ -704,12 +2071,244 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+static int
+i40e_flow_check_raw_item(const struct rte_flow_item *item,
+ const struct rte_flow_item_raw *raw_spec,
+ struct rte_flow_error *error)
+{
+ if (!raw_spec->relative) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Relative should be 1.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset % sizeof(uint16_t)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be even.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->search || raw_spec->limit) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "search or limit is not supported.");
+ return -rte_errno;
+ }
+
+ if (raw_spec->offset < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Offset should be non-negative.");
+ return -rte_errno;
+ }
+ return 0;
+}
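/*
 * Illustrative only (not part of this patch): a RAW spec filled in so that
 * it passes the checks above -- relative set, even non-negative offset, no
 * search/limit.  The rte_flow_item_raw layout from DPDK 17.08 is assumed
 * and the pattern bytes are made up.
 */
#include <stdlib.h>
#include <string.h>
#include <rte_flow.h>

static struct rte_flow_item_raw *
make_raw_spec(void)
{
	static const uint8_t flex_bytes[4] = { 0xAB, 0xCD, 0x12, 0x34 };
	struct rte_flow_item_raw *raw;

	/* pattern[] is a flexible array member, so allocate room for it */
	raw = calloc(1, sizeof(*raw) + sizeof(flex_bytes));
	if (raw == NULL)
		return NULL;
	raw->relative = 1;			/* must be 1 */
	raw->search = 0;			/* search/limit unsupported */
	raw->limit = 0;
	raw->offset = 2;			/* even and non-negative */
	raw->length = sizeof(flex_bytes);
	memcpy(raw->pattern, flex_bytes, sizeof(flex_bytes));
	return raw;
}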
+
+static int
+i40e_flow_store_flex_pit(struct i40e_pf *pf,
+ struct i40e_fdir_flex_pit *flex_pit,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ uint8_t field_idx;
+
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
+ /* Check if the configuration conflicts with the stored one */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
+ pf->fdir.flex_set[field_idx].size != flex_pit->size ||
+ pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
+ return -1;
+
+ /* Check if the configuration exists. */
+ if (pf->fdir.flex_pit_flag[layer_idx] &&
+ (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
+ pf->fdir.flex_set[field_idx].size == flex_pit->size &&
+ pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
+ return 1;
+
+ pf->fdir.flex_set[field_idx].src_offset =
+ flex_pit->src_offset;
+ pf->fdir.flex_set[field_idx].size =
+ flex_pit->size;
+ pf->fdir.flex_set[field_idx].dst_offset =
+ flex_pit->dst_offset;
+
+ return 0;
+}
+
+static int
+i40e_flow_store_flex_mask(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint8_t *mask)
+{
+ struct i40e_fdir_flex_mask flex_mask;
+ uint16_t mask_tmp;
+ uint8_t i, nb_bitmask = 0;
+
+ memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
+ if (mask_tmp) {
+ flex_mask.word_mask |=
+ I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+ if (mask_tmp != UINT16_MAX) {
+ flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
+ flex_mask.bitmask[nb_bitmask].offset =
+ i / sizeof(uint16_t);
+ nb_bitmask++;
+ if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
+ return -1;
+ }
+ }
+ }
+ flex_mask.nb_bitmask = nb_bitmask;
+
+ if (pf->fdir.flex_mask_flag[pctype] &&
+ (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return -2;
+ else if (pf->fdir.flex_mask_flag[pctype] &&
+ !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+ sizeof(struct i40e_fdir_flex_mask))))
+ return 1;
+
+ memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
+ sizeof(struct i40e_fdir_flex_mask));
+ return 0;
+}
+
+static void
+i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
+ enum i40e_flxpld_layer_idx layer_idx,
+ uint8_t raw_id)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t flx_pit;
+ uint8_t field_idx;
+ uint16_t min_next_off = 0; /* in words */
+ uint8_t i;
+
+ /* Set flex pit */
+ for (i = 0; i < raw_id; i++) {
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+ pf->fdir.flex_set[field_idx].size,
+ pf->fdir.flex_set[field_idx].dst_offset);
+
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+ pf->fdir.flex_set[field_idx].size;
+ }
+
+ for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+ /* set the unused register, obeying the register's constraint */
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+ NONUSE_FLX_PIT_DEST_OFF);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ min_next_off++;
+ }
+
+ pf->fdir.flex_pit_flag[layer_idx] = 1;
+}
+
+static void
+i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_mask *flex_mask;
+ uint32_t flxinset, fd_mask;
+ uint8_t i;
+
+ /* Set flex mask */
+ flex_mask = &pf->fdir.flex_mask[pctype];
+ flxinset = (flex_mask->word_mask <<
+ I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+ I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+ for (i = 0; i < flex_mask->nb_bitmask; i++) {
+ fd_mask = (flex_mask->bitmask[i].mask <<
+ I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+ I40E_PRTQF_FD_MSK_MASK_MASK;
+ fd_mask |= ((flex_mask->bitmask[i].offset +
+ I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+ I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+ I40E_PRTQF_FD_MSK_OFFSET_MASK;
+ i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+ }
+
+ pf->fdir.flex_mask_flag[pctype] = 1;
+}
+
+static int
+i40e_flow_set_fdir_inset(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ uint64_t input_set)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t inset_reg = 0;
+ uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
+ int i, num;
+
+ /* Check if the input set is valid */
+ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
+ input_set) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ return -EINVAL;
+ }
+
+ /* Check if the configuration conflicts with the stored one */
+ if (pf->fdir.inset_flag[pctype] &&
+ memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return -1;
+
+ if (pf->fdir.inset_flag[pctype] &&
+ !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
+ return 0;
+
+ num = i40e_generate_inset_mask_reg(input_set, mask_reg,
+ I40E_INSET_MASK_NUM_REG);
+ if (num < 0)
+ return -EINVAL;
+
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
+
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+
+ /* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+ I40E_WRITE_FLUSH(hw);
+
+ pf->fdir.input_set[pctype] = input_set;
+ pf->fdir.inset_flag[pctype] = 1;
+ return 0;
+}
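/*
 * Illustrative only (not part of this patch): the input_set value that the
 * parse loop further below would hand to this helper for an IPv4/UDP
 * pattern whose addresses and ports are fully masked.
 */
uint64_t example_input_set = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
			     I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;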
+
/* 1. Last in item should be NULL as range is not supported.
- * 2. Supported flow type and input set: refer to array
- * default_inset_table in i40e_ethdev.c.
- * 3. Mask of fields which need to be matched should be
+ * 2. Supported patterns: refer to array i40e_supported_patterns.
+ * 3. Supported flow type and input set: refer to array
+ * valid_fdir_inset_table in i40e_ethdev.c.
+ * 4. Mask of fields which need to be matched should be
* filled with 1.
- * 4. Mask of fields which needn't to be matched should be
+ * 5. Mask of fields which needn't be matched should be
* filled with 0.
*/
static int
@@ -721,20 +2320,44 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
const struct rte_flow_item_udp *udp_spec, *udp_mask;
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_vf *vf_spec;
+
uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
enum i40e_filter_pctype pctype;
uint64_t input_set = I40E_INSET_NONE;
- uint16_t flag_offset;
+ uint16_t frag_off;
enum rte_flow_item_type item_type;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
- uint32_t j;
+ uint32_t i, j;
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
+ uint8_t raw_id = 0;
+ int32_t off_arr[I40E_MAX_FLXPLD_FIED];
+ uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
+ struct i40e_fdir_flex_pit flex_pit;
+ uint8_t next_dst_off = 0;
+ uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t flex_size;
+ bool cfg_flex_pit = true;
+ bool cfg_flex_msk = true;
+ uint16_t outer_tpid;
+ uint16_t ether_type;
+ uint32_t vtc_flow_cpu;
+ int ret;
+ memset(off_arr, 0, sizeof(off_arr));
+ memset(len_arr, 0, sizeof(len_arr));
+ memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
+ outer_tpid = i40e_get_outer_vlan(dev);
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -748,13 +2371,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = (const struct rte_flow_item_eth *)item->spec;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
- if (eth_spec || eth_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid ETH spec/mask");
- return -rte_errno;
+
+ if (eth_spec && eth_mask) {
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ !is_zero_ether_addr(&eth_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask.");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) ==
+ UINT16_MAX) {
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
+ }
+
+ ether_type = rte_be_to_cpu_16(eth_spec->type);
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type.");
+ return -rte_errno;
+ }
+ }
+
+ flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ input_set |= I40E_INSET_VLAN_INNER;
+ filter->input.flow_ext.vlan_tci =
+ vlan_spec->tci;
+ }
}
+
+ flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ layer_idx = I40E_FLXPLD_L2_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
l3 = RTE_FLOW_ITEM_TYPE_IPV4;
@@ -762,58 +2430,55 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv4 *)item->spec;
ipv4_mask =
(const struct rte_flow_item_ipv4 *)item->mask;
- if (!ipv4_spec || !ipv4_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL IPv4 spec/mask");
- return -rte_errno;
- }
- /* Check IPv4 mask and update input set */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error, EINVAL,
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid IPv4 mask.");
- return -rte_errno;
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ frag_off = ipv4_spec->hdr.fragment_offset;
+ frag_off = rte_be_to_cpu_16(frag_off);
+ if (frag_off & IPV4_HDR_OFFSET_MASK ||
+ frag_off & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
}
- if (ipv4_mask->hdr.src_addr == UINT32_MAX)
- input_set |= I40E_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
- input_set |= I40E_INSET_IPV4_DST;
- if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_TOS;
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
- input_set |= I40E_INSET_IPV4_PROTO;
-
- /* Get filter info */
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
- /* Check if it is fragment. */
- flag_offset =
- rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
- if (flag_offset & IPV4_HDR_OFFSET_MASK ||
- flag_offset & IPV4_HDR_MF_FLAG)
- flow_type = RTE_ETH_FLOW_FRAG_IPV4;
-
- /* Get the filter info */
- filter->input.flow.ip4_flow.proto =
- ipv4_spec->hdr.next_proto_id;
- filter->input.flow.ip4_flow.tos =
- ipv4_spec->hdr.type_of_service;
- filter->input.flow.ip4_flow.ttl =
- ipv4_spec->hdr.time_to_live;
- filter->input.flow.ip4_flow.src_ip =
- ipv4_spec->hdr.src_addr;
- filter->input.flow.ip4_flow.dst_ip =
- ipv4_spec->hdr.dst_addr;
+ layer_idx = I40E_FLXPLD_L3_IDX;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -822,232 +2487,276 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv6 *)item->spec;
ipv6_mask =
(const struct rte_flow_item_ipv6 *)item->mask;
- if (!ipv6_spec || !ipv6_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL IPv6 spec/mask");
- return -rte_errno;
- }
-
- /* Check IPv6 mask and update input set */
- if (ipv6_mask->hdr.payload_len) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv6 mask");
- return -rte_errno;
- }
- /* SCR and DST address of IPv6 shouldn't be masked */
- for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
- if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
- ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ if (ipv6_spec && ipv6_mask) {
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid IPv6 mask");
return -rte_errno;
}
+
+ if (!memcmp(ipv6_mask->hdr.src_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr)))
+ input_set |= I40E_INSET_IPV6_SRC;
+ if (!memcmp(ipv6_mask->hdr.dst_addr,
+ ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ vtc_flow_cpu =
+ rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(vtc_flow_cpu >>
+ I40E_FDIR_IPv6_TC_OFFSET);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto ==
+ I40E_IPV6_FRAG_HEADER)
+ flow_type =
+ RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
}
- input_set |= I40E_INSET_IPV6_SRC;
- input_set |= I40E_INSET_IPV6_DST;
-
- if ((ipv6_mask->hdr.vtc_flow &
- rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
- == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
- input_set |= I40E_INSET_IPV6_TC;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |= I40E_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
- input_set |= I40E_INSET_IPV6_HOP_LIMIT;
-
- /* Get filter info */
- filter->input.flow.ipv6_flow.tc =
- (uint8_t)(ipv6_spec->hdr.vtc_flow <<
- I40E_IPV4_TC_SHIFT);
- filter->input.flow.ipv6_flow.proto =
- ipv6_spec->hdr.proto;
- filter->input.flow.ipv6_flow.hop_limits =
- ipv6_spec->hdr.hop_limits;
-
- rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
- ipv6_spec->hdr.src_addr, 16);
- rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
- ipv6_spec->hdr.dst_addr, 16);
-
- /* Check if it is fragment. */
- if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
- flow_type = RTE_ETH_FLOW_FRAG_IPV6;
- else
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ layer_idx = I40E_FLXPLD_L3_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_TCP:
tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
- if (!tcp_spec || !tcp_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL TCP spec/mask");
- return -rte_errno;
- }
- /* Check TCP mask and update input set */
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
- rte_flow_error_set(error, EINVAL,
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid TCP mask");
- return -rte_errno;
- }
+ return -rte_errno;
+ }
- if (tcp_mask->hdr.src_port != UINT16_MAX ||
- tcp_mask->hdr.dst_port != UINT16_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TCP mask");
- return -rte_errno;
+ if (tcp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (tcp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.tcp4_flow.src_port =
- tcp_spec->hdr.src_port;
- filter->input.flow.tcp4_flow.dst_port =
- tcp_spec->hdr.dst_port;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.tcp6_flow.src_port =
- tcp_spec->hdr.src_port;
- filter->input.flow.tcp6_flow.dst_port =
- tcp_spec->hdr.dst_port;
- }
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_UDP:
udp_spec = (const struct rte_flow_item_udp *)item->spec;
udp_mask = (const struct rte_flow_item_udp *)item->mask;
- if (!udp_spec || !udp_mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "NULL UDP spec/mask");
- return -rte_errno;
- }
- /* Check UDP mask and update input set*/
- if (udp_mask->hdr.dgram_len ||
- udp_mask->hdr.dgram_cksum) {
- rte_flow_error_set(error, EINVAL,
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid UDP mask");
- return -rte_errno;
- }
+ return -rte_errno;
+ }
- if (udp_mask->hdr.src_port != UINT16_MAX ||
- udp_mask->hdr.dst_port != UINT16_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
- return -rte_errno;
+ if (udp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.udp4_flow.src_port =
- udp_spec->hdr.src_port;
- filter->input.flow.udp4_flow.dst_port =
- udp_spec->hdr.dst_port;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.udp6_flow.src_port =
- udp_spec->hdr.src_port;
- filter->input.flow.udp6_flow.dst_port =
- udp_spec->hdr.dst_port;
- }
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
break;
case RTE_FLOW_ITEM_TYPE_SCTP:
sctp_spec =
(const struct rte_flow_item_sctp *)item->spec;
sctp_mask =
(const struct rte_flow_item_sctp *)item->mask;
- if (!sctp_spec || !sctp_mask) {
- rte_flow_error_set(error, EINVAL,
+
+ if (sctp_spec && sctp_mask) {
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "NULL SCTP spec/mask");
- return -rte_errno;
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port == UINT16_MAX)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (sctp_mask->hdr.dst_port == UINT16_MAX)
+ input_set |= I40E_INSET_DST_PORT;
+ if (sctp_mask->hdr.tag == UINT32_MAX)
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag
+ = sctp_spec->hdr.tag;
+ }
}
- /* Check SCTP mask and update input set */
- if (sctp_mask->hdr.cksum) {
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ if (!raw_spec || !raw_mask) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "Invalid UDP mask");
+ "NULL RAW spec/mask");
return -rte_errno;
}
- if (sctp_mask->hdr.src_port != UINT16_MAX ||
- sctp_mask->hdr.dst_port != UINT16_MAX ||
- sctp_mask->hdr.tag != UINT32_MAX) {
+ ret = i40e_flow_check_raw_item(item, raw_spec, error);
+ if (ret < 0)
+ return ret;
+
+ off_arr[raw_id] = raw_spec->offset;
+ len_arr[raw_id] = raw_spec->length;
+
+ flex_size = 0;
+ memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
+ flex_pit.size =
+ raw_spec->length / sizeof(uint16_t);
+ flex_pit.dst_offset =
+ next_dst_off / sizeof(uint16_t);
+
+ for (i = 0; i <= raw_id; i++) {
+ if (i == raw_id)
+ flex_pit.src_offset +=
+ raw_spec->offset /
+ sizeof(uint16_t);
+ else
+ flex_pit.src_offset +=
+ (off_arr[i] + len_arr[i]) /
+ sizeof(uint16_t);
+ flex_size += len_arr[i];
+ }
+ if (((flex_pit.src_offset + flex_pit.size) >=
+ I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
+ flex_size > I40E_FDIR_MAX_FLEXLEN) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceeds maxmial payload limit.");
return -rte_errno;
}
- input_set |= I40E_INSET_SRC_PORT;
- input_set |= I40E_INSET_DST_PORT;
- input_set |= I40E_INSET_SCTP_VT;
-
- /* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
-
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
- filter->input.flow.sctp4_flow.src_port =
- sctp_spec->hdr.src_port;
- filter->input.flow.sctp4_flow.dst_port =
- sctp_spec->hdr.dst_port;
- filter->input.flow.sctp4_flow.verify_tag =
- sctp_spec->hdr.tag;
- } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
- filter->input.flow.sctp6_flow.src_port =
- sctp_spec->hdr.src_port;
- filter->input.flow.sctp6_flow.dst_port =
- sctp_spec->hdr.dst_port;
- filter->input.flow.sctp6_flow.verify_tag =
- sctp_spec->hdr.tag;
+
+ /* Store flex pit to SW */
+ ret = i40e_flow_store_flex_pit(pf, &flex_pit,
+ layer_idx, raw_id);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule.");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_pit = false;
+
+ for (i = 0; i < raw_spec->length; i++) {
+ j = i + next_dst_off;
+ filter->input.flow_ext.flexbytes[j] =
+ raw_spec->pattern[i];
+ flex_mask[j] = raw_mask->pattern[i];
}
+
+ next_dst_off += raw_spec->length;
+ raw_id++;
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = (const struct rte_flow_item_vf *)item->spec;
@@ -1075,14 +2784,44 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (input_set != i40e_get_default_input_set(pctype)) {
+ ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Conflict with the first rule's input set.");
+ return -rte_errno;
+ } else if (ret == -EINVAL) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Invalid input set.");
+ "Invalid pattern mask.");
return -rte_errno;
}
+
filter->input.flow_type = flow_type;
+ /* Store flex mask to SW */
+ ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceed maximal number of bitmasks");
+ return -rte_errno;
+ } else if (ret == -2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_msk = false;
+
+ if (cfg_flex_pit)
+ i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+
+ if (cfg_flex_msk)
+ i40e_flow_set_fdir_flex_msk(pf, pctype);
+
return 0;
}
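
Editor's note: the RAW-item branch above tracks flexible-payload placement in 16-bit words; the current raw item contributes only its own offset to src_offset, every earlier item contributes offset plus length, and the destination offset advances by the bytes already consumed. The stand-alone sketch below (offsets and lengths are made-up example values, not taken from the patch) walks the same arithmetic for two raw items.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical walk-through of the flexible-payload bookkeeping used in the
 * RTE_FLOW_ITEM_TYPE_RAW case above.  Offsets and lengths are example values.
 */
int main(void)
{
	uint32_t off_arr[2] = { 4, 2 };   /* per-item relative offsets (bytes) */
	uint32_t len_arr[2] = { 4, 8 };   /* per-item pattern lengths (bytes)  */
	uint32_t next_dst_off = 0;        /* running destination offset (bytes) */
	uint32_t raw_id, i;

	for (raw_id = 0; raw_id < 2; raw_id++) {
		uint32_t src_offset = 0, flex_size = 0;

		for (i = 0; i <= raw_id; i++) {
			if (i == raw_id)       /* current item: offset only   */
				src_offset += off_arr[i] / sizeof(uint16_t);
			else                   /* earlier items: offset + len */
				src_offset += (off_arr[i] + len_arr[i]) /
					      sizeof(uint16_t);
			flex_size += len_arr[i];
		}
		printf("raw %u: src_off %u words, dst_off %u words, flex_size %u bytes\n",
		       (unsigned)raw_id, (unsigned)src_offset,
		       (unsigned)(next_dst_off / sizeof(uint16_t)),
		       (unsigned)flex_size);
		next_dst_off += len_arr[raw_id];
	}
	return 0;
}
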
@@ -1101,55 +2840,64 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
const struct rte_flow_action_mark *mark_spec;
uint32_t index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
+ /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
- act->type != RTE_FLOW_ACTION_TYPE_DROP) {
- rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- return -rte_errno;
- }
-
- act_q = (const struct rte_flow_action_queue *)act->conf;
- filter->action.flex_off = 0;
- if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.rx_queue = act_q->index;
+ if ((!filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
+ (filter->input.flow_ext.is_vf &&
+ filter->action.rx_queue >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
- else
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
filter->action.behavior = RTE_ETH_FDIR_REJECT;
-
- filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
- filter->action.rx_queue = act_q->index;
-
- if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ break;
+ case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+ filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+ break;
+ default:
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid queue ID for FDIR.");
+ "Invalid action.");
return -rte_errno;
}
- /* Check if the next non-void item is MARK or END. */
+ /* Check if the next non-void item is MARK or FLAG or END. */
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
- act->type != RTE_FLOW_ACTION_TYPE_END) {
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->soft_id = mark_spec->id;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ return 0;
+ default:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
act, "Invalid action.");
return -rte_errno;
}
- if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
- mark_spec = (const struct rte_flow_action_mark *)act->conf;
- filter->soft_id = mark_spec->id;
-
- /* Check if the next non-void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- return -rte_errno;
- }
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
}
return 0;
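
Editor's note: from the application side, the action list accepted by this parser is the standard rte_flow one. A minimal, hypothetical sketch follows; the queue index, mark id and the caller-supplied pattern are illustrative, not taken from the patch.

#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical helper: validate an FDIR-style rule that steers matches to
 * rx queue 3 and tags them with mark id 0x1234.  The match pattern is built
 * by the caller; only the action list handled by the parser above is shown.
 * Note: port_id is uint8_t in the 17.08 rte_flow API.
 */
static int
validate_fdir_queue_mark(uint8_t port_id,
			 const struct rte_flow_item pattern[],
			 struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark  },
		{ .type = RTE_FLOW_ACTION_TYPE_END,   .conf = NULL   },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
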
@@ -1262,27 +3010,27 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
return 0;
}
+static uint16_t i40e_supported_tunnel_filter_types[] = {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IMAC,
+ ETH_TUNNEL_FILTER_IMAC,
+};
+
static int
-i40e_check_tenant_id_mask(const uint8_t *mask)
+i40e_check_tunnel_filter_type(uint8_t filter_type)
{
- uint32_t j;
- int is_masked = 0;
-
- for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
- if (*(mask + j) == UINT8_MAX) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 0;
- } else if (*(mask + j) == 0) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 1;
- } else {
- return -EINVAL;
- }
+ uint8_t i;
+
+ for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
+ if (filter_type == i40e_supported_tunnel_filter_types[i])
+ return 0;
}
- return is_masked;
+ return -1;
}
/* 1. Last in item should be NULL as range is not supported.
@@ -1302,18 +3050,17 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
- const struct rte_flow_item_eth *o_eth_spec = NULL;
- const struct rte_flow_item_eth *o_eth_mask = NULL;
- const struct rte_flow_item_vxlan *vxlan_spec = NULL;
- const struct rte_flow_item_vxlan *vxlan_mask = NULL;
- const struct rte_flow_item_eth *i_eth_spec = NULL;
- const struct rte_flow_item_eth *i_eth_mask = NULL;
- const struct rte_flow_item_vlan *vlan_spec = NULL;
- const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint8_t filter_type = 0;
bool is_vni_masked = 0;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
enum rte_flow_item_type item_type;
bool vxlan_flag = 0;
uint32_t tenant_id_be = 0;
+ int ret;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
@@ -1328,6 +3075,11 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = (const struct rte_flow_item_eth *)item->spec;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if ETH item is used for place holder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
if ((!eth_spec && eth_mask) ||
(eth_spec && !eth_mask)) {
rte_flow_error_set(error, EINVAL,
@@ -1351,50 +3103,40 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
return -rte_errno;
}
- if (!vxlan_flag)
+ if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
- else
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
}
-
- if (!vxlan_flag) {
- o_eth_spec = eth_spec;
- o_eth_mask = eth_mask;
- } else {
- i_eth_spec = eth_spec;
- i_eth_mask = eth_mask;
- }
-
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec =
(const struct rte_flow_item_vlan *)item->spec;
vlan_mask =
(const struct rte_flow_item_vlan *)item->mask;
- if (vxlan_flag) {
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
- if (!(vlan_spec && vlan_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid vlan item");
- return -rte_errno;
- }
- } else {
- if (vlan_spec || vlan_mask)
- rte_flow_error_set(error, EINVAL,
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid vlan item");
return -rte_errno;
}
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
@@ -1441,7 +3183,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
(const struct rte_flow_item_vxlan *)item->mask;
/* Check if VXLAN item is used to describe protocol.
* If yes, both spec and mask should be NULL.
- * If no, either spec or mask shouldn't be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
if ((!vxlan_spec && vxlan_mask) ||
(vxlan_spec && !vxlan_mask)) {
@@ -1453,17 +3195,25 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Check if VNI is masked. */
- if (vxlan_mask) {
+ if (vxlan_spec && vxlan_mask) {
is_vni_masked =
- i40e_check_tenant_id_mask(vxlan_mask->vni);
- if (is_vni_masked < 0) {
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (is_vni_masked) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid VNI mask");
return -rte_errno;
}
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
}
+
vxlan_flag = 1;
break;
default:
@@ -1471,95 +3221,243 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
}
- /* Check specification and mask to get the filter type */
- if (vlan_spec && vlan_mask &&
- (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
- /* If there's inner vlan */
- filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
- & I40E_TCI_MASK;
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
- else {
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ filter->filter_type = filter_type;
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
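
Editor's note: the VNI handling above widens the 3-byte, network-order VNI into a 32-bit tenant id by copying it into the low three bytes of a big-endian word before byte-swapping. A hypothetical stand-alone version of the same conversion:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical sketch of the VNI/TNI widening done by the VXLAN (and, below,
 * NVGRE) parsers: vni = {0x0A, 0x0B, 0x0C} yields tenant id 0x000A0B0C.
 */
static uint32_t
vni_to_tenant_id(const uint8_t vni[3])
{
	uint32_t tenant_id_be = 0;

	/* Skip the most significant byte, keep the 24-bit value big-endian. */
	memcpy((uint8_t *)&tenant_id_be + 1, vni, 3);

	return rte_be_to_cpu_32(tenant_id_be);
}
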
+
+static int
+i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ enum rte_flow_item_type item_type;
+ uint8_t filter_type = 0;
+ bool is_tni_masked = 0;
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ bool nvgre_flag = 0;
+ uint32_t tenant_id_be = 0;
+ int ret;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if ETH item is used for place holder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
+ item,
+ "Invalid ether spec/mask");
return -rte_errno;
}
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN;
- else {
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!nvgre_flag) {
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
+ item,
+ "Invalid vlan item");
return -rte_errno;
}
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else if ((!vlan_spec && !vlan_mask) ||
- (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
- /* If there's no inner vlan */
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_TENID;
- else if (o_eth_spec && o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask) {
- filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
- } else {
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
return -rte_errno;
}
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
- return -rte_errno;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ is_tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (is_tni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
+ }
+
+ nvgre_flag = 1;
+ break;
+ default:
+ break;
}
- } else {
+ }
+
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Not supported by tunnel filter.");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
return -rte_errno;
}
+ filter->filter_type = filter_type;
- filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+ filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
return 0;
}
static int
-i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -1570,7 +3468,7 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
&filter->consistent_tunnel_filter;
int ret;
- ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
error, tunnel_filter);
if (ret)
return ret;
@@ -1821,8 +3719,10 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Get filter specification */
- if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
- (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask != NULL) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
& I40E_TCI_MASK;
filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
@@ -1880,7 +3780,8 @@ i40e_flow_validate(struct rte_eth_dev *dev,
parse_filter_t parse_filter;
uint32_t item_num = 0; /* non-void item number of pattern*/
uint32_t i = 0;
- int ret;
+ bool flag = false;
+ int ret = I40E_NOT_SUPPORTED;
if (!pattern) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1922,16 +3823,21 @@ i40e_flow_validate(struct rte_eth_dev *dev,
i40e_pattern_skip_void_item(items, pattern);
- /* Find if there's matched parse filter function */
- parse_filter = i40e_find_parse_filter_func(items);
- if (!parse_filter) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern, "Unsupported pattern");
- return -rte_errno;
- }
-
- ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+ i = 0;
+ do {
+ parse_filter = i40e_find_parse_filter_func(items, &i);
+ if (!parse_filter && !flag) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ rte_free(items);
+ return -rte_errno;
+ }
+ if (parse_filter)
+ ret = parse_filter(dev, attr, items, actions,
+ error, &cons_filter);
+ flag = true;
+ } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
rte_free(items);
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 0758503e..100f8dc2 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -61,7 +61,7 @@
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_queue_select *qsel,
+ struct virtchnl_queue_select *qsel,
bool on);
/**
@@ -128,7 +128,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
struct i40e_pf *pf;
uint16_t vf_id, abs_vf_id, vf_msix_num;
int ret;
- struct i40e_virtchnl_queue_select qsel;
+ struct virtchnl_queue_select qsel;
if (vf == NULL)
return -EINVAL;
@@ -139,7 +139,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
abs_vf_id = vf_id + hw->func_caps.vf_base_id;
/* Notify VF that we are in VFR progress */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS);
/*
* If require a SW VF reset, a VFLR interrupt will be generated,
@@ -167,7 +167,6 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
PMD_DRV_LOG(ERR, "VF reset timeout");
return -ETIMEDOUT;
}
-
/* This is not first time to do reset, do cleanup job first */
if (vf->vsi) {
/* Disable queues */
@@ -220,7 +219,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
}
/* Reset done, Set COMPLETE flag and clear reset bit */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED);
val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
@@ -248,7 +247,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
return -EFAULT;
}
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE);
return ret;
}
@@ -277,7 +276,7 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
- struct i40e_virtchnl_version_info info;
+ struct virtchnl_version_info info;
/* Respond like a Linux PF host in order to support both DPDK VF and
* Linux VF driver. The expense is original DPDK host specific feature
@@ -286,16 +285,16 @@ i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
* DPDK VF also can't identify host driver by version number returned.
* It always assume talking with Linux PF.
*/
- info.major = I40E_VIRTCHNL_VERSION_MAJOR;
- info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ info.major = VIRTCHNL_VERSION_MAJOR;
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
if (b_op)
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_SUCCESS,
(uint8_t *)&info,
sizeof(info));
else
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
I40E_NOT_SUPPORTED,
(uint8_t *)&info,
sizeof(info));
@@ -313,22 +312,22 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
- struct i40e_virtchnl_vf_resource *vf_res = NULL;
+ struct virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
int ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
/* only have 1 VSI by default */
- len = sizeof(struct i40e_virtchnl_vf_resource) +
+ len = sizeof(struct virtchnl_vf_resource) +
I40E_DEFAULT_VF_VSI_NUM *
- sizeof(struct i40e_virtchnl_vsi_resource);
+ sizeof(struct virtchnl_vsi_resource);
vf_res = rte_zmalloc("i40e_vf_res", len, 0);
if (vf_res == NULL) {
@@ -339,21 +338,21 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
goto send_msg;
}
- vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
vf_res->num_queue_pairs = vf->vsi->nb_qps;
vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
/* Change below setting if PF host can support more VSIs for VF */
- vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
ether_addr_copy(&vf->mac_addr,
(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
ret, (uint8_t *)vf_res, len);
rte_free(vf_res);
@@ -363,7 +362,7 @@ send_msg:
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
- struct i40e_virtchnl_rxq_info *rxq,
+ struct virtchnl_rxq_info *rxq,
uint8_t crcstrip)
{
int err = I40E_SUCCESS;
@@ -431,7 +430,7 @@ i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
- struct i40e_virtchnl_txq_info *txq)
+ struct virtchnl_txq_info *txq)
{
int err = I40E_SUCCESS;
struct i40e_hmc_obj_txq tx_ctx;
@@ -480,14 +479,14 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
- struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
- (struct i40e_virtchnl_vsi_queue_config_info *)msg;
- struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct virtchnl_vsi_queue_config_info *vc_vqci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -511,9 +510,9 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
/*
* Apply VF RX queue setting to HMC.
- * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
* then the extra information of
- * 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
+ * 'struct virtchnl_queue_pair_extra_info' is needed,
* otherwise set the last parameter to NULL.
*/
if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
@@ -533,7 +532,7 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
ret, NULL, 0);
return ret;
@@ -547,15 +546,15 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
- struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
- (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
- struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct virtchnl_vsi_queue_config_ext_info *vc_vqcei =
+ (struct virtchnl_vsi_queue_config_ext_info *)msg;
+ struct virtchnl_queue_pair_ext_info *vc_qpei;
int i, ret = I40E_SUCCESS;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -578,9 +577,9 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
}
/*
* Apply VF RX queue setting to HMC.
- * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
* then the extra information of
- * 'struct i40e_virtchnl_queue_pair_ext_info' is needed,
+ * 'struct virtchnl_queue_pair_ext_info' is needed,
* otherwise set the last parameter to NULL.
*/
if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
@@ -600,7 +599,7 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
ret, NULL, 0);
return ret;
@@ -608,7 +607,7 @@ send_msg:
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_vector_map *vvm)
+ struct virtchnl_vector_map *vvm)
{
#define BITS_PER_CHAR 8
uint64_t linklistmap = 0, tempmap;
@@ -711,9 +710,9 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
int ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
- struct i40e_virtchnl_irq_map_info *irqmap =
- (struct i40e_virtchnl_irq_map_info *)msg;
- struct i40e_virtchnl_vector_map *map;
+ struct virtchnl_irq_map_info *irqmap =
+ (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_vector_map *map;
int i;
uint16_t vector_id;
unsigned long qbit_max;
@@ -721,12 +720,12 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
- if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
+ if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) {
PMD_DRV_LOG(ERR, "buffer too short");
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -773,7 +772,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
ret, NULL, 0);
return ret;
@@ -781,7 +780,7 @@ send_msg:
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
- struct i40e_virtchnl_queue_select *qsel,
+ struct virtchnl_queue_select *qsel,
bool on)
{
int ret = I40E_SUCCESS;
@@ -831,8 +830,8 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
uint16_t msglen)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_queue_select *q_sel =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
if (msg == NULL || msglen != sizeof(*q_sel)) {
ret = I40E_ERR_PARAM;
@@ -841,7 +840,7 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
ret = i40e_pf_host_switch_queues(vf, q_sel, true);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
ret, NULL, 0);
return ret;
@@ -854,13 +853,13 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_queue_select *q_sel =
- (struct i40e_virtchnl_queue_select *)msg;
+ struct virtchnl_queue_select *q_sel =
+ (struct virtchnl_queue_select *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ VIRTCHNL_OP_DISABLE_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -872,7 +871,7 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
ret = i40e_pf_host_switch_queues(vf, q_sel, false);
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
ret, NULL, 0);
return ret;
@@ -886,8 +885,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_ether_addr_list *addr_list =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
struct i40e_mac_filter_info filter;
int i;
struct ether_addr *mac;
@@ -895,7 +894,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -920,7 +919,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret, NULL, 0);
return ret;
@@ -933,15 +932,15 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_ether_addr_list *addr_list =
- (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct virtchnl_ether_addr_list *addr_list =
+ (struct virtchnl_ether_addr_list *)msg;
int i;
struct ether_addr *mac;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ VIRTCHNL_OP_DEL_ETH_ADDR,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -962,7 +961,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
ret, NULL, 0);
return ret;
@@ -974,15 +973,15 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
int i;
uint16_t *vid;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_ADD_VLAN,
+ VIRTCHNL_OP_ADD_VLAN,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1002,7 +1001,7 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
ret, NULL, 0);
return ret;
@@ -1015,15 +1014,15 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
- (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct virtchnl_vlan_filter_list *)msg;
int i;
uint16_t *vid;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_DEL_VLAN,
+ VIRTCHNL_OP_DEL_VLAN,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1042,7 +1041,7 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
}
send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
ret, NULL, 0);
return ret;
@@ -1056,15 +1055,15 @@ i40e_pf_host_process_cmd_config_promisc_mode(
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_promisc_info *promisc =
- (struct i40e_virtchnl_promisc_info *)msg;
+ struct virtchnl_promisc_info *promisc =
+ (struct virtchnl_promisc_info *)msg;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
bool unicast = FALSE, multicast = FALSE;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
@@ -1074,21 +1073,21 @@ i40e_pf_host_process_cmd_config_promisc_mode(
goto send_msg;
}
- if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ if (promisc->flags & FLAG_VF_UNICAST_PROMISC)
unicast = TRUE;
ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
vf->vsi->seid, unicast, NULL, true);
if (ret != I40E_SUCCESS)
goto send_msg;
- if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ if (promisc->flags & FLAG_VF_MULTICAST_PROMISC)
multicast = TRUE;
ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
multicast, NULL);
send_msg:
i40e_pf_host_send_msg_to_vf(vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
return ret;
}
@@ -1099,12 +1098,12 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
i40e_update_vsi_stats(vf->vsi);
if (b_op)
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
I40E_SUCCESS,
(uint8_t *)&vf->vsi->eth_stats,
sizeof(vf->vsi->eth_stats));
else
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS,
I40E_NOT_SUPPORTED,
(uint8_t *)&vf->vsi->eth_stats,
sizeof(vf->vsi->eth_stats));
@@ -1113,37 +1112,47 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
}
static int
-i40e_pf_host_process_cmd_cfg_vlan_offload(
- struct i40e_pf_vf *vf,
- uint8_t *msg,
- uint16_t msglen,
- bool b_op)
+i40e_pf_host_process_cmd_enable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_vlan_offload_info *offload =
- (struct i40e_virtchnl_vlan_offload_info *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
vf,
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
I40E_NOT_SUPPORTED, NULL, 0);
return ret;
}
- if (msg == NULL || msglen != sizeof(*offload)) {
- ret = I40E_ERR_PARAM;
- goto send_msg;
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, TRUE);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to enable vlan stripping");
+
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
+{
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
}
- ret = i40e_vsi_config_vlan_stripping(vf->vsi,
- !!offload->enable_vlan_strip);
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi, FALSE);
if (ret != 0)
- PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");
+ PMD_DRV_LOG(ERR, "Failed to disable vlan stripping");
-send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
- ret, NULL, 0);
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ ret, NULL, 0);
return ret;
}
@@ -1155,8 +1164,8 @@ i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
bool b_op)
{
int ret = I40E_SUCCESS;
- struct i40e_virtchnl_pvid_info *tpid_info =
- (struct i40e_virtchnl_pvid_info *)msg;
+ struct virtchnl_pvid_info *tpid_info =
+ (struct virtchnl_pvid_info *)msg;
if (!b_op) {
i40e_pf_host_send_msg_to_vf(
@@ -1183,39 +1192,39 @@ send_msg:
void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
- struct i40e_virtchnl_pf_event event;
+ struct virtchnl_pf_event event;
- event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event = VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */
switch (dev->data->dev_link.link_speed) {
case ETH_SPEED_NUM_100M:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB;
break;
case ETH_SPEED_NUM_1G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB;
break;
case ETH_SPEED_NUM_10G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB;
break;
case ETH_SPEED_NUM_20G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB;
break;
case ETH_SPEED_NUM_25G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB;
break;
case ETH_SPEED_NUM_40G:
- event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
default:
event.event_data.link_event.link_speed =
- I40E_LINK_SPEED_UNKNOWN;
+ VIRTCHNL_LINK_SPEED_UNKNOWN;
break;
}
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
@@ -1231,7 +1240,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
struct i40e_pf_vf *vf;
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
- struct rte_pmd_i40e_mb_event_param cb_param;
+ struct rte_pmd_i40e_mb_event_param ret_param;
bool b_op = TRUE;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
@@ -1251,100 +1260,104 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
* initialise structure to send to user application
* will return response from user in retval field
*/
- cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
- cb_param.vfid = vf_id;
- cb_param.msg_type = opcode;
- cb_param.msg = (void *)msg;
- cb_param.msglen = msglen;
+ ret_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ ret_param.vfid = vf_id;
+ ret_param.msg_type = opcode;
+ ret_param.msg = (void *)msg;
+ ret_param.msglen = msglen;
/**
* Ask user application if we're allowed to perform those functions.
- * If we get cb_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
+ * If we get ret_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
* then business as usual.
* If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
* do nothing and send not_supported to VF. As PF must send a response
* to VF and ACK/NACK is not defined.
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
- if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
+ if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
opcode);
b_op = FALSE;
}
switch (opcode) {
- case I40E_VIRTCHNL_OP_VERSION :
+ case VIRTCHNL_OP_VERSION:
PMD_DRV_LOG(INFO, "OP_VERSION received");
i40e_pf_host_process_cmd_version(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_RESET_VF :
+ case VIRTCHNL_OP_RESET_VF:
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
i40e_pf_host_process_cmd_reset_vf(vf);
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
if (b_op) {
i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
i40e_notify_vf_link_status(dev, vf);
} else {
i40e_pf_host_send_msg_to_vf(
- vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_NOT_SUPPORTED, NULL, 0);
}
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
i40e_pf_host_process_cmd_add_ether_address(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
i40e_pf_host_process_cmd_del_ether_address(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
msglen, b_op);
break;
- case I40E_VIRTCHNL_OP_GET_STATS:
+ case VIRTCHNL_OP_GET_STATS:
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
i40e_pf_host_process_cmd_get_stats(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
- PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
- i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
- msglen, b_op);
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_ENABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_enable_vlan_strip(vf, b_op);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
+ i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
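
Editor's note: for the mailbox-permission flow above, the PF hands each VF request to the application through the RTE_ETH_EVENT_VF_MBOX callback and only executes it if retval comes back as PROCEED. Below is a hypothetical application-side callback; the "allow only VF 0" policy is purely illustrative.

#include <rte_ethdev.h>
#include <rte_pmd_i40e.h>

/* Hypothetical application callback: let VF 0 do anything, NACK the rest.
 * The i40e PF host code above fills ret_param before invoking the callback
 * and reads back .retval afterwards; a NOOP_ACK/NOOP_NACK answer results in
 * a not_supported reply to the VF.
 */
static int
vf_mbox_cb(uint8_t port_id __rte_unused, enum rte_eth_event_type type,
	   void *cb_arg __rte_unused, void *ret_param)
{
	struct rte_pmd_i40e_mb_event_param *p = ret_param;

	if (type != RTE_ETH_EVENT_VF_MBOX || p == NULL)
		return 0;

	p->retval = (p->vfid == 0) ? RTE_PMD_I40E_MB_EVENT_PROCEED :
				     RTE_PMD_I40E_MB_EVENT_NOOP_NACK;
	return 0;
}

/* Registered once, e.g. during port setup:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
 *                               vf_mbox_cb, NULL);
 */
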
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index b4c22876..7afb7eae 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -35,7 +35,7 @@
#define _I40E_PF_H_
/* VERSION info to exchange between VF and PF host. In case VF works with
- * ND kernel driver, it reads I40E_VIRTCHNL_VERSION_MAJOR/MINOR. In
+ * ND kernel driver, it reads VIRTCHNL_VERSION_MAJOR/MINOR. In
* case works with DPDK host, it reads version below. Then VF realize who it
* is talking to and use proper language to communicate.
* */
@@ -49,45 +49,44 @@
#define I40E_DPDK_OFFSET 0x100
/* DPDK pf driver specific command to VF */
-enum i40e_virtchnl_ops_dpdk {
+enum virtchnl_ops_dpdk {
/*
* Keep some gap between Linux PF commands and
* DPDK PF extended commands.
*/
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = I40E_VIRTCHNL_OP_VERSION +
- I40E_DPDK_OFFSET,
- I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID = VIRTCHNL_OP_VERSION +
+ I40E_DPDK_OFFSET,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
};
/* A structure to support extended info of a receive queue. */
-struct i40e_virtchnl_rxq_ext_info {
+struct virtchnl_rxq_ext_info {
uint8_t crcstrip;
};
/*
* A structure to support extended info of queue pairs, an additional field
- * is added, comparing to original 'struct i40e_virtchnl_queue_pair_info'.
+ * is added, comparing to original 'struct virtchnl_queue_pair_info'.
*/
-struct i40e_virtchnl_queue_pair_ext_info {
+struct virtchnl_queue_pair_ext_info {
/* vsi_id and queue_id should be identical for both rx and tx queues.*/
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
- struct i40e_virtchnl_rxq_ext_info rxq_ext;
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+ struct virtchnl_rxq_ext_info rxq_ext;
};
/*
* A structure to support extended info of VSI queue pairs,
- * 'struct i40e_virtchnl_queue_pair_ext_info' is used, see its original
- * of 'struct i40e_virtchnl_queue_pair_info'.
+ * 'struct virtchnl_queue_pair_ext_info' is used, see its original
+ * of 'struct virtchnl_queue_pair_info'.
*/
-struct i40e_virtchnl_vsi_queue_config_ext_info {
+struct virtchnl_vsi_queue_config_ext_info {
uint16_t vsi_id;
uint16_t num_queue_pairs;
- struct i40e_virtchnl_queue_pair_ext_info qpair[0];
+ struct virtchnl_queue_pair_ext_info qpair[0];
};
-struct i40e_virtchnl_vlan_offload_info {
+struct virtchnl_vlan_offload_info {
uint16_t vsi_id;
uint8_t enable_vlan_strip;
uint8_t reserved;
@@ -106,7 +105,7 @@ struct i40e_virtchnl_vlan_offload_info {
* enable op, needs to specify the pvid. PF returns status
* code in retval.
*/
-struct i40e_virtchnl_pvid_info {
+struct virtchnl_pvid_info {
uint16_t vsi_id;
struct i40e_vsi_vlan_pvid_info info;
};
@@ -114,7 +113,7 @@ struct i40e_virtchnl_pvid_info {
int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset);
void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint16_t abs_vf_id, uint32_t opcode,
- __rte_unused uint32_t retval,
+ uint32_t retval,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
int i40e_pf_host_uninit(struct rte_eth_dev *dev);
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 351cb94d..d42c23c0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1257,7 +1257,7 @@ end_of_tx:
return nb_tx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
@@ -1608,7 +1608,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq = dev->data->rx_queues[rx_queue_id];
/*
- * rx_queue_id is queue id aplication refers to, while
+ * rx_queue_id is queue id application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
@@ -1639,7 +1639,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is queue id application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
@@ -1664,7 +1664,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
txq = dev->data->tx_queues[tx_queue_id];
/*
- * tx_queue_id is queue id aplication refers to, while
+ * tx_queue_id is queue id application refers to, while
* txq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
@@ -2474,7 +2474,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
case I40E_FLAG_HEADER_SPLIT_DISABLED:
default:
rxq->rx_hdr_len = 0;
- rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
rxq->hs_mode = i40e_header_split_none;
break;
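
Editor's note: the RTE_ALIGN_FLOOR change above matters because the rounded RX buffer length must never exceed the mbuf data room. A tiny hypothetical illustration with made-up sizes:

#include <stdint.h>
#include <stdio.h>
#include <rte_common.h>

/* Hypothetical numbers: with a 2000-byte data room and the 128-byte
 * (1 << I40E_RXQ_CTX_DBUFF_SHIFT) granularity, rounding up would claim
 * 2048 bytes the mbuf does not have, while rounding down stays in bounds.
 */
static void
show_rx_buf_len_rounding(void)
{
	const uint16_t buf_size = 2000;
	const uint16_t align = 1 << 7;

	printf("ceil: %u, floor: %u\n",
	       (unsigned)RTE_ALIGN(buf_size, align),        /* 2048 */
	       (unsigned)RTE_ALIGN_FLOOR(buf_size, align)); /* 1920 */
}
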
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 69209668..39a6da06 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -102,7 +102,7 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
return pkt_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
struct i40e_tx_entry *txep;
@@ -159,7 +159,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
return txq->tx_rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 3b4a352e..779f14e5 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -87,6 +87,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
mb1 = rxep[1].mbuf;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -117,7 +119,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
}
static inline void
-desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
+desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4],
struct rte_mbuf **rx_pkts)
{
const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
@@ -203,6 +205,12 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
@@ -252,6 +260,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-rxq->crc_len, /* sub crc on pkt_len */
0, 0 /* ignore pkt_type field */
);
+ /*
+ * compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
__m128i dd_check, eop_check;
/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
@@ -296,6 +313,19 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF, /* pkt_type set as unknown */
0xFF, 0xFF /*pkt_type set as unknown */
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
@@ -621,11 +651,5 @@ i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
- /* need SSE4.1 support */
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- return -1;
-#endif
-
return i40e_rx_vec_dev_conf_condition_check_default(dev);
}
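
Editor's note: the new RTE_BUILD_BUG_ON checks in the vector RX path turn the assumed rte_mbuf field layout into compile-time assertions instead of silent corruption. A stand-alone hypothetical sketch of the same idiom (field names as of this release):

#include <stddef.h>
#include <rte_common.h>
#include <rte_mbuf.h>

/* Hypothetical illustration: if rte_mbuf were rearranged so buf_physaddr no
 * longer sat 8 bytes after buf_addr, this would fail to compile rather than
 * let the 128-bit loads above read the wrong fields.
 */
static inline void
check_mbuf_layout(void)
{
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
			 offsetof(struct rte_mbuf, buf_addr) + 8);
}
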
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
new file mode 100644
index 00000000..d90313af
--- /dev/null
+++ b/drivers/net/i40e/i40e_tm.c
@@ -0,0 +1,976 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops i40e_tm_ops = {
+ .capabilities_get = i40e_tm_capabilities_get,
+ .shaper_profile_add = i40e_shaper_profile_add,
+ .shaper_profile_delete = i40e_shaper_profile_del,
+ .node_add = i40e_node_add,
+ .node_delete = i40e_node_delete,
+ .node_type_get = i40e_node_type_get,
+ .level_capabilities_get = i40e_level_capabilities_get,
+ .node_capabilities_get = i40e_node_capabilities_get,
+ .hierarchy_commit = i40e_hierarchy_commit,
+};
+
+int
+i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &i40e_tm_ops;
+
+ return 0;
+}
+
+void
+i40e_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+
+ /* initialize node configuration */
+ pf->tm_conf.root = NULL;
+ TAILQ_INIT(&pf->tm_conf.tc_list);
+ TAILQ_INIT(&pf->tm_conf.queue_list);
+ pf->tm_conf.nb_tc_node = 0;
+ pf->tm_conf.nb_queue_node = 0;
+ pf->tm_conf.committed = false;
+}
+
+void
+i40e_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_tc_node = 0;
+ if (pf->tm_conf.root) {
+ rte_free(pf->tm_conf.root);
+ pf->tm_conf.root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint16_t
+i40e_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ uint16_t sum = 0;
+ int i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (main_vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+
+ return sum;
+}
+
+static int
+i40e_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t tc_nb = i40e_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->func_caps.num_tx_qp)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * Support port + TCs + queues.
+ * This shows the maximum capability, not the current configuration.
+ */
+ cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
+ cap->n_levels_max = 3; /* port, TC, queue */
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->func_caps.num_tx_qp;
+ /**
+ * HW supports SP, but the driver does not support it for now.
+ * So all the nodes should have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin now.
+ * So, all the nodes should have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static inline struct i40e_tm_shaper_profile *
+i40e_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_shaper_profile_list *shaper_profile_list =
+ &pf->tm_conf.shaper_profile_list;
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = i40e_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
+ sizeof(struct i40e_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ (void)rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+i40e_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct i40e_tm_node *
+i40e_tm_node_search(struct rte_eth_dev *dev,
+ uint32_t node_id, enum i40e_tm_node_type *node_type)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node *tm_node;
+
+ if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_PORT;
+ return pf->tm_conf.root;
+ }
+
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = I40E_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* shared shapers are not supported */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for TC or queue node */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need to check whether the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_node *tm_node;
+ struct i40e_tm_node *parent_node;
+ uint16_t tc_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = i40e_node_param_check(node_id, parent_node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (i40e_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ shaper_profile = i40e_shaper_profile_search(dev,
+ params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+
+ /* the node is the root node if it has no parent */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > I40E_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (pf->tm_conf.root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ pf->tm_conf.root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = i40e_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
+ parent_node_type != I40E_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ /* check the TC number */
+ tc_nb = i40e_tc_nb_get(dev);
+ if (pf->tm_conf.nb_tc_node >= tc_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check the queue number */
+ if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For a queue node, the node id is the queue id.
+ */
+ if (node_id >= hw->func_caps.num_tx_qp) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("i40e_tm_node",
+ sizeof(struct i40e_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = pf->tm_conf.root;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
+ tm_node, node);
+ pf->tm_conf.nb_tc_node++;
+ } else {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
+ tm_node, node);
+ pf->tm_conf.nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
+static int
+i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == I40E_TM_NODE_TYPE_PORT) {
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ pf->tm_conf.root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == I40E_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ pf->tm_conf.nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ pf->tm_conf.nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
+ struct i40e_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+i40e_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= I40E_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == I40E_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->nonleaf.shaper_private_rate_max = 5000000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ cap->nonleaf.sched_n_children_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (level_id == I40E_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->func_caps.num_tx_qp;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
+ cap->non_leaf_nodes_identical = true;
+ }
+ cap->leaf_nodes_identical = true;
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->leaf.shaper_private_rate_max = 5000000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum i40e_tm_node_type node_type;
+ struct i40e_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = i40e_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 40Gbps -> 5GBps */
+ cap->shaper_private_rate_max = 5000000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == I40E_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ I40E_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->func_caps.num_tx_qp;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+i40e_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct i40e_tm_node *tm_node;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ uint64_t bw;
+ uint8_t tc_map;
+ int ret;
+ int i;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!pf->tm_conf.root)
+ goto done;
+
+ vsi = pf->main_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /**
+ * Bandwidth control for the port and the TCs in parallel is not supported.
+ * If the port has a max bandwidth, the TCs should have none.
+ */
+ /* port */
+ bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
+ if (bw) {
+ /* check if any TC has a max bandwidth */
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port and TC max bandwidth"
+ " in parallel";
+ goto fail_clear;
+ }
+ }
+
+ /* convert bytes/sec to units of 50 Mbps */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ /* set the max bandwidth */
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
+ (uint16_t)bw, 0, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set port max bandwidth";
+ goto fail_clear;
+ }
+
+ goto done;
+ }
+
+ /* TC */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ tc_map = vsi->enabled_tc;
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (!tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "TC without queue assigned";
+ goto fail_clear;
+ }
+
+ i = 0;
+ while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
+ i++;
+ if (i >= I40E_MAX_TRAFFIC_CLASS) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "cannot find the TC";
+ goto fail_clear;
+ }
+ tc_map &= ~BIT_ULL(i);
+
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (!bw)
+ continue;
+
+ /* convert bytes/sec to units of 50 Mbps */
+ bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;
+
+ tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw);
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (bw) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "not support queue QoS";
+ goto fail_clear;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "fail to set TC max bandwidth";
+ goto fail_clear;
+ }
+
+done:
+ pf->tm_conf.committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ i40e_tm_conf_uninit(dev);
+ i40e_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
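The new i40e_tm.c above plugs the PMD into the generic rte_tm API: an application queries capabilities, registers shaper profiles, builds a port -> TC -> queue hierarchy with priority 0 and weight 1 everywhere, and then commits the hierarchy before starting the port. The following sketch is not part of this patch; it shows one plausible way to drive these ops through the rte_tm wrappers of this release, assuming the 17.08 prototypes (uint8_t port id). Port 0, node ids 1000/900, queue 0 and the 1 Gbps port shaper are arbitrary examples.

/*
 * Hedged sketch, not from the patch: exercising the new i40e TM ops
 * through the generic rte_tm API.
 */
#include <string.h>
#include <rte_tm.h>

static int i40e_tm_example(uint8_t port_id)
{
	struct rte_tm_error err;
	struct rte_tm_shaper_params port_sp = { 0 }; /* shaped port */
	struct rte_tm_shaper_params flat_sp = { 0 }; /* no rate limit */
	struct rte_tm_node_params np = { 0 };

	/* peak.rate is in bytes/sec: 1 Gbps -> 125000000 */
	port_sp.peak.rate = 125000000;
	if (rte_tm_shaper_profile_add(port_id, 1, &port_sp, &err) ||
	    rte_tm_shaper_profile_add(port_id, 2, &flat_sp, &err))
		return -1;

	/* root (port) node: the driver insists on priority 0, weight 1
	 * and exactly one SP priority */
	np.shaper_profile_id = 1;
	np.nonleaf.n_sp_priorities = 1;
	if (rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
			    RTE_TM_NODE_LEVEL_ID_ANY, &np, &err))
		return -1;

	/* TC node and queue node: the driver applies the leaf checks to
	 * both, so WRED must be explicitly disabled and cman left at 0 */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 2;
	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
	if (rte_tm_node_add(port_id, 900, 1000, 0, 1,
			    RTE_TM_NODE_LEVEL_ID_ANY, &np, &err) ||
	    rte_tm_node_add(port_id, 0, 900, 0, 1,
			    RTE_TM_NODE_LEVEL_ID_ANY, &np, &err))
		return -1;

	/* apply before the port is started; clear the config on failure */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}

With the 1 Gbps port shaper, the commit path above converts 125000000 bytes/sec into 1000 Mbps and programs 1000 / 50 = 20 units of the 50 Mbps granularity through i40e_aq_config_vsi_bw_limit().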
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index f7ce62bb..f12b7f4a 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -40,15 +40,6 @@
#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"
-/* The max bandwidth of i40e is 40Gbps. */
-#define I40E_QOS_BW_MAX 40000
-/* The bandwidth should be the multiple of 50Mbps. */
-#define I40E_QOS_BW_GRANULARITY 50
-/* The min bandwidth weight is 1. */
-#define I40E_QOS_BW_WEIGHT_MIN 1
-/* The max bandwidth weight is 127. */
-#define I40E_QOS_BW_WEIGHT_MAX 127
-
int
rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
{
@@ -1468,7 +1459,7 @@ rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
return ret;
}
-#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
#define I40E_MAX_PROFILE_NUM 16
static void
@@ -1520,9 +1511,6 @@ i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
return status;
}
-#define I40E_PROFILE_INFO_SIZE 48
-#define I40E_MAX_PROFILE_NUM 16
-
/* Check if the profile info exists */
static int
i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
@@ -1557,11 +1545,7 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
sizeof(struct i40e_profile_section_header));
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
- if ((pinfo->track_id == p->track_id) &&
- !memcmp(&pinfo->version, &p->version,
- sizeof(struct i40e_ddp_version)) &&
- !memcmp(&pinfo->name, &p->name,
- I40E_DDP_NAME_SIZE)) {
+ if (pinfo->track_id == p->track_id) {
PMD_DRV_LOG(INFO, "Profile exists.");
rte_free(buff);
return 1;
@@ -1587,6 +1571,13 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
int is_exist;
enum i40e_status_code status = I40E_SUCCESS;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ return -ENOTSUP;
+ }
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
@@ -1623,6 +1614,10 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return -EINVAL;
}
track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ PMD_DRV_LOG(ERR, "Invalid track_id");
+ return -EINVAL;
+ }
/* Find profile segment */
profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
@@ -1642,46 +1637,231 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return -EINVAL;
}
+ /* Check if the profile is already loaded */
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec,
+ op == RTE_PMD_I40E_PKG_OP_WR_ADD);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
- /* Check if the profile exists */
- i40e_generate_profile_info_sec(
- ((struct i40e_profile_segment *)profile_seg_hdr)->name,
- &((struct i40e_profile_segment *)profile_seg_hdr)->version,
- track_id, profile_info_sec, 1);
- is_exist = i40e_check_profile_info(port, profile_info_sec);
- if (is_exist > 0) {
+ if (is_exist) {
PMD_DRV_LOG(ERR, "Profile already exists.");
rte_free(profile_info_sec);
- return 1;
- } else if (is_exist < 0) {
- PMD_DRV_LOG(ERR, "Failed to check profile.");
+ return -EEXIST;
+ }
+ } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ if (!is_exist) {
+ PMD_DRV_LOG(ERR, "Profile does not exist.");
rte_free(profile_info_sec);
- return -EINVAL;
+ return -EACCES;
}
+ }
- /* Write profile to HW */
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ status = i40e_rollback_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+ } else {
status = i40e_write_profile(
- hw,
- (struct i40e_profile_segment *)profile_seg_hdr,
- track_id);
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
if (status) {
- PMD_DRV_LOG(ERR, "Failed to write profile.");
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to write profile for add.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
rte_free(profile_info_sec);
return status;
}
+ }
- /* Add profile info to info list */
+ if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
+ /* Modify loaded profiles info list */
status = i40e_add_rm_profile_info(hw, profile_info_sec);
- if (status)
- PMD_DRV_LOG(ERR, "Failed to add profile info.");
- } else {
- PMD_DRV_LOG(ERR, "Operation not supported.");
+ if (status) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
+ }
}
rte_free(profile_info_sec);
return status;
}
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
+ uint8_t *info_buff, uint32_t info_size,
+ enum rte_pmd_i40e_package_info type)
+{
+ uint32_t ret_size;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *i40e_seg_hdr;
+ struct i40e_generic_seg_header *note_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+
+ if (!info_buff) {
+ PMD_DRV_LOG(ERR, "Output info buff is invalid.");
+ return -EINVAL;
+ }
+
+ if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Package buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)pkg_buff;
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+
+ /* Find global notes segment */
+ note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
+ pkg_hdr);
+
+ /* Find i40e profile segment */
+ i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+
+ /* get global header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get global note size */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ if (note_seg_hdr == NULL)
+ ret_size = 0;
+ else
+ ret_size = note_seg_hdr->size;
+ *(uint32_t *)info_buff = ret_size;
+ return I40E_SUCCESS;
+ }
+
+ /* get global note */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
+ if (note_seg_hdr == NULL)
+ return -ENOTSUP;
+ if (info_size < note_seg_hdr->size) {
+ PMD_DRV_LOG(ERR, "Information buffer size is too small");
+ return -EINVAL;
+ }
+ memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
+ return I40E_SUCCESS;
+ }
+
+ /* get i40e segment header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ if (!i40e_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get number of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ *(uint32_t *)info_buff =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ return I40E_SUCCESS;
+ }
+
+ /* get list of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
+ uint32_t dev_num;
+ dev_num =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ memcpy(info_buff,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
+ sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
+ return I40E_SUCCESS;
+ }
+
+ PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
+ return -EINVAL;
+}
+
int
rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
{
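The new rte_pmd_i40e_get_ddp_info() above answers queries about a DDP package image without touching the hardware. As a hedged illustration (not from the patch), the helper below prints the global header of a package that has already been read into memory; only the prototype and the RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER query added by this patch are relied on, the surrounding names are examples.

/* Hedged sketch: print the global header of a DDP package image already
 * read into pkg/pkg_size (file I/O not shown). */
#include <inttypes.h>
#include <stdio.h>
#include <rte_pmd_i40e.h>

static void show_ddp_header(uint8_t *pkg, uint32_t pkg_size)
{
	struct rte_pmd_i40e_profile_info info;

	if (rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
				      (uint8_t *)&info, sizeof(info),
				      RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER))
		return;

	printf("track_id 0x%08" PRIx32 ", name %.32s\n",
	       info.track_id, (const char *)info.name);
}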
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index 1efb2c4b..356fa89d 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -59,7 +59,7 @@ enum rte_pmd_i40e_mb_event_rsp {
*/
struct rte_pmd_i40e_mb_event_param {
uint16_t vfid; /**< Virtual Function number */
- uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */
+ uint16_t msg_type; /**< VF to PF message type, see virtchnl_ops */
uint16_t retval; /**< return value */
void *msg; /**< pointer to message */
uint16_t msglen; /**< length of the message */
@@ -71,9 +71,26 @@ struct rte_pmd_i40e_mb_event_param {
enum rte_pmd_i40e_package_op {
RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */
+ RTE_PMD_I40E_PKG_OP_WR_DEL, /**< load package and delete from info list */
+ RTE_PMD_I40E_PKG_OP_WR_ONLY, /**< load package without modifying info list */
RTE_PMD_I40E_PKG_OP_MAX = 32
};
+/**
+ * Types of package information.
+ */
+enum rte_pmd_i40e_package_info {
+ RTE_PMD_I40E_PKG_INFO_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES,
+ RTE_PMD_I40E_PKG_INFO_GLOBAL_MAX = 1024,
+ RTE_PMD_I40E_PKG_INFO_HEADER,
+ RTE_PMD_I40E_PKG_INFO_DEVID_NUM,
+ RTE_PMD_I40E_PKG_INFO_DEVID_LIST,
+ RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF
+};
+
#define RTE_PMD_I40E_DDP_NAME_SIZE 32
/**
@@ -88,6 +105,14 @@ struct rte_pmd_i40e_ddp_version {
};
/**
+ * Device ID for dynamic device personalization.
+ */
+struct rte_pmd_i40e_ddp_device_id {
+ uint32_t vendor_dev_id;
+ uint32_t sub_vendor_dev_id;
+};
+
+/**
* Profile information in profile info list.
*/
struct rte_pmd_i40e_profile_info {
@@ -98,6 +123,8 @@ struct rte_pmd_i40e_profile_info {
uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE];
};
+#define RTE_PMD_I40E_DDP_OWNER_UNKNOWN 0xFF
+
/**
* Profile information list returned from HW.
*/
@@ -492,13 +519,36 @@ int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
* - (0) if successful.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
- * - (1) if profile exists.
+ * - (-EEXIST) if profile exists.
+ * - (-EACCES) if profile does not exist.
+ * - (-ENOTSUP) if operation not supported.
*/
int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op);
/**
+ * rte_pmd_i40e_get_ddp_info - Get profile's info
+ * @param pkg
+ * buffer of package.
+ * @param pkg_size
+ * package buffer size
+ * @param info
+ * buffer for response
+ * @param size
+ * response buffer size
+ * @param type
+ * type of information requested
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if information type not supported by the profile.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size,
+ uint8_t *info, uint32_t size,
+ enum rte_pmd_i40e_package_info type);
+
+/**
* rte_pmd_i40e_get_ddp_list - Get loaded profile list
* @param port
* port id
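rte_pmd_i40e_process_ddp_package() now distinguishes add, write-only and delete operations and reports -EEXIST, -EACCES and -ENOTSUP instead of the old positive return. The fragment below is a hedged sketch (not from the patch) of the add-then-delete flow, assuming the package image is already held in buf/size and the helper name is hypothetical.

/* Hedged sketch: load a profile and later roll it back with the new
 * RTE_PMD_I40E_PKG_OP_WR_DEL operation. */
#include <errno.h>
#include <stdio.h>
#include <rte_pmd_i40e.h>

static void ddp_add_then_del(uint8_t port, uint8_t *buf, uint32_t size)
{
	int ret;

	ret = rte_pmd_i40e_process_ddp_package(port, buf, size,
					       RTE_PMD_I40E_PKG_OP_WR_ADD);
	if (ret == -EEXIST)
		printf("profile already loaded on port %u\n", port);

	/* ... later: remove the same profile again ... */
	ret = rte_pmd_i40e_process_ddp_package(port, buf, size,
					       RTE_PMD_I40E_PKG_OP_WR_DEL);
	if (ret == -EACCES)
		printf("profile was not loaded on port %u\n", port);
}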
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index 3b0e805d..20cc9801 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -38,3 +38,10 @@ DPDK_17.05 {
rte_pmd_i40e_get_ddp_list;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_pmd_i40e_get_ddp_info;
+
+} DPDK_17.05;
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 5529d81c..5e57cb35 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -119,11 +119,12 @@ else
SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
endif
-ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
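The bypass objects are now gated on the driver-specific CONFIG_RTE_LIBRTE_IXGBE_BYPASS option instead of the generic RTE_NIC_BYPASS, and the new ixgbe_tm.c is always built. Assuming the option lives in the usual build configuration (e.g. config/common_base) and defaults to n, enabling bypass support would look like:

CONFIG_RTE_LIBRTE_IXGBE_BYPASS=y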
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index a61617be..8c833b44 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2017.03.29 released by the team which develop
+cid-10g-shared-code.2017.05.16 released by the team which develops
basic drivers for any ixgbe NIC. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index 4dabb434..7f85713e 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -504,7 +504,8 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
}
/* Initialize the LED link active for LED blink support */
- hw->mac.ops.init_led_link_act(hw);
+ if (hw->mac.ops.init_led_link_act)
+ hw->mac.ops.init_led_link_act(hw);
if (status != IXGBE_SUCCESS)
DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index 674dc144..9862391b 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -86,6 +86,10 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
/* Manageability interface */
mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->mac.ops.led_on = NULL;
+ hw->mac.ops.led_off = NULL;
+ break;
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
@@ -459,9 +463,13 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_A_10G_T:
- case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->phy.type = ixgbe_phy_ext_1g_t;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
hw->phy.type = ixgbe_phy_fw;
@@ -751,6 +759,11 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
phy->ops.set_phy_power = NULL;
phy->ops.get_firmware_version = NULL;
break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
+ phy->ops.set_phy_power = NULL;
+ break;
default:
phy->ops.identify = ixgbe_identify_phy_x550em;
}
@@ -945,6 +958,11 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
ixgbe_write_i2c_combined_generic_unlocked;
link->addr = IXGBE_CS4227;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
+ mac->ops.setup_fc = NULL;
+ mac->ops.setup_eee = NULL;
+ mac->ops.init_led_link_act = NULL;
+ }
return ret_val;
}
@@ -1915,6 +1933,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
ixgbe_setup_mac_link_sfp_x550em;
break;
case ixgbe_media_type_copper:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
+ break;
if (hw->mac.type == ixgbe_mac_X550EM_a) {
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
@@ -2380,10 +2400,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
break;
- case IXGBE_DEV_ID_X550EM_X_1G_T:
- phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
- phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
- break;
default:
break;
}
@@ -2414,6 +2430,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
case ixgbe_phy_ext_1g_t:
/* link is managed by FW */
phy->ops.setup_link = NULL;
+ phy->ops.reset = NULL;
break;
case ixgbe_phy_x550em_xfi:
/* link is managed by HW */
@@ -2565,10 +2582,9 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status != IXGBE_SUCCESS) {
ERROR_REPORT2(IXGBE_ERROR_CAUTION,
- "semaphore failed with %d", status);
+ "semaphore failed with %d", status);
return IXGBE_ERR_SWFW_SYNC;
}
-
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
index 70069284..38a44936 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -36,6 +36,7 @@
#include <rte_ethdev.h>
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass_api.h"
+#include "rte_pmd_ixgbe.h"
#define BYPASS_STATUS_OFF_MASK 3
@@ -284,7 +285,7 @@ ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
/* disable the timer with timeout of zero */
- if (timeout == RTE_BYPASS_TMT_OFF) {
+ if (timeout == RTE_PMD_IXGBE_BYPASS_TMT_OFF) {
status = 0x0; /* WDG enable off */
mask = BYPASS_WDT_ENABLE_M;
} else {
@@ -355,7 +356,7 @@ ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
wdg = by_ctl & BYPASS_WDT_ENABLE_M;
if (!wdg)
- *wd_timeout = RTE_BYPASS_TMT_OFF;
+ *wd_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
else
*wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
BYPASS_WDT_MASK;
diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h
index 5f5c63e3..09155bb3 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.h
+++ b/drivers/net/ixgbe/ixgbe_bypass.h
@@ -34,7 +34,7 @@
#ifndef _IXGBE_BYPASS_H_
#define _IXGBE_BYPASS_H_
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
struct ixgbe_bypass_mac_ops {
s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
@@ -63,6 +63,6 @@ s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev);
s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw);
s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw);
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
#endif /* _IXGBE_BYPASS_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_bypass_api.h b/drivers/net/ixgbe/ixgbe_bypass_api.h
index aec8f1ec..d52fde04 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_api.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_api.h
@@ -34,7 +34,7 @@
#ifndef _IXGBE_BYPASS_API_H_
#define _IXGBE_BYPASS_API_H_
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
#include "ixgbe_bypass_defines.h"
/**
@@ -295,6 +295,6 @@ static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
return 0;
}
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
#endif /* _IXGBE_BYPASS_API_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h
index cafcb278..d12c2714 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_defines.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -34,7 +34,7 @@
#ifndef _IXGBE_BYPASS_DEFINES_H_
#define _IXGBE_BYPASS_DEFINES_H_
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
#define msleep(x) rte_delay_us(x*1000)
#define usleep_range(min, max) rte_delay_us(min)
@@ -155,6 +155,6 @@ enum ixgbe_state_t {
#define IXGBE_BYPASS_FW_WRITE_FAILURE -35
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
#endif /* _IXGBE_BYPASS_DEFINES_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index aeaa432c..22171d86 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -57,7 +57,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -187,13 +186,13 @@ ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
-static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int size);
-static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+ unsigned int size);
+static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
- __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
const uint64_t *ids,
unsigned int limit);
@@ -240,7 +239,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -262,6 +261,8 @@ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
+static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
@@ -302,9 +303,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
- uint16_t queue_idx, uint16_t tx_rate);
-
static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
@@ -444,13 +442,8 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
@@ -481,7 +474,7 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
{ .vendor_id = 0, /* sentinel */ },
@@ -575,17 +568,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
-#ifdef RTE_NIC_BYPASS
- .bypass_init = ixgbe_bypass_init,
- .bypass_state_set = ixgbe_bypass_state_store,
- .bypass_state_show = ixgbe_bypass_state_show,
- .bypass_event_set = ixgbe_bypass_event_store,
- .bypass_event_show = ixgbe_bypass_event_show,
- .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store,
- .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
- .bypass_ver_show = ixgbe_bypass_ver_show,
- .bypass_wd_reset = ixgbe_bypass_wd_reset,
-#endif /* RTE_NIC_BYPASS */
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
.filter_ctrl = ixgbe_dev_filter_ctrl,
@@ -608,6 +590,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set,
.udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
+ .tm_ops_get = ixgbe_tm_ops_get,
};
/*
@@ -618,7 +601,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.dev_configure = ixgbevf_dev_configure,
.dev_start = ixgbevf_dev_start,
.dev_stop = ixgbevf_dev_stop,
- .link_update = ixgbe_dev_link_update,
+ .link_update = ixgbevf_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
.xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
@@ -1131,7 +1114,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -1190,11 +1173,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
hw->allow_unsupported_sfp = 1;
/* Initialize the shared code (base driver) */
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
diag = ixgbe_bypass_init_shared_code(hw);
#else
diag = ixgbe_init_shared_code(hw);
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
if (diag != IXGBE_SUCCESS) {
PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
@@ -1227,11 +1210,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return -EIO;
}
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
diag = ixgbe_bypass_init_hw(hw);
#else
diag = ixgbe_init_hw(hw);
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
/*
* Devices with copper phys will fail to initialise if ixgbe_init_hw()
@@ -1359,13 +1342,16 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
+ /* initialize Traffic Manager configuration */
+ ixgbe_tm_conf_init(eth_dev);
+
return 0;
}
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
@@ -1412,6 +1398,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* clear all the filters list */
ixgbe_filterlist_flush();
+ /* Remove all Traffic Manager configuration */
+ ixgbe_tm_conf_uninit(eth_dev);
+
return 0;
}
@@ -1491,7 +1480,7 @@ static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
TAILQ_INIT(&fdir_info->fdir_list);
snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
- "fdir_%s", eth_dev->data->name);
+ "fdir_%s", eth_dev->device->name);
fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
if (!fdir_info->hash_handle) {
PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
@@ -1527,7 +1516,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
TAILQ_INIT(&l2_tn_info->l2_tn_list);
snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
- "l2_tn_%s", eth_dev->data->name);
+ "l2_tn_%s", eth_dev->device->name);
l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
if (!l2_tn_info->hash_handle) {
PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
@@ -1598,7 +1587,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
int diag;
uint32_t tc, tcs;
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -1747,7 +1736,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
@@ -2176,7 +2165,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
static int
ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
switch (nb_rx_q) {
case 1:
@@ -2425,7 +2414,7 @@ ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t total_rate = 0;
struct rte_pci_device *pci_dev;
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
rte_eth_link_get_nowait(dev->data->port_id, &link);
if (vf >= pci_dev->max_vfs)
@@ -2496,7 +2485,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
@@ -2505,6 +2494,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
int status;
uint16_t vf, idx;
uint32_t *link_speeds;
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -2651,9 +2642,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
speed = 0x0;
if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
- speed = (hw->mac.type != ixgbe_mac_82598EB) ?
- IXGBE_LINK_SPEED_82599_AUTONEG :
- IXGBE_LINK_SPEED_82598_AUTONEG;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ speed = IXGBE_LINK_SPEED_82598_AUTONEG;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+ break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ speed = IXGBE_LINK_SPEED_X550_AUTONEG;
+ break;
+ default:
+ speed = IXGBE_LINK_SPEED_82599_AUTONEG;
+ }
} else {
if (*link_speeds & ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
@@ -2672,7 +2676,9 @@ skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- ixgbe_dev_lsc_interrupt_setup(dev);
+ ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
+ else
+ ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
ixgbe_dev_macsec_interrupt_setup(dev);
} else {
rte_intr_callback_unregister(intr_handle,
@@ -2695,6 +2701,11 @@ skip_link_setup:
ixgbe_l2_tunnel_conf(dev);
ixgbe_filter_restore(dev);
+ if (tm_conf->root && !tm_conf->committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
return 0;
error:
@@ -2714,9 +2725,11 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -2763,6 +2776,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ /* reset hierarchy commit */
+ tm_conf->committed = false;
}
/*
@@ -2774,7 +2790,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type == ixgbe_mac_82599EB) {
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
/* Not suported in bypass mode */
PMD_INIT_LOG(ERR, "Set link up is not supported "
@@ -2804,7 +2820,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type == ixgbe_mac_82599EB) {
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
/* Not suported in bypass mode */
PMD_INIT_LOG(ERR, "Set link down is not supported "
@@ -3194,7 +3210,7 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
}
static int ixgbe_dev_xstats_get_names_by_id(
- __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
const uint64_t *ids,
unsigned int limit)
@@ -3582,7 +3598,7 @@ ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
@@ -3685,6 +3701,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw->mac.type == ixgbe_mac_X550_vf) {
dev_info->speed_capa |= ETH_LINK_SPEED_100M;
}
+ if (hw->mac.type == ixgbe_mac_X550) {
+ dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+ }
}
static const uint32_t *
@@ -3717,6 +3737,12 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
return ptypes;
+
+#if defined(RTE_ARCH_X86)
+ if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+ return ptypes;
+#endif
return NULL;
}
@@ -3724,7 +3750,7 @@ static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
dev_info->pci_dev = pci_dev;
@@ -3776,9 +3802,116 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->tx_desc_lim = tx_desc_lim;
}
+static int
+ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ int *link_up, int wait_to_complete)
+{
+ /**
+ * for a quick link status check, wait_to_complete == 0,
+ * skip the PF link status check
+ */
+ bool no_pflink_check = wait_to_complete == 0;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ uint32_t links_reg, in_msg;
+ int ret_val = 0;
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ rte_delay_us(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* reserved in older MACs; only valid for X550 and newer */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ if (no_pflink_check) {
+ if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+ mac->get_link_status = true;
+ else
+ mac->get_link_status = false;
+
+ goto out;
+ }
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS and is NACK we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
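
The quick-path behaviour documented above maps onto the ethdev link query API: rte_eth_link_get_nowait() passes wait_to_complete == 0 and so takes the new VF path that skips the PF mailbox check, while rte_eth_link_get() waits. A minimal polling sketch under that assumption (port id and output are illustrative, not part of this patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative only: rte_eth_link_get_nowait() corresponds to the
 * wait_to_complete == 0 fast path added above for VF ports. */
static void
poll_vf_link(uint8_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status == ETH_LINK_UP)
		printf("port %u: up, %u Mbps\n",
		       (unsigned int)port_id, (unsigned int)link.link_speed);
	else
		printf("port %u: down\n", (unsigned int)port_id);
}
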
/* return 0 means link status changed, -1 means not changed */
static int
-ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link, old;
@@ -3788,6 +3921,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
int link_up;
int diag;
u32 speed = 0;
+ int wait = 1;
bool autoneg = false;
link.link_status = ETH_LINK_DOWN;
@@ -3808,9 +3942,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+ wait = 0;
+
+ if (vf)
+ diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
else
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+ diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
@@ -3847,6 +3984,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_speed = ETH_SPEED_NUM_1G;
break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+
case IXGBE_LINK_SPEED_10GB_FULL:
link.link_speed = ETH_SPEED_NUM_10G;
break;
@@ -3859,6 +4004,18 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
return 0;
}
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
+}
+
+static int
+ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
+}
+
static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
@@ -3916,19 +4073,24 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable.
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
ixgbe_dev_link_status_print(dev);
- intr->mask |= IXGBE_EICR_LSC;
+ if (on)
+ intr->mask |= IXGBE_EICR_LSC;
+ else
+ intr->mask &= ~IXGBE_EICR_LSC;
return 0;
}
@@ -4035,7 +4197,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -4143,7 +4305,7 @@ static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -4166,12 +4328,13 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
if (intr->flags & IXGBE_FLAG_MACSEC) {
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
- NULL);
+ NULL, NULL);
intr->flags &= ~IXGBE_FLAG_MACSEC;
}
@@ -4660,7 +4823,7 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
static void
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
ixgbe_remove_rar(dev, 0);
@@ -4670,7 +4833,7 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- if (strcmp(dev->data->drv_name, drv->driver.name))
+ if (strcmp(dev->device->driver->name, drv->driver.name))
return false;
return true;
@@ -4690,7 +4853,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ struct rte_eth_dev_data *dev_data = dev->data;
ixgbe_dev_info_get(dev, &dev_info);
@@ -4698,13 +4861,15 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
return -EINVAL;
- /* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before.
+ /* If device is started, refuse mtu that requires the support of
+ * scattered packets when this feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (dev_data->dev_started && !dev_data->scattered_rx &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
- dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
return -EINVAL;
+ }
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -4799,7 +4964,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t intr_vector = 0;
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int err, mask = 0;
@@ -4863,7 +5028,7 @@ static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -5316,6 +5481,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
memset(&mr_info->mr_conf[rule_id], 0,
sizeof(struct rte_eth_mirror_conf));
@@ -5336,7 +5504,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
@@ -5370,7 +5538,7 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
@@ -5473,7 +5641,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
tmp |= (msix_vector << (8 * (queue & 0x3)));
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550)) {
if (direction == -1) {
/* other causes */
idx = ((queue & 1) * 8);
@@ -5495,7 +5664,7 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5529,7 +5698,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5581,6 +5750,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
break;
default:
@@ -5598,8 +5768,9 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
- uint16_t queue_idx, uint16_t tx_rate)
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t rf_dec, rf_int;
@@ -7531,7 +7702,7 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
bool en)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
int ret = 0;
uint32_t vmtir, vmvir;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -7879,7 +8050,8 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
/* PF reset VF event */
if (in_msg == IXGBE_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL, NULL);
}
static int
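
The dev_start/dev_stop changes above tie the new Traffic Manager state to the port life cycle: the hierarchy is expected to be committed before the port is started (otherwise the new warning fires), and the committed flag is cleared again on stop. A minimal application-side sketch of that ordering, assuming the generic rte_tm API shipped with this release; the helper name, port id and error handling are placeholders, not taken from this patch:

#include <rte_ethdev.h>
#include <rte_tm.h>

/* Sketch only: shaper profile and node creation via rte_tm_*_add()
 * is omitted; what matters here is the call order. */
static int
start_port_with_tm(uint8_t port_id)
{
	struct rte_tm_error tm_err;
	int ret;

	/* apply the configured hierarchy to hardware first ... */
	ret = rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &tm_err);
	if (ret != 0)
		return ret;

	/* ... then start the port; the reverse order triggers the warning above */
	return rte_eth_dev_start(port_id);
}
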
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index b576a6f4..caa50c8b 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -33,12 +33,15 @@
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
+#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#include <rte_time.h>
#include <rte_hash.h>
+#include <rte_pci.h>
+#include <rte_tm_driver.h>
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -72,7 +75,7 @@
#endif
#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
-/* EITR Inteval is in 2048ns uinits for 1G and 10G link */
+/* EITR Interval is in 2048ns units for 1G and 10G link */
#define IXGBE_EITR_INTERVAL_UNIT_NS 2048
#define IXGBE_EITR_ITR_INT_SHIFT 3
#define IXGBE_EITR_INTERVAL_US(us) \
@@ -152,6 +155,13 @@
return -ENOTSUP;\
} while (0)
+/* Link speed for X550 auto negotiation */
+#define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_2_5GB_FULL | \
+ IXGBE_LINK_SPEED_5GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
/*
* Information about the fdir mode.
*/
@@ -189,6 +199,7 @@ struct ixgbe_fdir_rule {
uint32_t fdirflags; /* drop or forward */
uint32_t soft_id; /* an unique value for this rule */
uint8_t queue; /* assigned rx queue */
+ uint8_t flex_bytes_offset;
};
struct ixgbe_hw_fdir_info {
@@ -434,6 +445,68 @@ struct ixgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
};
+/* Struct to store Traffic Manager shaper profile. */
+struct ixgbe_tm_shaper_profile {
+ TAILQ_ENTRY(ixgbe_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum ixgbe_tm_node_type {
+ IXGBE_TM_NODE_TYPE_PORT,
+ IXGBE_TM_NODE_TYPE_TC,
+ IXGBE_TM_NODE_TYPE_QUEUE,
+ IXGBE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct ixgbe_tm_node {
+ TAILQ_ENTRY(ixgbe_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ uint16_t no;
+ struct ixgbe_tm_node *parent;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node);
+
+/* The configuration of Traffic Manager */
+struct ixgbe_tm_conf {
+ struct ixgbe_shaper_profile_list shaper_profile_list;
+ struct ixgbe_tm_node *root; /* root node - port */
+ struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */
+ struct ixgbe_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check if APP can change the TM node
+ * configuration.
+ * When it's true, the configuration has been applied to HW and
+ * the APP should not change it.
+ * As on-the-fly configuration is not supported, the APP should
+ * call the hierarchy_commit API before starting the port to set
+ * this flag to true; stopping the port sets it back to false.
+ */
+ bool committed;
+};
+
/*
* Structure to store private data for each driver instance (for each port).
*/
@@ -450,9 +523,9 @@ struct ixgbe_adapter {
struct ixgbe_mirror_info mr_data;
struct ixgbe_vf_info *vfdata;
struct ixgbe_uta_info uta_info;
-#ifdef RTE_NIC_BYPASS
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
struct ixgbe_bypass_info bps;
-#endif /* RTE_NIC_BYPASS */
+#endif /* RTE_LIBRTE_IXGBE_BYPASS */
struct ixgbe_filter_info filter;
struct ixgbe_l2_tn_info l2_tn;
struct ixgbe_bw_conf bw_conf;
@@ -462,11 +535,9 @@ struct ixgbe_adapter {
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
struct rte_timecounter tx_tstamp_tc;
+ struct ixgbe_tm_conf tm_conf;
};
-#define IXGBE_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
-
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
@@ -512,6 +583,9 @@ struct ixgbe_adapter {
#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
(&((struct ixgbe_adapter *)adapter)->bw_conf)
+#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->tm_conf)
+
/*
* RX/TX function prototypes
*/
@@ -624,6 +698,8 @@ void ixgbe_filterlist_flush(void);
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset);
int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
struct ixgbe_fdir_rule *rule,
bool del, bool update);
@@ -671,6 +747,11 @@ int ixgbe_vt_check(struct ixgbe_hw *hw);
int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
bool is_ixgbe_supported(struct rte_eth_dev *dev);
+int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
+void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t tx_rate);
static inline int
ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
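
The new ixgbe_tm_conf above keeps shaper profiles and TM nodes in sys/queue.h TAILQ lists. As a rough sketch of how such a list is typically searched (the helper itself is hypothetical; only the field names come from the structures declared above):

#include <stddef.h>
#include <sys/queue.h>

/* Hypothetical lookup mirroring the list layout of ixgbe_tm_conf:
 * walk shaper_profile_list and return the entry with a matching id. */
static struct ixgbe_tm_shaper_profile *
tm_shaper_profile_search(struct ixgbe_tm_conf *tm_conf, uint32_t profile_id)
{
	struct ixgbe_tm_shaper_profile *profile;

	TAILQ_FOREACH(profile, &tm_conf->shaper_profile_list, node) {
		if (profile->shaper_profile_id == profile_id)
			return profile;
	}

	return NULL;
}
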
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 7f6c7b58..eb2d5581 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -302,7 +302,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
* mask VM pool and DIPv6 since there are currently not supported
* mask FLEX byte, it will be set in flex_conf
*/
- uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
+ uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
uint32_t fdirtcpm; /* TCP source and destination port masks. */
uint32_t fdiripv6m; /* IPv6 source and destination masks. */
volatile uint32_t *reg;
@@ -333,6 +333,10 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* flex byte mask */
+ if (info->mask.flex_bytes_mask == 0)
+ fdirm |= IXGBE_FDIRM_FLEX;
+
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
@@ -533,6 +537,31 @@ ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
return -ENOTSUP;
}
+int
+ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fdirctrl;
+ int i;
+
+ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
+ fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+ fdirctrl |= ((offset >> 1) /* convert to word offset */
+ << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+ return 0;
+}
+
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
@@ -654,7 +683,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
/*
* The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
+ * initialized to zero for non DCB mode otherwise actual total RX PB
* would be bigger than programmed and filter space would run into
* the PB 0 region.
*/
@@ -1243,7 +1272,9 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
(rule->ixgbe_fdir.formatted.flow_type ==
- IXGBE_ATR_FLOW_TYPE_IPV4) &&
+ IXGBE_ATR_FLOW_TYPE_IPV4 ||
+ rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV6) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
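
ixgbe_fdir_set_flexbytes_offset() above programs the flex-byte offset into FDIRCTRL in 2-byte words, which is why the flow parser added below only accepts even byte offsets up to IXGBE_MAX_FLX_SOURCE_OFF (62). A small sketch of that constraint and conversion, assuming the driver's IXGBE_FDIRCTRL_FLEX_* macros; the helper is illustrative, not part of the patch:

#include <stdint.h>

/* Illustrative: validate a byte offset and fold it into FDIRCTRL as a
 * word offset, mirroring the shift done in the function above. */
static int
fdirctrl_set_flex_offset(uint32_t *fdirctrl, uint16_t byte_offset)
{
	if (byte_offset > 62 || (byte_offset & 1))
		return -1;				/* must be even and <= 62 */

	*fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;		/* clear the old field */
	*fdirctrl |= ((uint32_t)(byte_offset >> 1)	/* bytes -> 2-byte words */
		      << IXGBE_FDIRCTRL_FLEX_SHIFT);
	return 0;
}
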
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 9aeb71e4..d6796088 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -56,7 +56,6 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -78,23 +77,40 @@
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
- do { \
- item = pattern + index;\
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
- index++; \
- item = pattern + index; \
- } \
- } while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
- do { \
- act = actions + index; \
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
- index++; \
- act = actions + index; \
- } \
- } while (0)
+#define IXGBE_MAX_FLX_SOURCE_OFF 62
+
+/**
+ * An endless loop cannot happen given the assumptions below:
+ * 1. there is at least one non-void item (END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
/**
* Please aware there's an asumption for all the parsers.
@@ -144,7 +160,6 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error,
@@ -166,11 +181,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
-
/* the first not void item can be MAC or IPv4 */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
@@ -198,8 +210,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
/* check if the next not void item is IPv4 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -252,11 +263,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->proto = ipv4_spec->hdr.next_proto_id;
/* check if the next not void item is TCP or UDP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
- item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -265,7 +276,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* get the TCP/UDP info */
- if (!item->spec || !item->mask) {
+ if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+ (!item->spec || !item->mask)) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -345,7 +357,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
udp_spec = (const struct rte_flow_item_udp *)item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
- } else {
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
/**
@@ -368,11 +380,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
+ } else {
+ goto action;
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -381,14 +394,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
+action:
/**
* n-tuple only supports forwarding,
* check if the first not void action is QUEUE.
*/
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -400,8 +412,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
((const struct rte_flow_action_queue *)act->conf)->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -482,9 +493,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
- filter->priority > IXGBE_5TUPLE_MAX_PRI ||
- filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ if (filter->queue >= dev->data->nb_rx_queues)
return -rte_errno;
/* fixed value for ixgbe */
@@ -520,7 +529,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -543,15 +551,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* Parse pattern */
- index = 0;
-
+ item = next_no_void_pattern(pattern, NULL);
/* The first non-void item should be MAC. */
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -610,12 +611,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
/* Check if the next non-void item is END. */
- index++;
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -625,13 +621,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* Parse action */
- index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
rte_flow_error_set(error, EINVAL,
@@ -648,12 +638,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Check if the next non-void item is END */
- index++;
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -725,7 +710,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ if (filter->queue >= dev->data->nb_rx_queues) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -793,7 +778,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -816,11 +800,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -849,8 +831,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is IPv4 or IPv6 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
rte_flow_error_set(error, EINVAL,
@@ -872,8 +853,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is TCP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -917,8 +897,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -927,11 +906,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -951,8 +927,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -1012,6 +987,9 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
ret = cons_parse_syn_filter(attr, pattern,
actions, filter, error);
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
if (ret)
return ret;
@@ -1048,7 +1026,6 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_e_tag *e_tag_mask;
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1070,11 +1047,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
NULL, "NULL attribute.");
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* The first not void item should be e-tag. */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1121,8 +1096,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1159,11 +1133,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1176,8 +1147,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->pool = act_q->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1213,6 +1183,9 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
+ if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
return ret;
}
@@ -1226,7 +1199,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
const struct rte_flow_action_mark *mark;
- uint32_t index;
/* parse attr */
/* must be input direction */
@@ -1256,11 +1228,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE or DROP. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1274,12 +1243,19 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
act_q = (const struct rte_flow_action_queue *)act->conf;
rule->queue = act_q->index;
} else { /* drop */
+ /* signature mode does not support drop action. */
+ if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
rule->fdirflags = IXGBE_FDIRCMD_DROP;
}
/* check if the next not void item is MARK */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
(act->type != RTE_FLOW_ACTION_TYPE_END)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1294,8 +1270,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
mark = (const struct rte_flow_action_mark *)act->conf;
rule->soft_id = mark->id;
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
}
/* check if the next not void item is END */
@@ -1310,14 +1285,78 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return 0;
}
+/* search the next non-void pattern item, skipping fuzzy items */
+static inline
+const struct rte_flow_item *next_no_fuzzy_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_fuzzy *spec, *last, *mask;
+ const struct rte_flow_item *item;
+ uint32_t sh, lh, mh;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+ spec =
+ (const struct rte_flow_item_fuzzy *)item->spec;
+ last =
+ (const struct rte_flow_item_fuzzy *)item->last;
+ mask =
+ (const struct rte_flow_item_fuzzy *)item->mask;
+
+ if (!spec || !mask)
+ return 0;
+
+ sh = spec->thresh;
+
+ if (!last)
+ lh = sh;
+ else
+ lh = last->thresh;
+
+ mh = mask->thresh;
+ sh = sh & mh;
+ lh = lh & mh;
+
+ if (!sh || sh > lh)
+ return 0;
+
+ return 1;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
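
signature_match() above looks for a FUZZY item with a non-zero, in-range threshold to switch the rule into signature mode. A minimal rte_flow item that satisfies it might look as follows (values are illustrative; the default fuzzy mask is all-ones):

#include <rte_flow.h>

/* Illustrative FUZZY item: spec/mask present, thresh non-zero, no 'last',
 * so signature_match() returns 1 and the rule is parsed in signature mode. */
static const struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
static const struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };

static const struct rte_flow_item fuzzy_item = {
	.type = RTE_FLOW_ITEM_TYPE_FUZZY,
	.spec = &fuzzy_spec,
	.last = NULL,
	.mask = &fuzzy_mask,
};
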
/**
* Parse the rule to see if it is a IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
* UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4.
- * The second not void item must be IPV4 if the first one is ETH.
- * The third not void item must be UDP or TCP or SCTP.
+ * The first not void item can be ETH or IPV4 or IPV6.
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item could be UDP or TCP or SCTP (optional).
+ * The next not void item could be RAW (for flexbyte, optional).
* The next not void item must be END.
+ * A Fuzzy Match pattern can appear at any place before END.
+ * Fuzzy Match is optional for IPV4 but is required for IPV6.
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1334,6 +1373,14 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
* dst_addr 192.167.3.50 0xFFFFFFFF
* UDP/TCP/SCTP src_port 80 0xFFFF
* dst_port 80 0xFFFF
+ * FLEX relative 0 0x1
+ * search 0 0x1
+ * reserved 0 0
+ * offset 12 0xFFFFFFFF
+ * limit 0 0xFFFF
+ * length 2 0xFFFF
+ * pattern[0] 0x86 0xFF
+ * pattern[1] 0xDD 0xFF
* END
* MAC VLAN pattern example:
* ITEM Spec Mask
@@ -1346,7 +1393,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
* Item->last should be NULL.
*/
static int
-ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct ixgbe_fdir_rule *rule,
@@ -1357,6 +1405,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_item_udp *udp_spec;
@@ -1365,8 +1415,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_sctp *sctp_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_item_raw *raw_spec;
+ uint8_t j;
- uint32_t index, j;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1396,17 +1449,16 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
-
- /* parse pattern */
- index = 0;
+ rule->mask.flex_bytes_mask = 0;
/**
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -1417,7 +1469,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
- rule->mode = RTE_FDIR_MODE_PERFECT;
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
/*Not supported last point for range*/
if (item->last) {
@@ -1454,14 +1509,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->mask) {
- /* If ethernet has meaning, it means MAC VLAN mode. */
- rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
rule->b_mask = TRUE;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
/* Ether type should be masked. */
- if (eth_mask->type) {
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1469,6 +1523,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
/**
* src MAC address must be masked,
* and don't support dst MAC address mask.
@@ -1497,8 +1554,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1544,18 +1600,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
/* More than one tags are not supported. */
- /**
- * Check if the next not void item is not vlan.
- */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* Next not void item must be END */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1564,7 +1611,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
- /* Get the IP info. */
+ /* Get the IPV4 info. */
if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
/**
* Set the flow type even if there's no content
@@ -1624,12 +1671,104 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
- item->type != RTE_FLOW_ITEM_TYPE_END) {
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV6;
+
+ /**
+ * 1. must be in signature match mode
+ * 2. 'last' is not supported
+ * 3. mask must not be NULL
+ */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1644,8 +1783,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_TCPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_TCP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1690,6 +1829,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->ixgbe_fdir.formatted.dst_port =
tcp_spec->hdr.dst_port;
}
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
}
/* Get the UDP info */
@@ -1698,8 +1848,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_UDPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_UDP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1739,6 +1889,17 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->ixgbe_fdir.formatted.dst_port =
udp_spec->hdr.dst_port;
}
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
}
/* Get the SCTP info */
@@ -1747,8 +1908,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_SCTP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1756,46 +1917,147 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
item, "Not supported last point for range");
return -rte_errno;
}
- /**
- * Only care about src & dst ports,
- * others should be masked.
- */
- if (!item->mask) {
+
+ /* only the x550 family supports SCTP port matching */
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a) {
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ sctp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ /* on other MACs even the SCTP ports are not supported */
+ } else {
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask &&
+ (sctp_mask->hdr.src_port ||
+ sctp_mask->hdr.dst_port ||
+ sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
return -rte_errno;
}
- rule->b_mask = TRUE;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
- if (sctp_mask->hdr.tag ||
- sctp_mask->hdr.cksum) {
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* spec and mask should not be null */
+ if (!item->mask || !item->spec) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
return -rte_errno;
}
- rule->mask.src_port_mask = sctp_mask->hdr.src_port;
- rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
- if (item->spec) {
- rule->b_spec = TRUE;
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
- rule->ixgbe_fdir.formatted.src_port =
- sctp_spec->hdr.src_port;
- rule->ixgbe_fdir.formatted.dst_port =
- sctp_spec->hdr.dst_port;
+ raw_mask = (const struct rte_flow_item_raw *)item->mask;
+
+ /* check mask */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
}
+
+ raw_spec = (const struct rte_flow_item_raw *)item->spec;
+
+ /* check spec */
+ if (raw_spec->relative != 0 ||
+ raw_spec->search != 0 ||
+ raw_spec->reserved != 0 ||
+ raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+ raw_spec->offset % 2 ||
+ raw_spec->limit != 0 ||
+ raw_spec->length != 2 ||
+ /* pattern can't be 0xffff */
+ (raw_spec->pattern[0] == 0xff &&
+ raw_spec->pattern[1] == 0xff)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check pattern mask */
+ if (raw_mask->pattern[0] != 0xff ||
+ raw_mask->pattern[1] != 0xff) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mask.flex_bytes_mask = 0xffff;
+ rule->ixgbe_fdir.formatted.flex_bytes =
+ (((uint16_t)raw_spec->pattern[1]) << 8) |
+ raw_spec->pattern[0];
+ rule->flex_bytes_offset = raw_spec->offset;
}
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1863,7 +2125,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1894,14 +2156,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or IPv6 or UDP or VxLAN.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -1927,7 +2186,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
item, "Not supported by fdir filter");
return -rte_errno;
}
- /*Not supported last point for range*/
+ /* Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -1936,8 +2195,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is IPv4 or IPv6. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1968,8 +2226,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is UDP or NVGRE. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1999,8 +2256,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is VxLAN. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2156,8 +2412,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MAC */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2240,8 +2495,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2277,8 +2531,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2316,7 +2569,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
hw->mac.type != ixgbe_mac_X550EM_a)
return -ENOTSUP;
- ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+ ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
actions, rule, error);
if (!ret)
@@ -2325,10 +2578,24 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
actions, rule, error);
+ if (ret)
+ return ret;
+
step_next:
+
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ rule->fdirflags == IXGBE_FDIRCMD_DROP &&
+ (rule->ixgbe_fdir.formatted.src_port != 0 ||
+ rule->ixgbe_fdir.formatted.dst_port != 0))
+ return -ENOTSUP;
+
if (fdir_mode == RTE_FDIR_MODE_NONE ||
fdir_mode != rule->mode)
return -ENOTSUP;
+
+ if (rule->queue >= dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
return ret;
}
@@ -2414,6 +2681,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ uint8_t first_mask = FALSE;
flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
if (!flow) {
@@ -2505,11 +2773,19 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
rte_memcpy(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
+ fdir_info->flex_bytes_offset =
+ fdir_rule.flex_bytes_offset;
+
+ if (fdir_rule.mask.flex_bytes_mask)
+ ixgbe_fdir_set_flexbytes_offset(dev,
+ fdir_rule.flex_bytes_offset);
+
ret = ixgbe_fdir_set_input_mask(dev);
if (ret)
goto out;
fdir_info->mask_added = TRUE;
+ first_mask = TRUE;
} else {
/**
* Only support one global mask,
@@ -2520,6 +2796,10 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
sizeof(struct ixgbe_hw_fdir_mask));
if (ret)
goto out;
+
+ if (fdir_info->flex_bytes_offset !=
+ fdir_rule.flex_bytes_offset)
+ goto out;
}
}
@@ -2540,8 +2820,15 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
return flow;
}
- if (ret)
+ if (ret) {
+ /**
+ * clear the mask_added flag if programming
+ * the rule fails
+ */
+ if (first_mask)
+ fdir_info->mask_added = FALSE;
goto out;
+ }
}
goto out;
@@ -2583,7 +2870,7 @@ out:
* the HW. Because there can be no enough room for the rule.
*/
static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ixgbe_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -2774,9 +3061,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
}
const struct rte_flow_ops ixgbe_flow_ops = {
- ixgbe_flow_validate,
- ixgbe_flow_create,
- ixgbe_flow_destroy,
- ixgbe_flow_flush,
- NULL,
+ .validate = ixgbe_flow_validate,
+ .create = ixgbe_flow_create,
+ .destroy = ixgbe_flow_destroy,
+ .flush = ixgbe_flow_flush,
};
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index d88832e5..c0d86c76 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -61,7 +61,7 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
return pci_dev->max_vfs;
}
@@ -511,7 +511,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
}
static int
-ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
@@ -683,7 +683,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_pmd_ixgbe_mb_event_param cb_param;
+ struct rte_pmd_ixgbe_mb_event_param ret_param;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
if (retval) {
@@ -702,10 +702,10 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
* initialise structure to send to user application
* will return response from user in retval field
*/
- cb_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
- cb_param.vfid = vf;
- cb_param.msg_type = msgbuf[0] & 0xFFFF;
- cb_param.msg = (void *)msgbuf;
+ ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
+ ret_param.vfid = vf;
+ ret_param.msg_type = msgbuf[0] & 0xFFFF;
+ ret_param.msg = (void *)msgbuf;
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
@@ -714,20 +714,22 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
vfinfo[vf].clear_to_send = true;
/* notify application about VF reset */
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
return ret;
}
/**
* ask user application if we allowed to perform those functions
- * if we get cb_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED
+ * if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED
* then business as usual,
* if 0, do nothing and send ACK to VF
- * if cb_param.retval > 1, do nothing and send NAK to VF
+ * if ret_param.retval > 1, do nothing and send NAK to VF
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+ NULL, &ret_param);
- retval = cb_param.retval;
+ retval = ret_param.retval;
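The retval convention spelled out in the comment above is what an application's RTE_ETH_EVENT_VF_MBOX callback is expected to follow. A hedged sketch of such a handler, assuming the per-event parameter handed to the callback is the struct rte_pmd_ixgbe_mb_event_param shown here (the "trust only VF 0" policy is purely illustrative):

#include <rte_pmd_ixgbe.h>

/* Sketch of an application handler for RTE_ETH_EVENT_VF_MBOX; event_param
 * points at the ret_param structure filled in by the PMD above. */
static void
example_handle_vf_mbox(void *event_param)
{
	struct rte_pmd_ixgbe_mb_event_param *p = event_param;

	if (p->vfid == 0)
		p->retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;   /* business as usual */
	else
		p->retval = RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK; /* skip request, NAK VF */
}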
/* check & process VF to PF mailbox message */
switch ((msgbuf[0] & 0xFFFF)) {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 1e078959..64bff258 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -126,7 +126,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
* Check for descriptors with their DD bit set and free mbufs.
* Return the total number of buffers freed.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
struct ixgbe_tx_entry *txep;
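A small note on the attribute change applied here and in the other ixgbe files below: the open-coded GCC attribute is replaced by the rte_common.h helper, which (as defined in that header) expands to the same thing:

/* from rte_common.h, for reference */
#define __rte_always_inline inline __attribute__((always_inline))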
@@ -1084,282 +1084,279 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
-#define IXGBE_PACKET_TYPE_MAX 0X80
-#define IXGBE_PACKET_TYPE_TN_MAX 0X100
-#define IXGBE_PACKET_TYPE_SHIFT 0X04
+/**
+ * Use 2 different tables for normal packets and tunnel
+ * packets to save space.
+ */
+const uint32_t
+ ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
+ [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+ RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+};
+
+const uint32_t
+ ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+
+ [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+};
/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
static inline uint32_t
ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
{
- /**
- * Use 2 different table for normal packet and tunnel packet
- * to save the space.
- */
- static const uint32_t
- ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
- [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
- [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4,
- [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
- RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
- };
-
- static const uint32_t
- ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
- [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_UDP,
-
- [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
- RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
- RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
- };
if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
return RTE_PTYPE_UNKNOWN;
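With the tables now defined at file scope and exported (see the extern declarations added to ixgbe_rxtx.h below), other compilation units such as the vector Rx path can index them directly. A minimal sketch of the intended lookup, reusing the same masks as the code in this patch; the helper name is hypothetical:

/* Hypothetical helper showing how the exported tables are indexed. */
static inline uint32_t
example_ptype_lookup(uint32_t pkt_info, int is_tunnel)
{
	if (is_tunnel)
		return ptype_table_tn[pkt_info & IXGBE_PACKET_TYPE_MASK_TUNNEL];
	return ptype_table[pkt_info & IXGBE_PACKET_TYPE_MASK_82599];
}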
@@ -4111,10 +4108,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
return -ENOMEM;
}
- rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->next = NULL;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
dma_addr =
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 1ffab4cc..85feb0bd 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -87,6 +87,10 @@
#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
#define IXGBE_PACKET_TYPE_TUNNEL_BIT 0X1000
+#define IXGBE_PACKET_TYPE_MAX 0X80
+#define IXGBE_PACKET_TYPE_TN_MAX 0X100
+#define IXGBE_PACKET_TYPE_SHIFT 0X04
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -309,6 +313,9 @@ int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
+extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
+extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
+
#ifdef RTE_IXGBE_INC_VECTOR
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 1c34bb5f..9fc112b1 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -101,7 +101,7 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
return pkt_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
struct ixgbe_tx_entry_v *txep;
@@ -158,7 +158,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
return txq->tx_rs_thresh;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index a7bc199f..e704a7f3 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -87,6 +87,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
mb1 = rxep[1].mbuf;
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -214,32 +216,84 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
* appropriate flags means that we have to do a shift and blend for
* each mbuf before we do the write.
*/
-#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
-
rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
-#else
- rearm0 = _mm_slli_si128(vtag1, 14);
- rearm1 = _mm_slli_si128(vtag1, 12);
- rearm2 = _mm_slli_si128(vtag1, 10);
- rearm3 = _mm_slli_si128(vtag1, 8);
-
- rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48));
- rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48));
- rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48));
- rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48));
-
-#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
+static inline uint32_t get_packet_type(int index,
+ uint32_t pkt_info,
+ uint32_t etqf_check,
+ uint32_t tunnel_check)
+{
+ if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP)))
+ return RTE_PTYPE_UNKNOWN;
+
+ if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) {
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
+ return ptype_table_tn[pkt_info];
+ }
+
+ pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
+ return ptype_table[pkt_info];
+}
+
+static inline void
+desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
+ struct rte_mbuf **rx_pkts)
+{
+ __m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL);
+ __m128i ptype_mask = _mm_set_epi32(
+ pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask);
+ __m128i tunnel_mask =
+ _mm_set_epi64x(0x100000001000LL, 0x100000001000LL);
+
+ uint32_t etqf_check, tunnel_check, pkt_info;
+
+ __m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
+ __m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);
+
+ /* interleave low 32 bits,
+ * now we have 4 ptypes in an XMM register
+ */
+ ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+
+ /* create an etqf bitmask based on the etqf bit. */
+ etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask));
+
+ /* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
+ ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
+ ptype_mask);
+
+ /* create a tunnel bitmask based on the tunnel bit */
+ tunnel_check = _mm_movemask_epi8(
+ _mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3));
+
+ pkt_info = _mm_extract_epi32(ptype0, 0);
+ rx_pkts[0]->packet_type =
+ get_packet_type(0, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 1);
+ rx_pkts[1]->packet_type =
+ get_packet_type(1, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 2);
+ rx_pkts[2]->packet_type =
+ get_packet_type(2, pkt_info, etqf_check, tunnel_check);
+ pkt_info = _mm_extract_epi32(ptype0, 3);
+ rx_pkts[3]->packet_type =
+ get_packet_type(3, pkt_info, etqf_check, tunnel_check);
+}
+
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
*
@@ -266,6 +320,15 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-rxq->crc_len, /* sub crc on pkt_len */
0, 0 /* ignore pkt_type field */
);
+ /*
+ * Compile-time check that the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in the
+ * set_epi16 call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
__m128i dd_check, eop_check;
__m128i mbuf_init;
uint8_t vlan_flags;
@@ -312,6 +375,19 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF, /* skip 32 bit pkt_type */
0xFF, 0xFF
);
+ /*
+ * Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
@@ -447,6 +523,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
+ desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
+
/* C.4 calc avaialbe number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
new file mode 100644
index 00000000..cdcf45cb
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -0,0 +1,1043 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+
+static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops ixgbe_tm_ops = {
+ .capabilities_get = ixgbe_tm_capabilities_get,
+ .shaper_profile_add = ixgbe_shaper_profile_add,
+ .shaper_profile_delete = ixgbe_shaper_profile_del,
+ .node_add = ixgbe_node_add,
+ .node_delete = ixgbe_node_delete,
+ .node_type_get = ixgbe_node_type_get,
+ .level_capabilities_get = ixgbe_level_capabilities_get,
+ .node_capabilities_get = ixgbe_node_capabilities_get,
+ .hierarchy_commit = ixgbe_hierarchy_commit,
+};
+
+int
+ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &ixgbe_tm_ops;
+
+ return 0;
+}
+
+void
+ixgbe_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&tm_conf->shaper_profile_list);
+
+ /* initialize node configuration */
+ tm_conf->root = NULL;
+ TAILQ_INIT(&tm_conf->queue_list);
+ TAILQ_INIT(&tm_conf->tc_list);
+ tm_conf->nb_tc_node = 0;
+ tm_conf->nb_queue_node = 0;
+ tm_conf->committed = false;
+}
+
+void
+ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct ixgbe_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_tc_node = 0;
+ if (tm_conf->root) {
+ rte_free(tm_conf->root);
+ tm_conf->root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint8_t
+ixgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *eth_conf;
+ uint8_t nb_tcs = 0;
+
+ eth_conf = &dev->data->dev_conf;
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ return nb_tcs;
+}
+
+static int
+ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t tc_nb = ixgbe_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * These are the max capabilities, not the current configuration.
+ */
+ /* port + TCs + queues */
+ cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
+ hw->mac.max_tx_queues;
+ cap->n_levels_max = 3;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->mac.max_tx_queues;
+ /**
+ * HW supports SP, but there is no plan to support it now,
+ * so all the nodes should have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin now.
+ * So, all the nodes should have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
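ixgbe_tm_ops_get() is what exposes these callbacks through the generic rte_tm API added in this release, so an application reaches ixgbe_tm_capabilities_get() via the library rather than the PMD directly. A hedged usage sketch, assuming the 17.08 rte_tm prototypes (port ids are still uint8_t here):

#include <stdio.h>
#include <rte_tm.h>

/* Illustrative application-side query that ends up in
 * ixgbe_tm_capabilities_get() through ixgbe_tm_ops_get(). */
static void
example_query_tm_caps(uint8_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error tm_err;

	if (rte_tm_capabilities_get(port_id, &cap, &tm_err) == 0)
		printf("TM: %u levels, up to %u nodes\n",
		       cap.n_levels_max, cap.n_nodes_max);
}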
+
+static inline struct ixgbe_tm_shaper_profile *
+ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_shaper_profile_list *shaper_profile_list =
+ &tm_conf->shaper_profile_list;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = ixgbe_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
+ sizeof(struct ixgbe_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ (void)rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct ixgbe_tm_node *
+ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
+ enum ixgbe_tm_node_type *node_type)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_node *tm_node;
+
+ if (tm_conf->root && tm_conf->root->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_PORT;
+ return tm_conf->root;
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = IXGBE_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
+ uint16_t *base, uint16_t *nb)
+{
+ uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t vf_num = pci_dev->max_vfs;
+
+ *base = 0;
+ *nb = 0;
+
+ /* VT on */
+ if (vf_num) {
+ /* no DCB */
+ if (nb_tcs == 1) {
+ if (vf_num >= ETH_32_POOLS) {
+ *nb = 2;
+ *base = vf_num * 2;
+ } else if (vf_num >= ETH_16_POOLS) {
+ *nb = 4;
+ *base = vf_num * 4;
+ } else {
+ *nb = 8;
+ *base = vf_num * 8;
+ }
+ } else {
+ /* DCB */
+ *nb = 1;
+ *base = vf_num * nb_tcs + tc_node_no;
+ }
+ } else {
+ /* VT off */
+ if (nb_tcs == ETH_8_TCS) {
+ switch (tc_node_no) {
+ case 0:
+ *base = 0;
+ *nb = 32;
+ break;
+ case 1:
+ *base = 32;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 64;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 80;
+ *nb = 16;
+ break;
+ case 4:
+ *base = 96;
+ *nb = 8;
+ break;
+ case 5:
+ *base = 104;
+ *nb = 8;
+ break;
+ case 6:
+ *base = 112;
+ *nb = 8;
+ break;
+ case 7:
+ *base = 120;
+ *nb = 8;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (tc_node_no) {
+ /**
+ * If no VF and no DCB, only 64 queues can be used.
+ * That case is also covered by this "case 0".
+ */
+ case 0:
+ *base = 0;
+ *nb = 64;
+ break;
+ case 1:
+ *base = 64;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 96;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 112;
+ *nb = 16;
+ break;
+ default:
+ return;
+ }
+ }
+ }
+}
+
+static int
+ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* shared shaper not supported */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check the unsupported parameters */
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for TC or queue node */
+ /* check the unsupported parameters */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need to check if the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct ixgbe_tm_node *tm_node;
+ struct ixgbe_tm_node *parent_node;
+ uint8_t nb_tcs;
+ uint16_t q_base = 0;
+ uint16_t q_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ shaper_profile = ixgbe_shaper_profile_search(dev,
+ params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+
+ /* root node if it does not have a parent */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > IXGBE_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (tm_conf->root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("ixgbe_tm_node",
+ sizeof(struct ixgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->no = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ tm_conf->root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = ixgbe_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
+ parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ /* check TC number */
+ nb_tcs = ixgbe_tc_nb_get(dev);
+ if (tm_conf->nb_tc_node >= nb_tcs) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check queue number */
+ if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
+ if (parent_node->reference_count >= q_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues than TC supported";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For queue, the node id means queue id.
+ */
+ if (node_id >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("ixgbe_tm_node",
+ sizeof(struct ixgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ (void)rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ tm_node->no = parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->tc_list,
+ tm_node, node);
+ tm_conf->nb_tc_node++;
+ } else {
+ tm_node->no = q_base + parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->queue_list,
+ tm_node, node);
+ tm_conf->nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
+static int
+ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ tm_conf->root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == IXGBE_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ tm_conf->nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ tm_conf->nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = true;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->mac.max_tx_queues;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+ cap->non_leaf_nodes_identical = true;
+ }
+ cap->leaf_nodes_identical = true;
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->leaf.shaper_private_rate_max = 1250000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
+ struct ixgbe_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == IXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct ixgbe_tm_conf *tm_conf =
+ IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+ struct ixgbe_tm_node *tm_node;
+ uint64_t bw;
+ int ret;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!tm_conf->root)
+ goto done;
+
+ /* port max bandwidth not supported yet */
+ if (tm_conf->root->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port max bandwidth";
+ goto fail_clear;
+ }
+
+ /* HW does not support TC max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no TC max bandwidth";
+ goto fail_clear;
+ }
+ }
+
+ /* queue max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ if (bw) {
+ /* convert Bps to Mbps */
+ bw = bw * 8 / 1000 / 1000;
+ ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message =
+ "failed to set queue max bandwidth";
+ goto fail_clear;
+ }
+ }
+ }
+
+done:
+ tm_conf->committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ ixgbe_tm_conf_uninit(dev);
+ ixgbe_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
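The traffic-manager callbacks above are exercised through the generic rte_tm API rather than called directly. Below is a minimal sketch of how an application might build the port -> TC -> queue hierarchy that ixgbe_node_add() and ixgbe_hierarchy_commit() expect; the IDs, the helper name and the single-TC/single-queue layout are illustrative assumptions, not part of this patch. Note that ixgbe_node_param_check() validates TC and queue nodes through the leaf branch, so their wred_profile_id must be RTE_TM_WRED_PROFILE_ID_NONE, and only queue-level shaper profiles may carry a non-zero peak rate.

#include <string.h>
#include <rte_tm.h>

/* Hypothetical IDs: they only need to be unique, and queue node IDs must
 * equal the Tx queue index. */
#define SHAPER_NOLIMIT 1	/* rate 0: the only shape accepted on port/TC nodes */
#define SHAPER_QUEUE   2	/* peak rate programmed per Tx queue */
#define NODE_PORT      1000
#define NODE_TC0       2000

static int
app_tm_setup(uint8_t port_id, uint16_t txq, uint64_t queue_peak_bps)
{
	struct rte_tm_error err;
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;
	int ret;

	/* Committed rate/size, peak size and length adjustment are rejected,
	 * so only peak.rate may be non-zero, and only for queue nodes. */
	memset(&sp, 0, sizeof(sp));
	ret = rte_tm_shaper_profile_add(port_id, SHAPER_NOLIMIT, &sp, &err);
	if (ret)
		return ret;
	sp.peak.rate = queue_peak_bps;		/* bytes per second */
	ret = rte_tm_shaper_profile_add(port_id, SHAPER_QUEUE, &sp, &err);
	if (ret)
		return ret;

	/* Root (port) node: priority 0 and weight 1 are mandatory. */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = SHAPER_NOLIMIT;
	np.nonleaf.n_sp_priorities = 1;
	ret = rte_tm_node_add(port_id, NODE_PORT, RTE_TM_NODE_ID_NULL, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* One TC node under the port; WRED must be explicitly disabled. */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = SHAPER_NOLIMIT;
	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
	ret = rte_tm_node_add(port_id, NODE_TC0, NODE_PORT, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* Queue node: the node ID is the Tx queue index. */
	np.shaper_profile_id = SHAPER_QUEUE;
	ret = rte_tm_node_add(port_id, txq, NODE_TC0, 0, 1,
			      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* Only queue-level peak rates are programmed into hardware. */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}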
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index e8fc9a64..79897ff6 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -51,7 +51,7 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -84,7 +84,7 @@ rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -115,7 +115,7 @@ rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -145,7 +145,7 @@ rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -174,7 +174,7 @@ rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -270,7 +270,7 @@ rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -306,7 +306,7 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!is_ixgbe_supported(dev))
@@ -354,7 +354,7 @@ rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -401,7 +401,7 @@ rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -452,7 +452,7 @@ rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
@@ -908,3 +908,136 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
return 0;
}
+
+#ifdef RTE_LIBRTE_IXGBE_BYPASS
+int
+rte_pmd_ixgbe_bypass_init(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ ixgbe_bypass_init(dev);
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_state_show(dev, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_state_store(dev, new_state);
+}
+
+int
+rte_pmd_ixgbe_bypass_event_show(uint8_t port_id,
+ uint32_t event,
+ uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_event_show(dev, event, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_event_store(uint8_t port_id,
+ uint32_t event,
+ uint32_t state)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_event_store(dev, event, state);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_timeout_store(dev, timeout);
+}
+
+int
+rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_ver_show(dev, ver);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_timeout_show(dev, wd_timeout);
+}
+
+int
+rte_pmd_ixgbe_bypass_wd_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_bypass_wd_reset(dev);
+}
+#endif
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 1f2b1bd7..d33c285d 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -427,6 +427,177 @@ int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
uint8_t tc_num,
uint8_t *bw_weight);
+
+/**
+ * Initialize bypass logic. This function needs to be called before
+ * executing any other bypass API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_init(uint8_t port);
+
+/**
+ * Return bypass state.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param state
+ * The returned bypass state.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state);
+
+/**
+ * Set bypass state
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param new_state
+ * The bypass state to be set.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state);
+
+/**
+ * Return bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The bypass state when given event occurred.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_event_show(uint8_t port,
+ uint32_t event,
+ uint32_t *state);
+
+/**
+ * Set bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The assigned state when given event occurs.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_event_store(uint8_t port,
+ uint32_t event,
+ uint32_t state);
+
+/**
+ * Set bypass watchdog timeout count.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param timeout
+ * The timeout to be set.
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout);
+
+/**
+ * Get bypass firmware version.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param ver
+ * The firmware version
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver);
+
+/**
+ * Return bypass watchdog timeout in seconds
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param wd_timeout
+ * The returned watchdog timeout; "0" means the timer has expired
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
+
+/**
+ * Reset bypass watchdog timer
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_bypass_wd_reset(uint8_t port);
+
+
/**
* Response sent back to ixgbe driver from user app after callback
*/
@@ -446,4 +617,48 @@ struct rte_pmd_ixgbe_mb_event_param {
uint16_t retval; /**< return value */
void *msg; /**< pointer to message */
};
+enum {
+ RTE_PMD_IXGBE_BYPASS_MODE_NONE,
+ RTE_PMD_IXGBE_BYPASS_MODE_NORMAL,
+ RTE_PMD_IXGBE_BYPASS_MODE_BYPASS,
+ RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE,
+ RTE_PMD_IXGBE_BYPASS_MODE_NUM,
+};
+
+#define RTE_PMD_IXGBE_BYPASS_MODE_VALID(x) \
+ ((x) > RTE_PMD_IXGBE_BYPASS_MODE_NONE && \
+ (x) < RTE_PMD_IXGBE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_PMD_IXGBE_BYPASS_EVENT_NONE,
+ RTE_PMD_IXGBE_BYPASS_EVENT_START,
+ RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON = RTE_PMD_IXGBE_BYPASS_EVENT_START,
+ RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON,
+ RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF,
+ RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF,
+ RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT,
+ RTE_PMD_IXGBE_BYPASS_EVENT_NUM
+};
+
+#define RTE_PMD_IXGBE_BYPASS_EVENT_VALID(x) \
+ ((x) > RTE_PMD_IXGBE_BYPASS_EVENT_NONE && \
+ (x) < RTE_PMD_IXGBE_BYPASS_EVENT_NUM)
+
+enum {
+ RTE_PMD_IXGBE_BYPASS_TMT_OFF, /* timeout disabled. */
+ RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
+ RTE_PMD_IXGBE_BYPASS_TMT_NUM
+};
+
+#define RTE_PMD_IXGBE_BYPASS_TMT_VALID(x) \
+ ((x) == RTE_PMD_IXGBE_BYPASS_TMT_OFF || \
+ ((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \
+ (x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM))
+
#endif /* _PMD_IXGBE_H_ */
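For reference, a minimal sketch of how the new bypass control path fits together from an application, using only the functions and enum values declared above; the port number, the 8-second timeout and the helper name are illustrative assumptions.

#include <rte_pmd_ixgbe.h>

static int
app_bypass_arm(uint8_t port)
{
	uint32_t state = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL;
	int ret;

	ret = rte_pmd_ixgbe_bypass_init(port);
	if (ret)
		return ret;
	/* Fall into bypass mode if the watchdog is not reset in time. */
	ret = rte_pmd_ixgbe_bypass_event_store(port,
			RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT,
			RTE_PMD_IXGBE_BYPASS_MODE_BYPASS);
	if (ret)
		return ret;
	ret = rte_pmd_ixgbe_bypass_wd_timeout_store(port,
			RTE_PMD_IXGBE_BYPASS_TMT_8_SEC);
	if (ret)
		return ret;
	/* Start in normal (non-bypass) forwarding mode. */
	return rte_pmd_ixgbe_bypass_state_set(port, &state);
}

/* The application is then expected to call
 * rte_pmd_ixgbe_bypass_wd_reset(port) periodically from its main loop
 * to keep the watchdog from expiring. */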
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 45a57e33..bf776742 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -38,3 +38,17 @@ DPDK_17.05 {
rte_pmd_ixgbe_ping_vf;
rte_pmd_ixgbe_set_tc_bw_alloc;
} DPDK_17.02;
+
+DPDK_17.08 {
+ global:
+
+ rte_pmd_ixgbe_bypass_event_show;
+ rte_pmd_ixgbe_bypass_event_store;
+ rte_pmd_ixgbe_bypass_init;
+ rte_pmd_ixgbe_bypass_state_set;
+ rte_pmd_ixgbe_bypass_state_show;
+ rte_pmd_ixgbe_bypass_ver_show;
+ rte_pmd_ixgbe_bypass_wd_reset;
+ rte_pmd_ixgbe_bypass_wd_timeout_show;
+ rte_pmd_ixgbe_bypass_wd_timeout_store;
+} DPDK_17.05;
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index f688d919..72a2733b 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -145,7 +145,7 @@ eth_kni_start(struct rte_eth_dev *dev)
uint16_t port_id = dev->data->port_id;
struct rte_mempool *mb_pool;
struct rte_kni_conf conf;
- const char *name = dev->data->name + 4; /* remove net_ */
+ const char *name = dev->device->name + 4; /* remove net_ */
snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", name);
conf.force_bind = 0;
diff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h
index 67eaa452..de58c7cc 100644
--- a/drivers/net/liquidio/base/lio_hw_defs.h
+++ b/drivers/net/liquidio/base/lio_hw_defs.h
@@ -42,6 +42,12 @@
#define LIO_CN23XX_VF_VID 0x9712
+/* CN23xx subsystem device ids */
+#define PCI_SUBSYS_DEV_ID_CN2350_210 0x0004
+#define PCI_SUBSYS_DEV_ID_CN2360_210 0x0005
+#define PCI_SUBSYS_DEV_ID_CN2360_225 0x0006
+#define PCI_SUBSYS_DEV_ID_CN2350_225 0x0007
+
/* --------------------------CONFIG VALUES------------------------ */
/* CN23xx IQ configuration macros */
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index c7f1fb64..479936a5 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -395,6 +395,25 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ devinfo->pci_dev = pci_dev;
+
+ switch (pci_dev->id.subsystem_device_id) {
+ /* CN23xx 10G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_210:
+ case PCI_SUBSYS_DEV_ID_CN2360_210:
+ devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ break;
+ /* CN23xx 25G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_225:
+ case PCI_SUBSYS_DEV_ID_CN2360_225:
+ devinfo->speed_capa = ETH_LINK_SPEED_25G;
+ break;
+ default:
+ lio_dev_err(lio_dev,
+ "Unknown CN23XX subsystem device id. Not setting speed capability.\n");
+ }
devinfo->max_rx_queues = lio_dev->max_rx_queues;
devinfo->max_tx_queues = lio_dev->max_tx_queues;
@@ -1977,7 +1996,7 @@ lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct lio_device *lio_dev = LIO_DEV(eth_dev);
PMD_INIT_FUNC_TRACE();
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 9533015c..2bbb893c 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -81,28 +81,6 @@ lio_droq_destroy_ring_buffers(struct lio_droq *droq)
lio_droq_reset_indices(droq);
}
-static void *
-lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
-{
- struct lio_droq *droq = lio_dev->droq[q_no];
- struct rte_mempool *mpool = droq->mpool;
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mpool);
- if (m == NULL) {
- lio_dev_err(lio_dev, "Cannot allocate\n");
- return NULL;
- }
-
- rte_mbuf_refcnt_set(m, 1);
- m->next = NULL;
- m->data_off = RTE_PKTMBUF_HEADROOM;
- m->nb_segs = 1;
- m->pool = mpool;
-
- return m;
-}
-
static int
lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
struct lio_droq *droq)
@@ -112,7 +90,7 @@ lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
void *buf;
for (i = 0; i < droq->max_count; i++) {
- buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ buf = rte_pktmbuf_alloc(droq->mpool);
if (buf == NULL) {
lio_dev_err(lio_dev, "buffer alloc failed\n");
droq->stats.rx_alloc_failure++;
@@ -378,7 +356,6 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
/* lio_droq_refill
*
- * @param lio_dev - pointer to the lio device structure
* @param droq - droq in which descriptors require new buffers.
*
* Description:
@@ -394,7 +371,7 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
* This routine is called with droq->lock held.
*/
static uint32_t
-lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
+lio_droq_refill(struct lio_droq *droq)
{
struct lio_droq_desc *desc_ring;
uint32_t desc_refilled = 0;
@@ -407,7 +384,7 @@ lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
* reuse the buffer, else allocate.
*/
if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
- buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ buf = rte_pktmbuf_alloc(droq->mpool);
/* If a buffer could not be allocated, no point in
* continuing
*/
@@ -489,9 +466,6 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
droq->refill_count++;
if (likely(nicbuf != NULL)) {
- nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
- nicbuf->nb_segs = 1;
- nicbuf->next = NULL;
/* We don't have a way to pass flags yet */
nicbuf->ol_flags = 0;
if (rh->r_dh.has_hash) {
@@ -545,9 +519,6 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
if (!pkt_len)
first_buf = nicbuf;
- nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
- nicbuf->nb_segs = 1;
- nicbuf->next = NULL;
nicbuf->port = lio_dev->port_id;
/* We don't have a way to pass
* flags yet
@@ -617,7 +588,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
}
if (droq->refill_count >= droq->refill_threshold) {
- int desc_refilled = lio_droq_refill(lio_dev, droq);
+ int desc_refilled = lio_droq_refill(droq);
/* Flush the droq descriptor data to memory to be sure
* that when we update the credits the data in memory is
diff --git a/drivers/net/liquidio/lio_struct.h b/drivers/net/liquidio/lio_struct.h
index 26f803f9..d9cbf000 100644
--- a/drivers/net/liquidio/lio_struct.h
+++ b/drivers/net/liquidio/lio_struct.h
@@ -94,7 +94,7 @@ struct lio_droq_stats {
/** Num of vxlan packets received; */
uint64_t rx_vxlan;
- /** Num of failures of lio_recv_buffer_alloc() */
+ /** Num of failures of rte_pktmbuf_alloc() */
uint64_t rx_alloc_failure;
};
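The liquidio changes above drop the driver-private lio_recv_buffer_alloc() helper in favour of plain rte_pktmbuf_alloc(); the sketch below (an illustration, assuming the standard DPDK mbuf API, with a hypothetical helper name) shows why the hand-rolled field initialisation was redundant.

#include <rte_mbuf.h>

/* Hypothetical refill helper equivalent to what the removed code did. */
static struct rte_mbuf *
refill_one(struct rte_mempool *mpool)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);

	if (m == NULL)
		return NULL;
	/* rte_pktmbuf_alloc() already returns the mbuf with next == NULL,
	 * nb_segs == 1, data_off == RTE_PKTMBUF_HEADROOM, a reference
	 * count of 1 and m->pool pointing at mpool, so none of those
	 * assignments need to be repeated here. */
	return m;
}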
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index e873fb48..c045bd79 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -40,7 +40,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
# Basic CFLAGS.
CFLAGS += -O3
-CFLAGS += -std=gnu99 -Wall -Wextra
+CFLAGS += -std=c11 -Wall -Wextra
CFLAGS += -g
CFLAGS += -I.
CFLAGS += -D_BSD_SOURCE
@@ -84,6 +84,10 @@ ifdef CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS
CFLAGS += -DMLX4_PMD_SOFT_COUNTERS=$(CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS)
endif
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y)
+CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS
+endif
+
include $(RTE_SDK)/mk/rte.lib.mk
# Generate and clean-up mlx4_autoconf.h.
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index ec4419a8..055de49a 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -75,6 +75,7 @@
#include <rte_memory.h>
#include <rte_flow.h>
#include <rte_kvargs.h>
+#include <rte_interrupts.h>
/* Generated configuration header. */
#include "mlx4_autoconf.h"
@@ -127,6 +128,18 @@ const char *pmd_mlx4_init_params[] = {
NULL,
};
+static int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
+
+static int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
+
+static int
+priv_rx_intr_vec_enable(struct priv *priv);
+
+static void
+priv_rx_intr_vec_disable(struct priv *priv);
+
/**
* Check if running as a secondary process.
*
@@ -533,13 +546,93 @@ txq_cleanup(struct txq *txq);
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, int inactive, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp);
+ unsigned int socket, int inactive,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp, int children_n,
+ struct rxq *rxq_parent);
static void
rxq_cleanup(struct rxq *rxq);
/**
+ * Create RSS parent queue.
+ *
+ * The new parent is inserted in front of the list in the private structure.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param queues
+ * Queue indices array; if NULL, use all Rx queues.
+ * @param children_n
+ * The number of entries in queues[].
+ *
+ * @return
+ * Pointer to a parent rxq structure, NULL on failure.
+ */
+struct rxq *
+priv_parent_create(struct priv *priv,
+ uint16_t queues[],
+ uint16_t children_n)
+{
+ int ret;
+ uint16_t i;
+ struct rxq *parent;
+
+ parent = rte_zmalloc("parent queue",
+ sizeof(*parent),
+ RTE_CACHE_LINE_SIZE);
+ if (!parent) {
+ ERROR("cannot allocate memory for RSS parent queue");
+ return NULL;
+ }
+ ret = rxq_setup(priv->dev, parent, 0, 0, 0,
+ NULL, NULL, children_n, NULL);
+ if (ret) {
+ rte_free(parent);
+ return NULL;
+ }
+ parent->rss.queues_n = children_n;
+ if (queues) {
+ for (i = 0; i < children_n; ++i)
+ parent->rss.queues[i] = queues[i];
+ } else {
+ /* the default RSS ring case */
+ assert(priv->rxqs_n == children_n);
+ for (i = 0; i < priv->rxqs_n; ++i)
+ parent->rss.queues[i] = i;
+ }
+ LIST_INSERT_HEAD(&priv->parents, parent, next);
+ return parent;
+}
+
+/**
+ * Clean up RX queue parent structure.
+ *
+ * @param parent
+ * RX queue parent structure.
+ */
+void
+rxq_parent_cleanup(struct rxq *parent)
+{
+ LIST_REMOVE(parent, next);
+ rxq_cleanup(parent);
+ rte_free(parent);
+}
+
+/**
+ * Clean up parent structures from the parent list.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_parent_list_cleanup(struct priv *priv)
+{
+ while (!LIST_EMPTY(&priv->parents))
+ rxq_parent_cleanup(LIST_FIRST(&priv->parents));
+}
+
+/**
* Ethernet device configuration.
*
* Prepare the driver for a given number of TX and RX queues.
@@ -558,7 +651,6 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int rxqs_n = dev->data->nb_rx_queues;
unsigned int txqs_n = dev->data->nb_tx_queues;
unsigned int tmp;
- int ret;
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
@@ -569,7 +661,7 @@ dev_configure(struct rte_eth_dev *dev)
}
if (rxqs_n == priv->rxqs_n)
return 0;
- if (!rte_is_power_of_2(rxqs_n)) {
+ if (!rte_is_power_of_2(rxqs_n) && !priv->isolated) {
unsigned n_active;
n_active = rte_align32pow2(rxqs_n + 1) >> 1;
@@ -588,7 +680,7 @@ dev_configure(struct rte_eth_dev *dev)
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] != NULL)
return EINVAL;
- rxq_cleanup(&priv->rxq_parent);
+ priv_parent_list_cleanup(priv);
priv->rss = 0;
priv->rxqs_n = 0;
}
@@ -613,14 +705,14 @@ dev_configure(struct rte_eth_dev *dev)
priv->rss = 1;
tmp = priv->rxqs_n;
priv->rxqs_n = rxqs_n;
- ret = rxq_setup(dev, &priv->rxq_parent, 0, 0, 0, NULL, NULL);
- if (!ret)
+ if (priv->isolated)
+ return 0;
+ if (priv_parent_create(priv, NULL, priv->rxqs_n))
return 0;
/* Failure, rollback. */
priv->rss = 0;
priv->rxqs_n = tmp;
- assert(ret > 0);
- return ret;
+ return ENOMEM;
}
/**
@@ -1098,7 +1190,7 @@ static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
/* For best performance, this function should not be inlined. */
static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
- __attribute__((noinline));
+ __rte_noinline;
/**
* Register mempool as a memory region.
@@ -2499,11 +2591,12 @@ priv_mac_addr_del(struct priv *priv, unsigned int mac_index)
{
unsigned int i;
+ assert(!priv->isolated);
assert(mac_index < elemof(priv->mac));
if (!BITFIELD_ISSET(priv->mac_configured, mac_index))
return;
if (priv->rss) {
- rxq_mac_addr_del(&priv->rxq_parent, mac_index);
+ rxq_mac_addr_del(LIST_FIRST(&priv->parents), mac_index);
goto end;
}
for (i = 0; (i != priv->dev->data->nb_rx_queues); ++i)
@@ -2570,7 +2663,7 @@ priv_mac_addr_add(struct priv *priv, unsigned int mac_index,
goto end;
}
if (priv->rss) {
- ret = rxq_mac_addr_add(&priv->rxq_parent, mac_index);
+ ret = rxq_mac_addr_add(LIST_FIRST(&priv->parents), mac_index);
if (ret)
return ret;
goto end;
@@ -2748,14 +2841,17 @@ rxq_cleanup(struct rxq *rxq)
rxq->if_cq,
&params));
}
- if (rxq->qp != NULL) {
+ if (rxq->qp != NULL && !rxq->priv->isolated) {
rxq_promiscuous_disable(rxq);
rxq_allmulticast_disable(rxq);
rxq_mac_addrs_del(rxq);
- claim_zero(ibv_destroy_qp(rxq->qp));
}
+ if (rxq->qp != NULL)
+ claim_zero(ibv_destroy_qp(rxq->qp));
if (rxq->cq != NULL)
claim_zero(ibv_destroy_cq(rxq->cq));
+ if (rxq->channel != NULL)
+ claim_zero(ibv_destroy_comp_channel(rxq->channel));
if (rxq->rd != NULL) {
struct ibv_exp_destroy_res_domain_attr attr = {
.comp_mask = 0,
@@ -2987,6 +3083,13 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
NB_SEGS(rep) = 0x2a;
PORT(rep) = 0x2a;
rep->ol_flags = -1;
+ /*
+ * Clear special flags in mbuf to avoid
+ * crashing while freeing.
+ */
+ rep->ol_flags &=
+ ~(uint64_t)(IND_ATTACHED_MBUF |
+ CTRL_MBUF_FLAG);
#endif
assert(rep->buf_len == seg->buf_len);
/* Reconfigure sge to use rep instead of seg. */
@@ -3330,15 +3433,18 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
* Completion queue to associate with QP.
* @param desc
* Number of descriptors in QP (hint only).
- * @param parent
- * If nonzero, create a parent QP, otherwise a child.
+ * @param children_n
+ * If nonzero, the number of children for a parent QP; zero for a child.
+ * @param rxq_parent
+ * Pointer to the parent rxq for a child, NULL otherwise.
*
* @return
* QP pointer or NULL in case of error.
*/
static struct ibv_qp *
rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
- int parent, struct ibv_exp_res_domain *rd)
+ int children_n, struct ibv_exp_res_domain *rd,
+ struct rxq *rxq_parent)
{
struct ibv_exp_qp_init_attr attr = {
/* CQ to be associated with the send queue. */
@@ -3368,16 +3474,16 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
attr.max_inl_recv = priv->inl_recv_size,
attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
#endif
- if (parent) {
+ if (children_n > 0) {
attr.qpg.qpg_type = IBV_EXP_QPG_PARENT;
/* TSS isn't necessary. */
attr.qpg.parent_attrib.tss_child_count = 0;
attr.qpg.parent_attrib.rss_child_count =
- rte_align32pow2(priv->rxqs_n + 1) >> 1;
+ rte_align32pow2(children_n + 1) >> 1;
DEBUG("initializing parent RSS queue");
} else {
attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX;
- attr.qpg.qpg_parent = priv->rxq_parent.qp;
+ attr.qpg.qpg_parent = rxq_parent->qp;
DEBUG("initializing child RSS queue");
}
return ibv_exp_create_qp(priv->ctx, &attr);
@@ -3413,13 +3519,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
struct ibv_recv_wr *bad_wr;
unsigned int mb_len;
int err;
- int parent = (rxq == &priv->rxq_parent);
- if (parent) {
- ERROR("%p: cannot rehash parent queue %p",
- (void *)dev, (void *)rxq);
- return EINVAL;
- }
mb_len = rte_pktmbuf_data_room_size(rxq->mp);
DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
/* Number of descriptors and mbufs currently allocated. */
@@ -3451,7 +3551,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
return 0;
}
/* Remove attached flows if RSS is disabled (no parent queue). */
- if (!priv->rss) {
+ if (!priv->rss && !priv->isolated) {
rxq_allmulticast_disable(&tmpl);
rxq_promiscuous_disable(&tmpl);
rxq_mac_addrs_del(&tmpl);
@@ -3464,6 +3564,8 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
}
/* From now on, any failure will render the queue unusable.
* Reinitialize QP. */
+ if (!tmpl.qp)
+ goto skip_init;
mod = (struct ibv_exp_qp_attr){ .qp_state = IBV_QPS_RESET };
err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
if (err) {
@@ -3471,12 +3573,6 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
assert(err > 0);
return err;
}
- err = ibv_resize_cq(tmpl.cq, desc_n);
- if (err) {
- ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err));
- assert(err > 0);
- return err;
- }
mod = (struct ibv_exp_qp_attr){
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
@@ -3485,9 +3581,6 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
};
err = ibv_exp_modify_qp(tmpl.qp, &mod,
(IBV_EXP_QP_STATE |
-#ifdef RSS_SUPPORT
- (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
-#endif /* RSS_SUPPORT */
IBV_EXP_QP_PORT));
if (err) {
ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
@@ -3495,8 +3588,15 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
assert(err > 0);
return err;
};
+skip_init:
+ err = ibv_resize_cq(tmpl.cq, desc_n);
+ if (err) {
+ ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err));
+ assert(err > 0);
+ return err;
+ }
/* Reconfigure flows. Do not care for errors. */
- if (!priv->rss) {
+ if (!priv->rss && !priv->isolated) {
rxq_mac_addrs_add(&tmpl);
if (priv->promisc)
rxq_promiscuous_enable(&tmpl);
@@ -3562,6 +3662,8 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
rxq->elts_n = 0;
rte_free(rxq->elts.sp);
rxq->elts.sp = NULL;
+ if (!tmpl.qp)
+ goto skip_rtr;
/* Post WRs. */
err = ibv_post_recv(tmpl.qp,
(tmpl.sp ?
@@ -3589,6 +3691,116 @@ skip_rtr:
}
/**
+ * Create verbs QP resources associated with a rxq.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param inactive
+ * If true, the queue is disabled because its index is higher or
+ * equal to the real number of queues, which must be a power of 2.
+ * @param children_n
+ * The number of children for a parent, zero for a child.
+ * @param rxq_parent
+ * Pointer to the parent RX structure for a child in the RSS case,
+ * NULL for a parent.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+rxq_create_qp(struct rxq *rxq,
+ uint16_t desc,
+ int inactive,
+ int children_n,
+ struct rxq *rxq_parent)
+{
+ int ret;
+ struct ibv_exp_qp_attr mod;
+ struct ibv_exp_query_intf_params params;
+ enum ibv_exp_query_intf_status status;
+ struct ibv_recv_wr *bad_wr;
+ int parent = (children_n > 0);
+ struct priv *priv = rxq->priv;
+
+#ifdef RSS_SUPPORT
+ if (priv->rss && !inactive && (rxq_parent || parent))
+ rxq->qp = rxq_setup_qp_rss(priv, rxq->cq, desc,
+ children_n, rxq->rd,
+ rxq_parent);
+ else
+#endif /* RSS_SUPPORT */
+ rxq->qp = rxq_setup_qp(priv, rxq->cq, desc, rxq->rd);
+ if (rxq->qp == NULL) {
+ ret = (errno ? errno : EINVAL);
+ ERROR("QP creation failure: %s",
+ strerror(ret));
+ return ret;
+ }
+ mod = (struct ibv_exp_qp_attr){
+ /* Move the QP to this state. */
+ .qp_state = IBV_QPS_INIT,
+ /* Primary port number. */
+ .port_num = priv->port
+ };
+ ret = ibv_exp_modify_qp(rxq->qp, &mod,
+ (IBV_EXP_QP_STATE |
+#ifdef RSS_SUPPORT
+ (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
+#endif /* RSS_SUPPORT */
+ IBV_EXP_QP_PORT));
+ if (ret) {
+ ERROR("QP state to IBV_QPS_INIT failed: %s",
+ strerror(ret));
+ return ret;
+ }
+ if (!priv->isolated && (parent || !priv->rss)) {
+ /* Configure MAC and broadcast addresses. */
+ ret = rxq_mac_addrs_add(rxq);
+ if (ret) {
+ ERROR("QP flow attachment failed: %s",
+ strerror(ret));
+ return ret;
+ }
+ }
+ if (!parent) {
+ ret = ibv_post_recv(rxq->qp,
+ (rxq->sp ?
+ &(*rxq->elts.sp)[0].wr :
+ &(*rxq->elts.no_sp)[0].wr),
+ &bad_wr);
+ if (ret) {
+ ERROR("ibv_post_recv() failed for WR %p: %s",
+ (void *)bad_wr,
+ strerror(ret));
+ return ret;
+ }
+ }
+ mod = (struct ibv_exp_qp_attr){
+ .qp_state = IBV_QPS_RTR
+ };
+ ret = ibv_exp_modify_qp(rxq->qp, &mod, IBV_EXP_QP_STATE);
+ if (ret) {
+ ERROR("QP state to IBV_QPS_RTR failed: %s",
+ strerror(ret));
+ return ret;
+ }
+ params = (struct ibv_exp_query_intf_params){
+ .intf_scope = IBV_EXP_INTF_GLOBAL,
+ .intf = IBV_EXP_INTF_QP_BURST,
+ .obj = rxq->qp,
+ };
+ rxq->if_qp = ibv_exp_query_intf(priv->ctx, &params, &status);
+ if (rxq->if_qp == NULL) {
+ ERROR("QP interface family query failed with status %d",
+ status);
+ return errno;
+ }
+ return 0;
+}
+
+/**
* Configure a RX queue.
*
* @param dev
@@ -3606,14 +3818,21 @@ skip_rtr:
* Thresholds parameters.
* @param mp
* Memory pool for buffer allocations.
+ * @param children_n
+ * The number of children for a parent, zero for a child.
+ * @param rxq_parent
+ * Pointer to the parent RX structure (or NULL) for a child,
+ * NULL for a parent.
*
* @return
* 0 on success, errno value on failure.
*/
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, int inactive, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+ unsigned int socket, int inactive,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp, int children_n,
+ struct rxq *rxq_parent)
{
struct priv *priv = dev->data->dev_private;
struct rxq tmpl = {
@@ -3621,17 +3840,15 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
.mp = mp,
.socket = socket
};
- struct ibv_exp_qp_attr mod;
union {
struct ibv_exp_query_intf_params params;
struct ibv_exp_cq_init_attr cq;
struct ibv_exp_res_domain_init_attr rd;
} attr;
enum ibv_exp_query_intf_status status;
- struct ibv_recv_wr *bad_wr;
unsigned int mb_len;
int ret = 0;
- int parent = (rxq == &priv->rxq_parent);
+ int parent = (children_n > 0);
(void)conf; /* Thresholds configuration (ignored). */
/*
@@ -3696,11 +3913,22 @@ skip_mr:
(void *)dev, strerror(ret));
goto error;
}
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ tmpl.channel = ibv_create_comp_channel(priv->ctx);
+ if (tmpl.channel == NULL) {
+ ret = ENOMEM;
+ ERROR("%p: Rx interrupt completion channel creation"
+ " failure: %s",
+ (void *)dev, strerror(ret));
+ goto error;
+ }
+ }
attr.cq = (struct ibv_exp_cq_init_attr){
.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
.res_domain = tmpl.rd,
};
- tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0,
+ &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
ERROR("%p: CQ creation failure: %s",
@@ -3711,45 +3939,6 @@ skip_mr:
priv->device_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.max_sge);
-#ifdef RSS_SUPPORT
- if (priv->rss && !inactive)
- tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
- tmpl.rd);
- else
-#endif /* RSS_SUPPORT */
- tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
- if (tmpl.qp == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("%p: QP creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- mod = (struct ibv_exp_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
- };
- ret = ibv_exp_modify_qp(tmpl.qp, &mod,
- (IBV_EXP_QP_STATE |
-#ifdef RSS_SUPPORT
- (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
-#endif /* RSS_SUPPORT */
- IBV_EXP_QP_PORT));
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- if ((parent) || (!priv->rss)) {
- /* Configure MAC and broadcast addresses. */
- ret = rxq_mac_addrs_add(&tmpl);
- if (ret) {
- ERROR("%p: QP flow attachment failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- }
/* Allocate descriptors for RX queues, except for the RSS parent. */
if (parent)
goto skip_alloc;
@@ -3760,29 +3949,14 @@ skip_mr:
if (ret) {
ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(ret));
- goto error;
- }
- ret = ibv_post_recv(tmpl.qp,
- (tmpl.sp ?
- &(*tmpl.elts.sp)[0].wr :
- &(*tmpl.elts.no_sp)[0].wr),
- &bad_wr);
- if (ret) {
- ERROR("%p: ibv_post_recv() failed for WR %p: %s",
- (void *)dev,
- (void *)bad_wr,
- strerror(ret));
- goto error;
+ return ret;
}
skip_alloc:
- mod = (struct ibv_exp_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
- (void *)dev, strerror(ret));
- goto error;
+ if (parent || rxq_parent || !priv->rss) {
+ ret = rxq_create_qp(&tmpl, desc, inactive,
+ children_n, rxq_parent);
+ if (ret)
+ goto error;
}
/* Save port ID. */
tmpl.port_id = dev->data->port_id;
@@ -3794,21 +3968,11 @@ skip_alloc:
};
tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
if (tmpl.if_cq == NULL) {
+ ret = EINVAL;
ERROR("%p: CQ interface family query failed with status %d",
(void *)dev, status);
goto error;
}
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_QP_BURST,
- .obj = tmpl.qp,
- };
- tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_qp == NULL) {
- ERROR("%p: QP interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
/* Clean up rxq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
rxq_cleanup(rxq);
@@ -3846,6 +4010,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
+ struct rxq *parent;
struct priv *priv = dev->data->dev_private;
struct rxq *rxq = (*priv->rxqs)[idx];
int inactive = 0;
@@ -3880,9 +4045,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -ENOMEM;
}
}
- if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1)
- inactive = 1;
- ret = rxq_setup(dev, rxq, desc, socket, inactive, conf, mp);
+ if (priv->rss && !priv->isolated) {
+ /* The list consists of the single default one. */
+ parent = LIST_FIRST(&priv->parents);
+ if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1)
+ inactive = 1;
+ } else {
+ parent = NULL;
+ }
+ ret = rxq_setup(dev, rxq, desc, socket,
+ inactive, conf, mp, 0, parent);
if (ret)
rte_free(rxq);
else {
@@ -3919,7 +4091,6 @@ mlx4_rx_queue_release(void *dpdk_rxq)
return;
priv = rxq->priv;
priv_lock(priv);
- assert(rxq != &priv->rxq_parent);
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] == rxq) {
DEBUG("%p: removing RX queue %p from list",
@@ -3970,8 +4141,11 @@ mlx4_dev_start(struct rte_eth_dev *dev)
}
DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
priv->started = 1;
- if (priv->rss) {
- rxq = &priv->rxq_parent;
+ if (priv->isolated) {
+ rxq = NULL;
+ r = 1;
+ } else if (priv->rss) {
+ rxq = LIST_FIRST(&priv->parents);
r = 1;
} else {
rxq = (*priv->rxqs)[0];
@@ -4005,6 +4179,12 @@ mlx4_dev_start(struct rte_eth_dev *dev)
(void *)dev);
goto err;
}
+ ret = priv_rx_intr_vec_enable(priv);
+ if (ret) {
+ ERROR("%p: Rx interrupt vector creation failed",
+ (void *)dev);
+ goto err;
+ }
ret = mlx4_priv_flow_start(priv);
if (ret) {
ERROR("%p: flow start failed: %s",
@@ -4053,8 +4233,11 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
}
DEBUG("%p: detaching flows from all RX queues", (void *)dev);
priv->started = 0;
- if (priv->rss) {
- rxq = &priv->rxq_parent;
+ if (priv->isolated) {
+ rxq = NULL;
+ r = 1;
+ } else if (priv->rss) {
+ rxq = LIST_FIRST(&priv->parents);
r = 1;
} else {
rxq = (*priv->rxqs)[0];
@@ -4188,7 +4371,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
priv->txqs = NULL;
}
if (priv->rss)
- rxq_cleanup(&priv->rxq_parent);
+ priv_parent_list_cleanup(priv);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(ibv_dealloc_pd(priv->pd));
@@ -4197,6 +4380,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
assert(priv->ctx == NULL);
priv_dev_removal_interrupt_handler_uninstall(priv, dev);
priv_dev_link_interrupt_handler_uninstall(priv, dev);
+ priv_rx_intr_vec_disable(priv);
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -4300,7 +4484,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (priv == NULL)
return;
@@ -4481,6 +4665,8 @@ mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
if (mlx4_is_secondary())
return;
priv_lock(priv);
+ if (priv->isolated)
+ goto end;
DEBUG("%p: removing MAC address from index %" PRIu32,
(void *)dev, index);
/* Last array entry is reserved for broadcast. */
@@ -4514,6 +4700,12 @@ mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
return -ENOTSUP;
(void)vmdq;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot add MAC address, "
+ "device is in isolated mode", (void *)dev);
+ re = EPERM;
+ goto end;
+ }
DEBUG("%p: adding MAC address at index %" PRIu32,
(void *)dev, index);
/* Last array entry is reserved for broadcast. */
@@ -4561,6 +4753,12 @@ mlx4_promiscuous_enable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot enable promiscuous, "
+ "device is in isolated mode", (void *)dev);
+ priv_unlock(priv);
+ return;
+ }
if (priv->promisc) {
priv_unlock(priv);
return;
@@ -4569,7 +4767,7 @@ mlx4_promiscuous_enable(struct rte_eth_dev *dev)
if (!priv->started)
goto end;
if (priv->rss) {
- ret = rxq_promiscuous_enable(&priv->rxq_parent);
+ ret = rxq_promiscuous_enable(LIST_FIRST(&priv->parents));
if (ret) {
priv_unlock(priv);
return;
@@ -4609,12 +4807,12 @@ mlx4_promiscuous_disable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
- if (!priv->promisc) {
+ if (!priv->promisc || priv->isolated) {
priv_unlock(priv);
return;
}
if (priv->rss) {
- rxq_promiscuous_disable(&priv->rxq_parent);
+ rxq_promiscuous_disable(LIST_FIRST(&priv->parents));
goto end;
}
for (i = 0; (i != priv->rxqs_n); ++i)
@@ -4641,6 +4839,12 @@ mlx4_allmulticast_enable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot enable allmulticast, "
+ "device is in isolated mode", (void *)dev);
+ priv_unlock(priv);
+ return;
+ }
if (priv->allmulti) {
priv_unlock(priv);
return;
@@ -4649,7 +4853,7 @@ mlx4_allmulticast_enable(struct rte_eth_dev *dev)
if (!priv->started)
goto end;
if (priv->rss) {
- ret = rxq_allmulticast_enable(&priv->rxq_parent);
+ ret = rxq_allmulticast_enable(LIST_FIRST(&priv->parents));
if (ret) {
priv_unlock(priv);
return;
@@ -4689,12 +4893,12 @@ mlx4_allmulticast_disable(struct rte_eth_dev *dev)
if (mlx4_is_secondary())
return;
priv_lock(priv);
- if (!priv->allmulti) {
+ if (!priv->allmulti || priv->isolated) {
priv_unlock(priv);
return;
}
if (priv->rss) {
- rxq_allmulticast_disable(&priv->rxq_parent);
+ rxq_allmulticast_disable(LIST_FIRST(&priv->parents));
goto end;
}
for (i = 0; (i != priv->rxqs_n); ++i)
@@ -4832,7 +5036,7 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
/* Reenable non-RSS queue attributes. No need to check
* for errors at this stage. */
- if (!priv->rss) {
+ if (!priv->rss && !priv->isolated) {
rxq_mac_addrs_add(rxq);
if (priv->promisc)
rxq_promiscuous_enable(rxq);
@@ -5003,7 +5207,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
* Rehashing flows in all RX queues is necessary.
*/
if (priv->rss)
- rxq_mac_addrs_del(&priv->rxq_parent);
+ rxq_mac_addrs_del(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] != NULL)
@@ -5011,7 +5215,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->vlan_filter[j].enabled = 1;
if (priv->started) {
if (priv->rss)
- rxq_mac_addrs_add(&priv->rxq_parent);
+ rxq_mac_addrs_add(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
@@ -5025,7 +5229,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
* Rehashing flows in all RX queues is necessary.
*/
if (priv->rss)
- rxq_mac_addrs_del(&priv->rxq_parent);
+ rxq_mac_addrs_del(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] != NULL)
@@ -5033,7 +5237,7 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->vlan_filter[j].enabled = 0;
if (priv->started) {
if (priv->rss)
- rxq_mac_addrs_add(&priv->rxq_parent);
+ rxq_mac_addrs_add(LIST_FIRST(&priv->parents));
else
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
@@ -5067,6 +5271,12 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
if (mlx4_is_secondary())
return -E_RTE_SECONDARY;
priv_lock(priv);
+ if (priv->isolated) {
+ DEBUG("%p: cannot set vlan filter, "
+ "device is in isolated mode", (void *)dev);
+ priv_unlock(priv);
+ return -EINVAL;
+ }
ret = vlan_filter_set(dev, vlan_id, on);
priv_unlock(priv);
assert(ret >= 0);
@@ -5079,6 +5289,7 @@ const struct rte_flow_ops mlx4_flow_ops = {
.destroy = mlx4_flow_destroy,
.flush = mlx4_flow_flush,
.query = NULL,
+ .isolate = mlx4_flow_isolate,
};
/**
@@ -5157,6 +5368,8 @@ static const struct eth_dev_ops mlx4_dev_ops = {
.mac_addr_set = mlx4_mac_addr_set,
.mtu_set = mlx4_dev_set_mtu,
.filter_ctrl = mlx4_dev_filter_ctrl,
+ .rx_queue_intr_enable = mlx4_rx_intr_enable,
+ .rx_queue_intr_disable = mlx4_rx_intr_disable,
};
/**
@@ -5196,7 +5409,7 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
/* Extract information. */
if (sscanf(line,
"PCI_SLOT_NAME="
- "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
&pci_addr->domain,
&pci_addr->bus,
&pci_addr->devid,
@@ -5308,6 +5521,7 @@ priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
{
struct ibv_async_event event;
int port_change = 0;
+ struct rte_eth_link *link = &dev->data->dev_link;
int ret = 0;
*events = 0;
@@ -5329,22 +5543,20 @@ priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
event.event_type, event.element.port_num);
ibv_ack_async_event(&event);
}
-
- if (port_change ^ priv->pending_alarm) {
- struct rte_eth_link *link = &dev->data->dev_link;
-
- priv->pending_alarm = 0;
- mlx4_link_update(dev, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
+ if (!port_change)
+ return ret;
+ mlx4_link_update(dev, 0);
+ if (((link->link_speed == 0) && link->link_status) ||
+ ((link->link_speed != 0) && !link->link_status)) {
+ if (!priv->pending_alarm) {
/* Inconsistent status, check again later. */
priv->pending_alarm = 1;
rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
mlx4_dev_link_status_handler,
dev);
- } else {
- *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
}
+ } else {
+ *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
}
return ret;
}
@@ -5365,10 +5577,12 @@ mlx4_dev_link_status_handler(void *arg)
priv_lock(priv);
assert(priv->pending_alarm == 1);
+ priv->pending_alarm = 0;
ret = priv_dev_status_handler(priv, dev, &events);
priv_unlock(priv);
if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
}
/**
@@ -5397,7 +5611,8 @@ mlx4_dev_interrupt_handler(void *cb_arg)
i++) {
if (ev & (1 << i)) {
ev &= ~(1 << i);
- _rte_eth_dev_callback_process(dev, i, NULL);
+ _rte_eth_dev_callback_process(dev, i, NULL,
+ NULL);
ret--;
}
}
@@ -5592,6 +5807,154 @@ priv_dev_removal_interrupt_handler_install(struct priv *priv,
}
/**
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+static int
+priv_rx_intr_vec_enable(struct priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ if (!priv->dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ priv_rx_intr_vec_disable(priv);
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -ENOMEM;
+ }
+ intr_handle->type = RTE_INTR_HANDLE_EXT;
+ for (i = 0; i != n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ int fd;
+ int flags;
+ int rc;
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ priv_rx_intr_vec_disable(priv);
+ return -1;
+ }
+ fd = rxq->channel->fd;
+ flags = fcntl(fd, F_GETFL);
+ rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+ ERROR("failed to make Rx interrupt file descriptor"
+ " %d non-blocking for queue index %d", fd, i);
+ priv_rx_intr_vec_disable(priv);
+ return rc;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = fd;
+ count++;
+ }
+ if (!count)
+ priv_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
+
+/**
+ * Clean up Rx interrupts handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_rx_intr_vec_disable(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+static int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rxq *rxq = (*priv->rxqs)[idx];
+ int ret;
+
+ if (!rxq || !rxq->channel)
+ ret = EINVAL;
+ else
+ ret = ibv_req_notify_cq(rxq->cq, 0);
+ if (ret)
+ WARN("unable to arm interrupt on rx queue %d", idx);
+ return -ret;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+static int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rxq *rxq = (*priv->rxqs)[idx];
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ } else {
+ ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq->cq)
+ ret = EINVAL;
+ }
+ if (ret)
+ WARN("unable to disable interrupt on rx queue %d",
+ idx);
+ else
+ ibv_ack_cq_events(rxq->cq, 1);
+ return -ret;
+}
+
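The two callbacks above only arm and acknowledge the receive completion channel; the epoll plumbing built by priv_rx_intr_vec_enable() is consumed through the generic ethdev interrupt API. A minimal application-side sketch, not part of the patch (exact port-id widths vary slightly between DPDK releases, and the 10 ms timeout is arbitrary):

#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

/* Sketch: block until traffic arrives on (port, queue), then poll once. */
static uint16_t
rx_wait_and_poll(uint16_t port, uint16_t queue,
                 struct rte_mbuf **pkts, uint16_t n)
{
	struct rte_epoll_event ev;

	/* Register the queue's event fd in the per-thread epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port, queue, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	/* Arm the interrupt; this ends up in mlx4_rx_intr_enable() above. */
	rte_eth_dev_rx_intr_enable(port, queue);
	/* Sleep until the completion channel fires or 10 ms elapse. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 10);
	/* Acknowledge/disarm (mlx4_rx_intr_disable()) and resume polling. */
	rte_eth_dev_rx_intr_disable(port, queue);
	return rte_eth_rx_burst(port, queue, pkts, n);
}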
+/**
* Verify and store value for device argument.
*
* @param[in] key
@@ -5760,12 +6123,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
- if (ibv_query_device(attr_ctx, &device_attr))
+ if (ibv_query_device(attr_ctx, &device_attr)) {
+ err = ENODEV;
goto error;
+ }
INFO("%u port(s) detected", device_attr.phys_port_cnt);
if (mlx4_args(pci_dev->device.devargs, &conf)) {
ERROR("failed to process device arguments");
+ err = EINVAL;
goto error;
}
/* Use all ports when none are defined */
@@ -5799,19 +6165,23 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("using port %u (%08" PRIx32 ")", port, test);
ctx = ibv_open_device(ibv_dev);
- if (ctx == NULL)
+ if (ctx == NULL) {
+ err = ENODEV;
goto port_error;
+ }
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
ERROR("port query failed: %s", strerror(err));
+ err = ENODEV;
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
ERROR("port %d is not configured in Ethernet mode",
port);
+ err = EINVAL;
goto port_error;
}
@@ -5848,6 +6218,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
#ifdef HAVE_EXP_QUERY_DEVICE
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
+ err = ENODEV;
goto port_error;
}
#ifdef RSS_SUPPORT
@@ -5923,6 +6294,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
" (errno: %s)", strerror(errno));
+ err = ENODEV;
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -6002,8 +6374,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->device->driver = &mlx4_driver.driver;
+ /*
+ * Copy and override interrupt handle to prevent it from
+ * being shared between all ethdev instances of a given PCI
+ * device. This is required to properly handle Rx interrupts
+ * on all ports.
+ */
+ priv->intr_handle_dev = *eth_dev->intr_handle;
+ eth_dev->intr_handle = &priv->intr_handle_dev;
+
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx4_dev_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 9a3bae90..c0ade4f1 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -182,7 +182,13 @@ enum {
(DEBUG__(__VA_ARGS__), 0) \
})[0])
#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n')
+#ifndef MLX4_PMD_DEBUG_BROKEN_VERBS
#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#else /* MLX4_PMD_DEBUG_BROKEN_VERBS */
+#define claim_zero(...) \
+ (void)(((__VA_ARGS__) == 0) || \
+ DEBUG("Assertion `(" # __VA_ARGS__ ") == 0' failed (IGNORED)."))
+#endif /* MLX4_PMD_DEBUG_BROKEN_VERBS */
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
#define claim_positive(...) assert((__VA_ARGS__) >= 0)
#else /* NDEBUG */
@@ -219,6 +225,7 @@ struct rxq_elt {
/* RX queue descriptor. */
struct rxq {
+ LIST_ENTRY(rxq) next; /* Used by parent queue only */
struct priv *priv; /* Back pointer to private data. */
struct rte_mempool *mp; /* Memory Pool for allocations. */
struct ibv_mr *mr; /* Memory Region (for mp). */
@@ -226,6 +233,7 @@ struct rxq {
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+ struct ibv_comp_channel *channel; /* Rx completion channel. */
/*
* Each VLAN ID requires a separate flow steering rule.
*/
@@ -246,6 +254,10 @@ struct rxq {
struct mlx4_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
+ struct {
+ uint16_t queues_n;
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT];
+ } rss;
};
/* TX element. */
@@ -334,24 +346,41 @@ struct priv {
unsigned int rss:1; /* RSS is enabled. */
unsigned int vf:1; /* This is a VF device. */
unsigned int pending_alarm:1; /* An alarm is pending. */
+ unsigned int isolated:1; /* Toggle isolated mode. */
#ifdef INLINE_RECV
unsigned int inl_recv_size; /* Inline recv size */
#endif
unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
/* RX/TX queues. */
- struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct rxq *(*rxqs)[]; /* RX queues. */
struct txq *(*txqs)[]; /* TX queues. */
+ struct rte_intr_handle intr_handle_dev; /* Device interrupt handler. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
LIST_HEAD(mlx4_flows, rte_flow) flows;
struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
+ LIST_HEAD(mlx4_parents, rxq) parents;
rte_spinlock_t lock; /* Lock for control functions. */
};
void priv_lock(struct priv *priv);
void priv_unlock(struct priv *priv);
+int
+rxq_create_qp(struct rxq *rxq,
+ uint16_t desc,
+ int inactive,
+ int children_n,
+ struct rxq *rxq_parent);
+
+void
+rxq_parent_cleanup(struct rxq *parent);
+
+struct rxq *
+priv_parent_create(struct priv *priv,
+ uint16_t queues[],
+ uint16_t children_n);
+
#endif /* RTE_PMD_MLX4_H_ */
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index edfac038..925c89c5 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -112,6 +112,7 @@ struct rte_flow_drop {
static const enum rte_flow_action_type valid_actions[] = {
RTE_FLOW_ACTION_TYPE_DROP,
RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_RSS,
RTE_FLOW_ACTION_TYPE_END,
};
@@ -672,6 +673,76 @@ priv_flow_validate(struct priv *priv,
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
action.queue = 1;
+ action.queues_n = 1;
+ action.queues[0] = queue->index;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ int i;
+ int ierr;
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+
+ if (!priv->hw_rss) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "RSS cannot be used with "
+ "the current configuration");
+ return -rte_errno;
+ }
+ if (!priv->isolated) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "RSS cannot be used without "
+ "isolated mode");
+ return -rte_errno;
+ }
+ if (!rte_is_power_of_2(rss->num)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "the number of queues "
+ "should be power of two");
+ return -rte_errno;
+ }
+ if (priv->max_rss_tbl_sz < rss->num) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "the number of queues "
+ "is too large");
+ return -rte_errno;
+ }
+ /* Check the queue indices array. */
+ ierr = 0;
+ for (i = 0; i < rss->num; ++i) {
+ int j;
+ if (rss->queue[i] >= priv->rxqs_n)
+ ierr = 1;
+ /*
+ * Prevent the user from specifying
+ * the same queue twice in the RSS array.
+ */
+ for (j = i + 1; j < rss->num && !ierr; ++j)
+ if (rss->queue[j] == rss->queue[i])
+ ierr = 1;
+ if (ierr) {
+ rte_flow_error_set(
+ error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "RSS action only supports "
+ "unique queue indices "
+ "in a list");
+ return -rte_errno;
+ }
+ }
+ action.queue = 1;
+ action.queues_n = rss->num;
+ for (i = 0; i < rss->num; ++i)
+ action.queues[i] = rss->queue[i];
} else {
goto exit_action_not_supported;
}
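The checks above require a power-of-two number of queues, indices below rxqs_n, no duplicates, and isolated mode. A hedged sketch of how an application might build such an action with the 17.08-era struct rte_flow_action_rss (whose trailing queue[] is a flexible array, hence the heap allocation); the helper name is illustrative:

#include <stdlib.h>
#include <string.h>
#include <rte_common.h>
#include <rte_flow.h>

/* Sketch: RSS action spreading traffic over queues 0-3 (a power of two,
 * all unique and below the configured number of Rx queues). */
static struct rte_flow_action_rss *
make_rss_action(void)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_action_rss *rss;

	rss = malloc(sizeof(*rss) + sizeof(queues));
	if (rss == NULL)
		return NULL;
	rss->rss_conf = NULL; /* Keep the device's default RSS parameters. */
	rss->num = RTE_DIM(queues);
	memcpy(rss->queue, queues, sizeof(queues));
	return rss;
}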
@@ -797,6 +868,82 @@ err:
}
/**
+ * Get RSS parent rxq structure for given queues.
+ *
+ * Creates a new one or returns an existing one.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param queues
+ * Queue indices array, NULL in the default RSS case.
+ * @param children_n
+ * Number of entries in the queues array.
+ *
+ * @return
+ * Pointer to a parent rxq structure, NULL on failure.
+ */
+static struct rxq *
+priv_parent_get(struct priv *priv,
+ uint16_t queues[],
+ uint16_t children_n,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ struct rxq *parent;
+
+ for (parent = LIST_FIRST(&priv->parents);
+ parent;
+ parent = LIST_NEXT(parent, next)) {
+ unsigned int same = 0;
+ unsigned int overlap = 0;
+
+ /*
+ * Find out whether an appropriate parent queue already exists
+ * and can be reused, otherwise make sure there are no overlaps.
+ */
+ for (i = 0; i < children_n; ++i) {
+ unsigned int j;
+
+ for (j = 0; j < parent->rss.queues_n; ++j) {
+ if (parent->rss.queues[j] != queues[i])
+ continue;
+ ++overlap;
+ if (i == j)
+ ++same;
+ }
+ }
+ if (same == children_n &&
+ children_n == parent->rss.queues_n)
+ return parent;
+ else if (overlap)
+ goto error;
+ }
+ /* Exclude cases where some QPs were already created without RSS. */
+ for (i = 0; i < children_n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[queues[i]];
+ if (rxq->qp)
+ goto error;
+ }
+ parent = priv_parent_create(priv, queues, children_n);
+ if (!parent) {
+ rte_flow_error_set(error,
+ ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "flow rule creation failure");
+ return NULL;
+ }
+ return parent;
+
+error:
+ rte_flow_error_set(error,
+ EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "sharing a queue between several"
+ " RSS groups is not supported");
+ return NULL;
+}
+
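For illustration only, the reuse rule applied by priv_parent_get() restated on plain index arrays: a parent is reused only for an identical list (same indices in the same positions), a new one may be created for a fully disjoint list, and any partial overlap is rejected. The helper below is a sketch, not part of the driver:

#include <stdint.h>

/* Sketch: 1 = identical sets (reuse parent), 0 = disjoint (new parent OK),
 * -1 = partial overlap (rejected, cf. the EEXIST error above). */
static int
rss_sets_compare(const uint16_t *a, unsigned int a_n,
		 const uint16_t *b, unsigned int b_n)
{
	unsigned int i, j, same = 0, overlap = 0;

	for (i = 0; i != a_n; ++i)
		for (j = 0; j != b_n; ++j)
			if (a[i] == b[j]) {
				++overlap;
				if (i == j)
					++same;
			}
	if (same == a_n && a_n == b_n)
		return 1;
	return overlap ? -1 : 0;
}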
+/**
* Complete flow rule creation.
*
* @param priv
@@ -819,6 +966,7 @@ priv_flow_create_action_queue(struct priv *priv,
{
struct ibv_qp *qp;
struct rte_flow *rte_flow;
+ struct rxq *rxq_parent = NULL;
assert(priv->pd);
assert(priv->ctx);
@@ -829,14 +977,46 @@ priv_flow_create_action_queue(struct priv *priv,
return NULL;
}
if (action->drop) {
- qp = priv->flow_drop_queue->qp;
+ qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
} else {
- struct rxq *rxq = (*priv->rxqs)[action->queue_id];
+ int ret;
+ unsigned int i;
+ struct rxq *rxq = NULL;
- qp = rxq->qp;
+ if (action->queues_n > 1) {
+ rxq_parent = priv_parent_get(priv, action->queues,
+ action->queues_n, error);
+ if (!rxq_parent)
+ goto error;
+ }
+ for (i = 0; i < action->queues_n; ++i) {
+ rxq = (*priv->rxqs)[action->queues[i]];
+ /*
+ * In isolated mode, ibv receive queue creation is
+ * postponed until the first rte_flow rule is applied
+ * to that queue.
+ */
+ if (!rxq->qp) {
+ assert(priv->isolated);
+ ret = rxq_create_qp(rxq, rxq->elts_n,
+ 0, 0, rxq_parent);
+ if (ret) {
+ rte_flow_error_set(
+ error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "flow rule creation failure");
+ goto error;
+ }
+ }
+ }
+ qp = action->queues_n > 1 ? rxq_parent->qp : rxq->qp;
rte_flow->qp = qp;
}
rte_flow->ibv_attr = ibv_attr;
+ if (!priv->started)
+ return rte_flow;
rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -846,6 +1026,8 @@ priv_flow_create_action_queue(struct priv *priv,
return rte_flow;
error:
+ if (rxq_parent)
+ rxq_parent_cleanup(rxq_parent);
rte_free(rte_flow);
return NULL;
}
@@ -909,11 +1091,22 @@ priv_flow_create(struct priv *priv,
continue;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
action.queue = 1;
- action.queue_id =
+ action.queues_n = 1;
+ action.queues[0] =
((const struct rte_flow_action_queue *)
actions->conf)->index;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
action.drop = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ unsigned int i;
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+
+ action.queue = 1;
+ action.queues_n = rss->num;
+ for (i = 0; i < rss->num; ++i)
+ action.queues[i] = rss->queue[i];
} else {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -957,6 +1150,43 @@ mlx4_flow_create(struct rte_eth_dev *dev,
}
/**
+ * @see rte_flow_isolate()
+ *
+ * Must be done before calling dev_configure().
+ *
+ * @param dev
+ * Pointer to the ethernet device structure.
+ * @param enable
+ * Nonzero to enter isolated mode, attempt to leave it otherwise.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative value on error.
+ */
+int
+mlx4_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ if (priv->rxqs) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "isolated mode must be set"
+ " before configuring the device");
+ priv_unlock(priv);
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
+ priv_unlock(priv);
+ return 0;
+}
+
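Because mlx4 only accepts the isolate request while priv->rxqs is still unset, an application has to call rte_flow_isolate() before any other configuration of the port. A minimal ordering sketch using the generic API (queue counts, descriptor numbers and socket 0 are arbitrary; error handling trimmed):

#include <string.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_mbuf.h>

/* Sketch: isolated mode must be requested before rte_eth_dev_configure(). */
static int
port_init_isolated(uint16_t port, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;
	struct rte_flow_error err;
	uint16_t q;

	memset(&conf, 0, sizeof(conf));
	if (rte_flow_isolate(port, 1, &err))
		return -1;
	if (rte_eth_dev_configure(port, 4, 1, &conf))
		return -1;
	for (q = 0; q != 4; ++q)
		if (rte_eth_rx_queue_setup(port, q, 512, 0, NULL, mp))
			return -1;
	if (rte_eth_tx_queue_setup(port, 0, 512, 0, NULL))
		return -1;
	/* Flow rules (e.g. the RSS action above) can now be created; their
	 * Verbs objects are instantiated once the port is started. */
	return rte_eth_dev_start(port);
}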
+/**
* Destroy a flow.
*
* @param priv
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index 12a293e4..beabcf2d 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -90,10 +90,16 @@ struct mlx4_flow {
unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
};
+int
+mlx4_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error);
+
struct mlx4_flow_action {
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t queue:1; /**< Target is a receive queue. */
- uint32_t queue_id; /**< Identifier of the queue. */
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indices to use. */
+ uint16_t queues_n; /**< Number of entries in queues[]. */
};
int mlx4_priv_flow_start(struct priv *priv);
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index c0799591..8736de5d 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -39,6 +39,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx_vec_sse.c
+endif
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_trigger.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mac.c
@@ -52,7 +55,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
# Basic CFLAGS.
CFLAGS += -O3
-CFLAGS += -std=gnu99 -Wall -Wextra
+CFLAGS += -std=c11 -Wall -Wextra
CFLAGS += -g
CFLAGS += -I.
CFLAGS += -D_BSD_SOURCE
@@ -101,6 +104,11 @@ mlx5_autoconf.h.new: FORCE
mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
+ HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP \
+ infiniband/verbs_exp.h \
+ enum IBV_EXP_FLOW_SPEC_ACTION_DROP \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
infiniband/verbs_exp.h \
enum IBV_EXP_CQ_COMPRESSED_CQE \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index bcb2c1b2..b7e50463 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -94,6 +94,12 @@
/* Device parameter to enable hardware TSO offload. */
#define MLX5_TSO "tso"
+/* Device parameter to enable hardware Tx vector. */
+#define MLX5_TX_VEC_EN "tx_vec_en"
+
+/* Device parameter to enable hardware Rx vector. */
+#define MLX5_RX_VEC_EN "rx_vec_en"
+
/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)
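Both parameters are regular devargs, parsed by mlx5_args_check() further below; passing 0 turns the corresponding vectorized datapath off, and both default to enabled when supported. A sketch of an EAL invocation built in C (PCI address and core list are placeholders):

#include <rte_common.h>
#include <rte_eal.h>

int
main(void)
{
	/* Sketch: disable vectorized Rx/Tx for one device via devargs. */
	char *argv[] = {
		"app", "-l", "0-3",
		"-w", "0000:03:00.0,rx_vec_en=0,tx_vec_en=0",
	};

	return rte_eal_init(RTE_DIM(argv), argv) < 0 ? -1 : 0;
}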
@@ -105,6 +111,8 @@ struct mlx5_args {
int mpw_hdr_dseg;
int inline_max_packet_sz;
int tso;
+ int tx_vec_en;
+ int rx_vec_en;
};
/**
* Retrieve integer value from environment variable.
@@ -246,8 +254,10 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
+#ifdef HAVE_UPDATE_CQ_CI
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
+#endif
};
static struct {
@@ -322,6 +332,10 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
args->inline_max_packet_sz = tmp;
} else if (strcmp(MLX5_TSO, key) == 0) {
args->tso = !!tmp;
+ } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
+ args->tx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
+ args->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -351,6 +365,8 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TSO,
+ MLX5_TX_VEC_EN,
+ MLX5_RX_VEC_EN,
NULL,
};
struct rte_kvargs *kvlist;
@@ -406,6 +422,10 @@ mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
priv->inline_max_packet_sz = args->inline_max_packet_sz;
if (args->tso != MLX5_ARG_UNSET)
priv->tso = args->tso;
+ if (args->tx_vec_en != MLX5_ARG_UNSET)
+ priv->tx_vec_en = args->tx_vec_en;
+ if (args->rx_vec_en != MLX5_ARG_UNSET)
+ priv->rx_vec_en = args->rx_vec_en;
}
/**
@@ -551,6 +571,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
.mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
.tso = MLX5_ARG_UNSET,
+ .tx_vec_en = MLX5_ARG_UNSET,
+ .rx_vec_en = MLX5_ARG_UNSET,
};
exp_device_attr.comp_mask =
@@ -613,6 +635,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
priv->tunnel_en = tunnel_en;
+ /* Enable vector by default if supported. */
+ priv->tx_vec_en = 1;
+ priv->rx_vec_en = 1;
err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
@@ -786,6 +811,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
eth_dev->device->driver = &mlx5_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
@@ -885,6 +911,8 @@ RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
+ /* Build the static table for ptype conversion. */
+ mlx5_set_ptype_table();
/*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
* huge pages. Calling ibv_fork_init() during init allows
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1148dee3..43c53841 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -54,6 +54,7 @@
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
+#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_spinlock.h>
@@ -129,6 +130,9 @@ struct priv {
unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tunnel_en:1;
+ unsigned int isolated:1; /* Whether isolated mode is enabled. */
+ unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
+ unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
/* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
@@ -311,6 +315,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
struct rte_flow_error *);
int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
int priv_flow_start(struct priv *);
void priv_flow_stop(struct priv *);
int priv_flow_rxq_in_use(struct priv *, struct rxq *);
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 201bb336..a76bc6f6 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -89,4 +89,22 @@
/* Maximum Packet headers size (L2+L3+L4) for TSO. */
#define MLX5_MAX_TSO_HEADER 128
+/* Default minimum number of Tx queues for vectorized Tx. */
+#define MLX5_VPMD_MIN_TXQS 4
+
+/* Threshold of buffer replenishment for vectorized Rx. */
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U
+
+/* Maximum size of burst for vectorized Rx. */
+#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH
+
+/*
+ * Maximum size of burst for vectorized Tx. This is related to the maximum size
+ * of Enhanced MPW (eMPW) WQE as vectorized Tx is supported with eMPW.
+ */
+#define MLX5_VPMD_TX_MAX_BURST 32U
+
+/* Number of packets vectorized Rx can simultaneously process in a loop. */
+#define MLX5_VPMD_DESCS_PER_LOOP 4
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 3fd22cb8..b0eb3cdf 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -119,6 +119,7 @@ struct ethtool_link_settings {
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
+#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
/**
* Return private structure associated with an Ethernet device.
@@ -661,7 +662,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
@@ -715,15 +716,24 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
/* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
RTE_PTYPE_UNKNOWN
-
};
- if (dev->rx_pkt_burst == mlx5_rx_burst)
+ if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_vec)
return ptypes;
return NULL;
}
@@ -806,9 +816,12 @@ static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
struct priv *priv = mlx5_get_priv(dev);
- struct ethtool_link_settings edata = {
- .cmd = ETHTOOL_GLINKSETTINGS,
- };
+ __extension__ struct {
+ struct ethtool_link_settings edata;
+ uint32_t link_mode_data[3 *
+ ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
+ } ecmd;
+
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
@@ -821,15 +834,23 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = (void *)&edata;
+ memset(&ecmd, 0, sizeof(ecmd));
+ ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
+ ifr.ifr_data = (void *)&ecmd;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- dev_link.link_speed = edata.speed;
- sc = edata.link_mode_masks[0] |
- ((uint64_t)edata.link_mode_masks[1] << 32);
+ dev_link.link_speed = ecmd.edata.speed;
+ sc = ecmd.edata.link_mode_masks[0] |
+ ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
@@ -865,7 +886,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
- dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
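The duplicated ioctl() above implements the kernel's ETHTOOL_GLINKSETTINGS handshake: the first request goes out with link_mode_masks_nwords set to 0, the kernel writes back the negated number of 32-bit mask words it needs, and the request is repeated with that count so the full masks fit in the buffer declared after edata. A standalone sketch of the same handshake, assuming a recent linux/ethtool.h; the caller is expected to reserve room for 3 * nwords_max extra u32 words behind the struct, as the ecmd wrapper above does:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Sketch: two-step ETHTOOL_GLINKSETTINGS query on an AF_INET socket fd. */
static int
get_link_settings(int fd, const char *ifname,
		  struct ethtool_link_settings *req, int nwords_max)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)req;
	req->cmd = ETHTOOL_GLINKSETTINGS;
	req->link_mode_masks_nwords = 0;	/* Step 1: probe for the size. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;
	/* The kernel answers with -N, N being the required word count. */
	req->link_mode_masks_nwords = -req->link_mode_masks_nwords;
	if (req->link_mode_masks_nwords > nwords_max)
		return -1;
	return ioctl(fd, SIOCETHTOOL, &ifr);	/* Step 2: fetch the masks. */
}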
@@ -903,12 +924,6 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
/**
* DPDK callback to change the MTU.
*
- * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
- * received). Use this as a hint to enable/disable scattered packets support
- * and improve performance when not needed.
- * Since failure is not an option, reconfiguring queues on the fly is not
- * recommended.
- *
* @param dev
* Pointer to Ethernet device structure.
* @param in_mtu
@@ -921,123 +936,33 @@ int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
+ uint16_t kern_mtu;
int ret = 0;
- unsigned int i;
- uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
- mlx5_rx_burst;
- unsigned int max_frame_len;
- int rehash;
- int restart = priv->started;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
priv_lock(priv);
+ ret = priv_get_mtu(priv, &kern_mtu);
+ if (ret)
+ goto out;
/* Set kernel interface MTU first. */
- if (priv_set_mtu(priv, mtu)) {
- ret = errno;
- WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
- strerror(ret));
+ ret = priv_set_mtu(priv, mtu);
+ if (ret)
goto out;
- } else
+ ret = priv_get_mtu(priv, &kern_mtu);
+ if (ret)
+ goto out;
+ if (kern_mtu == mtu) {
+ priv->mtu = mtu;
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
- /* Temporarily replace RX handler with a fake one, assuming it has not
- * been copied elsewhere. */
- dev->rx_pkt_burst = removed_rx_burst;
- /* Make sure everyone has left mlx5_rx_burst() and uses
- * removed_rx_burst() instead. */
- rte_wmb();
- usleep(1000);
- /* MTU does not include header and CRC. */
- max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
- /* Check if at least one queue is going to need a SGE update. */
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- unsigned int mb_len;
- unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
- unsigned int sges_n;
-
- if (rxq == NULL)
- continue;
- mb_len = rte_pktmbuf_data_room_size(rxq->mp);
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /*
- * Determine the number of SGEs needed for a full packet
- * and round it to the next power of two.
- */
- sges_n = log2above((size / mb_len) + !!(size % mb_len));
- if (sges_n != rxq->sges_n)
- break;
- }
- /*
- * If all queues have the right number of SGEs, a simple rehash
- * of their buffers is enough, otherwise SGE information can only
- * be updated in a queue by recreating it. All resources that depend
- * on queues (flows, indirection tables) must be recreated as well in
- * that case.
- */
- rehash = (i == priv->rxqs_n);
- if (!rehash) {
- /* Clean up everything as with mlx5_dev_stop(). */
- priv_special_flow_disable_all(priv);
- priv_mac_addrs_disable(priv);
- priv_destroy_hash_rxqs(priv);
- priv_fdir_disable(priv);
- priv_dev_interrupt_handler_uninstall(priv, dev);
}
-recover:
- /* Reconfigure each RX queue. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct rxq_ctrl, rxq);
- unsigned int mb_len;
- unsigned int tmp;
-
- if (rxq == NULL)
- continue;
- mb_len = rte_pktmbuf_data_room_size(rxq->mp);
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame =
- (max_frame_len > ETHER_MAX_LEN);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
- if (rehash)
- ret = rxq_rehash(dev, rxq_ctrl);
- else
- ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
- rxq_ctrl->socket, NULL, rxq->mp);
- if (!ret)
- continue;
- /* Attempt to roll back in case of error. */
- tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
- if (max_frame_len != tmp) {
- max_frame_len = tmp;
- goto recover;
- }
- /* Double fault, disable RX. */
- break;
- }
- /*
- * Use a safe RX burst function in case of error, otherwise mimic
- * mlx5_dev_start().
- */
- if (ret) {
- ERROR("unable to reconfigure RX queues, RX disabled");
- rx_func = removed_rx_burst;
- } else if (restart &&
- !rehash &&
- !priv_create_hash_rxqs(priv) &&
- !priv_rehash_flows(priv)) {
- if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
- priv_fdir_enable(priv);
- priv_dev_interrupt_handler_install(priv, dev);
- }
- priv->mtu = mtu;
- /* Burst functions can now be called again. */
- rte_wmb();
- dev->rx_pkt_burst = rx_func;
+ priv_unlock(priv);
+ return 0;
out:
+ ret = errno;
+ WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
+ strerror(ret));
priv_unlock(priv);
assert(ret >= 0);
return -ret;
@@ -1185,7 +1110,7 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
/* Extract information. */
if (sscanf(line,
"PCI_SLOT_NAME="
- "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+ "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
&pci_addr->domain,
&pci_addr->bus,
&pci_addr->devid,
@@ -1262,7 +1187,8 @@ mlx5_dev_link_status_handler(void *arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
}
/**
@@ -1284,7 +1210,8 @@ mlx5_dev_interrupt_handler(void *cb_arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+ NULL);
}
/**
@@ -1584,9 +1511,16 @@ priv_select_tx_function(struct priv *priv)
priv->dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
if (priv->mps == MLX5_MPW_ENHANCED) {
- priv->dev->tx_pkt_burst =
- mlx5_tx_burst_empw;
- DEBUG("selected Enhanced MPW TX function");
+ if (priv_check_vec_tx_support(priv) > 0) {
+ if (priv_check_raw_vec_tx_support(priv) > 0)
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ else
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ DEBUG("selected Enhanced MPW TX vectorized function");
+ } else {
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ DEBUG("selected Enhanced MPW TX function");
+ }
} else if (priv->mps && priv->txq_inline) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
@@ -1605,5 +1539,11 @@ priv_select_tx_function(struct priv *priv)
void
priv_select_rx_function(struct priv *priv)
{
- priv->dev->rx_pkt_burst = mlx5_rx_burst;
+ if (priv_check_vec_rx_support(priv) > 0) {
+ priv_prep_vec_rx_function(priv);
+ priv->dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ DEBUG("selected RX vectorized function");
+ } else {
+ priv->dev->rx_pkt_burst = mlx5_rx_burst;
+ }
}
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index c8d47489..34a7e69f 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -1053,6 +1053,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.destroy = mlx5_flow_destroy,
.flush = mlx5_flow_flush,
.query = NULL,
+ .isolate = mlx5_flow_isolate,
};
/**
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8b3957ba..86be9291 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -53,7 +53,11 @@
#include "mlx5_prm.h"
/* Number of Work Queue necessary for the DROP queue. */
+#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
#define MLX5_DROP_WQ_N 4
+#else
+#define MLX5_DROP_WQ_N 1
+#endif
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
@@ -576,6 +580,10 @@ priv_flow_validate(struct priv *priv,
}
if (action->mark && !flow->ibv_attr && !action->drop)
flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+ if (!flow->ibv_attr && action->drop)
+ flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
if (!action->queue && !action->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
@@ -993,6 +1001,10 @@ priv_flow_create_action_queue_drop(struct priv *priv,
struct rte_flow_error *error)
{
struct rte_flow *rte_flow;
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+ struct ibv_exp_flow_spec_action_drop *drop;
+ unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
assert(priv->pd);
assert(priv->ctx);
@@ -1003,10 +1015,19 @@ priv_flow_create_action_queue_drop(struct priv *priv,
return NULL;
}
rte_flow->drop = 1;
+#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
+ drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *drop = (struct ibv_exp_flow_spec_action_drop){
+ .type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+ ++flow->ibv_attr->num_of_specs;
+ flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
+#endif
rte_flow->ibv_attr = flow->ibv_attr;
- rte_flow->qp = priv->flow_drop_queue->qp;
if (!priv->started)
return rte_flow;
+ rte_flow->qp = priv->flow_drop_queue->qp;
rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
rte_flow->ibv_attr);
if (!rte_flow->ibv_flow) {
@@ -1579,3 +1600,30 @@ priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
}
return 0;
}
+
+/**
+ * Isolated mode.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ if (priv->started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port must be stopped first");
+ priv_unlock(priv);
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
+ priv_unlock(priv);
+ return 0;
+}
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 79e0c410..8489ea67 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -443,6 +443,8 @@ priv_mac_addrs_enable(struct priv *priv)
unsigned int i;
int ret;
+ if (priv->isolated)
+ return 0;
if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
return 0;
for (i = 0; (i != priv->hash_rxqs_n); ++i) {
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 0a363846..28733517 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -207,7 +207,8 @@ txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
sizeof(txq_ctrl->txq.mp2mr[0])));
}
/* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx].mp = mp;
+ txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
+ txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
txq_ctrl->txq.mp2mr[idx].mr = mr;
txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
@@ -265,18 +266,28 @@ txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
struct txq_mp2mr_mbuf_check_data data = {
.ret = 0,
};
+ uintptr_t start;
+ uintptr_t end;
unsigned int i;
/* Register mempool only if the first element looks like a mbuf. */
if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
data.ret == -1)
return;
+ if (mlx5_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return;
+ }
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- if (unlikely(txq_ctrl->txq.mp2mr[i].mp == NULL)) {
+ struct ibv_mr *mr = txq_ctrl->txq.mp2mr[i].mr;
+
+ if (unlikely(mr == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
- if (txq_ctrl->txq.mp2mr[i].mp == mp)
+ if (start >= (uintptr_t)mr->addr &&
+ end <= (uintptr_t)mr->addr + mr->length)
return;
}
txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
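The lookup above now matches a mempool by its virtual address range instead of by its pointer, so any MR that fully covers the range can be reused even if it was registered for a different mempool. The containment test, shown standalone (sketch only; mlx5_check_mempool() is the driver helper that yields start/end):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Sketch: does the registered memory region fully cover [start, end)? */
static int
mr_covers(const struct ibv_mr *mr, uintptr_t start, uintptr_t end)
{
	uintptr_t mr_start = (uintptr_t)mr->addr;
	uintptr_t mr_end = mr_start + mr->length;

	return start >= mr_start && end <= mr_end;
}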
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 173e6e84..a67e5426 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -347,6 +347,8 @@ priv_special_flow_enable_all(struct priv *priv)
{
enum hash_rxq_flow_type flow_type;
+ if (priv->isolated)
+ return 0;
for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
flow_type != HASH_RXQ_FLOW_TYPE_MAC;
++flow_type) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 2a268398..74387a79 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -363,6 +363,8 @@ priv_create_hash_rxqs(struct priv *priv)
assert(priv->hash_rxqs_n == 0);
assert(priv->pd != NULL);
assert(priv->ctx != NULL);
+ if (priv->isolated)
+ return 0;
if (priv->rxqs_n == 0)
return EINVAL;
assert(priv->rxqs != NULL);
@@ -632,6 +634,32 @@ priv_rehash_flows(struct priv *priv)
}
/**
+ * Unlike the regular Rx function, vPMD Rx does not replace mbufs immediately
+ * when receiving packets; it replaces them later in bulk. In rxq->elts[],
+ * entries from rq_pi to rq_ci are still owned by the device while the rest
+ * have already been delivered to the application. This function must be
+ * called to replace the used mbufs so that rxq_alloc_elts() does not reuse
+ * them.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_trim_elts(struct rxq *rxq)
+{
+ const uint16_t q_n = (1 << rxq->elts_n);
+ const uint16_t q_mask = q_n - 1;
+ uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
+ uint16_t i;
+
+ if (!rxq->trim_elts)
+ return;
+ for (i = 0; i < used; ++i)
+ (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
+ rxq->trim_elts = 0;
+ return;
+}
+
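The arithmetic above relies on unsigned 16-bit wrap-around: rq_ci - rq_pi descriptors still belong to the device, so the remaining q_n - (rq_ci - rq_pi) entries are the ones cleared. A small worked example with assumed counter values:

#include <assert.h>
#include <stdint.h>

/* Sketch: 256-entry ring, rq_ci = 0x0102, rq_pi = 0x00fe -> the device
 * still owns 4 descriptors and 252 entries get cleared. */
static void
trim_example(void)
{
	const uint16_t q_n = 256;
	uint16_t rq_ci = 0x0102;
	uint16_t rq_pi = 0x00fe;
	uint16_t used = q_n - (uint16_t)(rq_ci - rq_pi);

	assert(used == 252);
	/* The difference is still 4 across a 16-bit wrap-around. */
	rq_ci = 0x0001;
	rq_pi = 0xfffd;
	assert((uint16_t)(rq_ci - rq_pi) == 4);
	(void)used;
}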
+/**
* Allocate RX queue elements.
*
* @param rxq_ctrl
@@ -659,15 +687,13 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
volatile struct mlx5_wqe_data_seg *scat =
&(*rxq_ctrl->rxq.wqes)[i];
- if (pool != NULL) {
- buf = (*pool)[i];
- assert(buf != NULL);
+ buf = (pool != NULL) ? (*pool)[i] : NULL;
+ if (buf != NULL) {
rte_pktmbuf_reset(buf);
rte_pktmbuf_refcnt_update(buf, 1);
} else
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
- assert(pool == NULL);
ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
ret = ENOMEM;
goto error;
@@ -722,6 +748,7 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
{
unsigned int i;
+ rxq_trim_elts(&rxq_ctrl->rxq);
DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
if (rxq_ctrl->rxq.elts == NULL)
return;
@@ -760,72 +787,6 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
}
/**
- * Reconfigure RX queue buffers.
- *
- * rxq_rehash() does not allocate mbufs, which, if not done from the right
- * thread (such as a control thread), may corrupt the pool.
- * In case of failure, the queue is left untouched.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rxq_ctrl
- * RX queue pointer.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
-{
- unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
- unsigned int i;
- struct ibv_exp_wq_attr mod;
- int err;
-
- DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
- (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
- assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
- /* From now on, any failure will render the queue unusable.
- * Reinitialize WQ. */
- mod = (struct ibv_exp_wq_attr){
- .attr_mask = IBV_EXP_WQ_ATTR_STATE,
- .wq_state = IBV_EXP_WQS_RESET,
- };
- err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
- if (err) {
- ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
- assert(err > 0);
- return err;
- }
- /* Snatch mbufs from original queue. */
- claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
- for (i = 0; i != elts_n; ++i) {
- struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
-
- assert(rte_mbuf_refcnt_read(buf) == 2);
- rte_pktmbuf_free_seg(buf);
- }
- /* Change queue state to ready. */
- mod = (struct ibv_exp_wq_attr){
- .attr_mask = IBV_EXP_WQ_ATTR_STATE,
- .wq_state = IBV_EXP_WQS_RDY,
- };
- err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
- if (err) {
- ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
- (void *)dev, strerror(err));
- goto error;
- }
- /* Update doorbell counter. */
- rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
- rte_wmb();
- *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
-error:
- assert(err >= 0);
- return err;
-}
-
-/**
* Initialize RX queue.
*
* @param tmpl
@@ -858,6 +819,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
tmpl->rxq.cq_ci = 0;
tmpl->rxq.rq_ci = 0;
+ tmpl->rxq.rq_pi = 0;
tmpl->rxq.cq_db = cq_info.dbrec;
tmpl->rxq.wqes =
(volatile struct mlx5_wqe_data_seg (*)[])
@@ -888,7 +850,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
* @return
* 0 on success, errno value on failure.
*/
-int
+static int
rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
@@ -978,10 +940,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
if (dev->data->dev_conf.intr_conf.rxq) {
tmpl.channel = ibv_create_comp_channel(priv->ctx);
if (tmpl.channel == NULL) {
- dev->data->dev_conf.intr_conf.rxq = 0;
ret = ENOMEM;
- ERROR("%p: Comp Channel creation failure: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: Rx interrupt completion channel creation"
+ " failure: %s",
+ (void *)dev, strerror(ret));
goto error;
}
}
@@ -991,7 +953,12 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
if (priv->cqe_comp) {
attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
- cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
+ /*
+ * For vectorized Rx, the CQE count must not be doubled
+ * so that cq_ci and rq_ci stay aligned.
+ */
+ if (rxq_check_vec_support(&tmpl.rxq) < 0)
+ cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
}
tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
&attr.cq);
@@ -1101,6 +1068,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
if (rxq_ctrl->rxq.elts_n) {
assert(1 << rxq_ctrl->rxq.elts_n == desc);
assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
+ rxq_trim_elts(&rxq_ctrl->rxq);
ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
} else
ret = rxq_alloc_elts(&tmpl, desc, NULL);
@@ -1163,6 +1131,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct priv *priv = dev->data->dev_private;
struct rxq *rxq = (*priv->rxqs)[idx];
struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ const uint16_t desc_pad = MLX5_VPMD_DESCS_PER_LOOP; /* For vPMD. */
int ret;
if (mlx5_is_secondary())
@@ -1196,7 +1165,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
rxq_ctrl = rte_realloc(rxq_ctrl,
sizeof(*rxq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
+ (desc + desc_pad) *
+ sizeof(struct rte_mbuf *),
RTE_CACHE_LINE_SIZE);
if (!rxq_ctrl) {
ERROR("%p: unable to reallocate queue index %u",
@@ -1207,7 +1177,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
} else {
rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
+ (desc + desc_pad) *
+ sizeof(struct rte_mbuf *),
0, socket);
if (rxq_ctrl == NULL) {
ERROR("%p: unable to allocate queue index %u",
@@ -1224,8 +1195,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
DEBUG("%p: adding RX queue %p to list",
(void *)dev, (void *)rxq_ctrl);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
- /* Update receive callback. */
- priv_select_rx_function(priv);
}
priv_unlock(priv);
return -ret;
@@ -1310,111 +1279,159 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
}
/**
- * Fill epoll fd list for rxq interrupts.
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
*
* @param priv
- * Private structure.
+ * Pointer to private structure.
*
* @return
* 0 on success, negative on failure.
*/
int
-priv_intr_efd_enable(struct priv *priv)
+priv_rx_intr_vec_enable(struct priv *priv)
{
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
- if (n == 0)
+ if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
- if (n < rxqs_n) {
- WARN("rxqs num is larger than EAL max interrupt vector "
- "%u > %u unable to supprt rxq interrupts",
- rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
- return -EINVAL;
+ priv_rx_intr_vec_disable(priv);
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -ENOMEM;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
struct rxq_ctrl *rxq_ctrl =
container_of(rxq, struct rxq_ctrl, rxq);
- int fd = rxq_ctrl->channel->fd;
+ int fd;
int flags;
int rc;
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq_ctrl->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ priv_rx_intr_vec_disable(priv);
+ return -1;
+ }
+ fd = rxq_ctrl->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
- WARN("failed to change rxq interrupt file "
- "descriptor %d for queue index %d", fd, i);
+ ERROR("failed to make Rx interrupt file descriptor"
+ " %d non-blocking for queue index %d", fd, i);
+ priv_rx_intr_vec_disable(priv);
return -1;
}
- intr_handle->efds[i] = fd;
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = fd;
+ count++;
}
- intr_handle->nb_efd = n;
+ if (!count)
+ priv_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
return 0;
}
/**
- * Clean epoll fd list for rxq interrupts.
+ * Clean up Rx interrupts handler.
*
* @param priv
- * Private structure.
+ * Pointer to private structure.
*/
void
-priv_intr_efd_disable(struct priv *priv)
+priv_rx_intr_vec_disable(struct priv *priv)
{
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
}
+#ifdef HAVE_UPDATE_CQ_CI
+
/**
- * Create and init interrupt vector array.
+ * DPDK callback for Rx queue interrupt enable.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
*
* @return
* 0 on success, negative on failure.
*/
int
-priv_create_intr_vec(struct priv *priv)
+mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- unsigned int rxqs_n = priv->rxqs_n;
- unsigned int i;
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ int ret;
- if (rxqs_n == 0)
- return 0;
- intr_handle->intr_vec = (int *)
- rte_malloc("intr_vec", rxqs_n * sizeof(int), 0);
- if (intr_handle->intr_vec == NULL) {
- WARN("Failed to allocate memory for intr_vec "
- "rxq interrupt will not be supported");
- return -ENOMEM;
- }
- for (i = 0; i != rxqs_n; ++i) {
- /* 1:1 mapping between rxq and interrupt. */
- intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ if (!rxq || !rxq_ctrl->channel) {
+ ret = EINVAL;
+ } else {
+ ibv_mlx5_exp_update_cq_ci(rxq_ctrl->cq, rxq->cq_ci);
+ ret = ibv_req_notify_cq(rxq_ctrl->cq, 0);
}
- return 0;
+ if (ret)
+ WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
+ return -ret;
}
/**
- * Destroy init interrupt vector array.
+ * DPDK callback for Rx queue interrupt disable.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Rx queue number.
*
* @return
* 0 on success, negative on failure.
*/
-void
-priv_destroy_intr_vec(struct priv *priv)
+int
+mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
- rte_free(intr_handle->intr_vec);
+ if (!rxq || !rxq_ctrl->channel) {
+ ret = EINVAL;
+ } else {
+ ret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq_ctrl->cq)
+ ret = EINVAL;
+ }
+ if (ret)
+ WARN("unable to disable interrupt on rx queue %d",
+ rx_queue_id);
+ else
+ ibv_ack_cq_events(rxq_ctrl->cq, 1);
+ return -ret;
}
+
+#endif /* HAVE_UPDATE_CQ_CI */
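/*
 * Illustrative sketch, not part of the patch: how an application can consume
 * the Rx interrupt vectors wired up by priv_rx_intr_vec_enable() above,
 * through the generic ethdev/EAL API.  It assumes EAL is initialized, the
 * port was configured with dev_conf.intr_conf.rxq = 1, and the port_id,
 * queue_id and burst size of 32 are placeholders.
 */
#include <rte_ethdev.h>
#include <rte_interrupts.h>

static void
wait_and_poll_rxq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;
	struct rte_mbuf *pkts[32];

	/* Register the queue's event fd with the per-thread epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	/* Arm the interrupt; for this PMD it lands in mlx5_rx_intr_enable(). */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* Sleep until the NIC signals a completion. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
	/* Switch back to polling and drain the queue. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
	(void)rte_eth_rx_burst(port_id, queue_id, pkts, 32);
}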
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index de6e0fa4..b07bcd11 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -69,129 +69,131 @@
#include "mlx5_defs.h"
#include "mlx5_prm.h"
-static inline int
-check_cqe(volatile struct mlx5_cqe *cqe,
- unsigned int cqes_n, const uint16_t ci)
- __attribute__((always_inline));
-
-static inline void
-txq_complete(struct txq *txq) __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
- __attribute__((always_inline));
-
-static inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
- __attribute__((always_inline));
-
-static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
- __attribute__((always_inline));
-
-static inline int
+static __rte_always_inline int
mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash)
- __attribute__((always_inline));
+ uint16_t cqe_cnt, uint32_t *rss_hash);
-static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
- __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
-#ifndef NDEBUG
+uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
+ [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
+};
/**
- * Verify or set magic value in CQE.
- *
- * @param cqe
- * Pointer to CQE.
+ * Build a table to translate Rx completion flags to packet type.
*
- * @return
- * 0 the first time.
+ * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
*/
-static inline int
-check_cqe_seen(volatile struct mlx5_cqe *cqe)
+void
+mlx5_set_ptype_table(void)
{
- static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
- int ret = 1;
unsigned int i;
+ uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
- for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
- if (!ret || (*buf)[i] != magic[i]) {
- ret = 0;
- (*buf)[i] = magic[i];
- }
- return ret;
-}
-
-#endif /* NDEBUG */
-
-/**
- * Check whether CQE is valid.
- *
- * @param cqe
- * Pointer to CQE.
- * @param cqes_n
- * Size of completion queue.
- * @param ci
- * Consumer index.
- *
- * @return
- * 0 on success, 1 on failure.
- */
-static inline int
-check_cqe(volatile struct mlx5_cqe *cqe,
- unsigned int cqes_n, const uint16_t ci)
-{
- uint16_t idx = ci & cqes_n;
- uint8_t op_own = cqe->op_own;
- uint8_t op_owner = MLX5_CQE_OWNER(op_own);
- uint8_t op_code = MLX5_CQE_OPCODE(op_own);
-
- if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
- return 1; /* No CQE. */
-#ifndef NDEBUG
- if ((op_code == MLX5_CQE_RESP_ERR) ||
- (op_code == MLX5_CQE_REQ_ERR)) {
- volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
- uint8_t syndrome = err_cqe->syndrome;
-
- if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
- (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
- return 0;
- if (!check_cqe_seen(cqe))
- ERROR("unexpected CQE error %u (0x%02x)"
- " syndrome 0x%02x",
- op_code, op_code, syndrome);
- return 1;
- } else if ((op_code != MLX5_CQE_RESP_SEND) &&
- (op_code != MLX5_CQE_REQ)) {
- if (!check_cqe_seen(cqe))
- ERROR("unexpected CQE opcode %u (0x%02x)",
- op_code, op_code);
- return 1;
- }
-#endif /* NDEBUG */
- return 0;
-}
-
-/**
- * Return the address of the WQE.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe_ci
- * WQE consumer index.
- *
- * @return
- * WQE address.
- */
-static inline uintptr_t *
-tx_mlx5_wqe(struct txq *txq, uint16_t ci)
-{
- ci &= ((1 << txq->wqe_n) - 1);
- return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+ /* Last entry must not be overwritten, reserved for errored packet. */
+ for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
+ (*p)[i] = RTE_PTYPE_UNKNOWN;
+ /*
+ * The index to the array should have:
+ * bit[1:0] = l3_hdr_type
+ * bit[4:2] = l4_hdr_type
+ * bit[5] = ip_frag
+ * bit[6] = tunneled
+ * bit[7] = outer_l3_type
+ */
+ /* L3 */
+ (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ /* Fragmented */
+ (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ /* TCP */
+ (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* UDP */
+ (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ /* Repeat with outer_l3_type being set. Just in case. */
+ (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG;
+ (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG;
+ (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ /* Tunneled - L3 */
+ (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG;
+ /* Tunneled - Fragmented */
+ (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* Tunneled - TCP */
+ (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ /* Tunneled - UDP */
+ (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
+ (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP;
}
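/*
 * Illustrative sketch, not part of the patch: how the 8-bit index into
 * mlx5_ptype_table[] above is assembled from the CQE fields, following the
 * bit layout documented in the comment.  The field values are arbitrary
 * examples chosen to land on an existing table entry.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint8_t l3_hdr_type = 0x2;	/* bit[1:0] */
	uint8_t l4_hdr_type = 0x1;	/* bit[4:2] */
	uint8_t ip_frag = 0;		/* bit[5] */
	uint8_t tunneled = 1;		/* bit[6] */
	uint8_t outer_l3_type = 0;	/* bit[7] */
	uint8_t idx = l3_hdr_type | (l4_hdr_type << 2) | (ip_frag << 5) |
		      (tunneled << 6) | (outer_l3_type << 7);

	/* 0x02 | 0x04 | 0x40 == 0x46, i.e. the tunneled TCP entry above. */
	assert(idx == 0x46);
	return 0;
}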
/**
@@ -251,156 +253,6 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
}
/**
- * Manage TX completions.
- *
- * When sending a burst, mlx5_tx_burst() posts several WRs.
- *
- * @param txq
- * Pointer to TX queue structure.
- */
-static inline void
-txq_complete(struct txq *txq)
-{
- const unsigned int elts_n = 1 << txq->elts_n;
- const unsigned int cqe_n = 1 << txq->cqe_n;
- const unsigned int cqe_cnt = cqe_n - 1;
- uint16_t elts_free = txq->elts_tail;
- uint16_t elts_tail;
- uint16_t cq_ci = txq->cq_ci;
- volatile struct mlx5_cqe *cqe = NULL;
- volatile struct mlx5_wqe_ctrl *ctrl;
-
- do {
- volatile struct mlx5_cqe *tmp;
-
- tmp = &(*txq->cqes)[cq_ci & cqe_cnt];
- if (check_cqe(tmp, cqe_n, cq_ci))
- break;
- cqe = tmp;
-#ifndef NDEBUG
- if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
- if (!check_cqe_seen(cqe))
- ERROR("unexpected compressed CQE, TX stopped");
- return;
- }
- if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
- (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
- if (!check_cqe_seen(cqe))
- ERROR("unexpected error CQE, TX stopped");
- return;
- }
-#endif /* NDEBUG */
- ++cq_ci;
- } while (1);
- if (unlikely(cqe == NULL))
- return;
- txq->wqe_pi = ntohs(cqe->wqe_counter);
- ctrl = (volatile struct mlx5_wqe_ctrl *)
- tx_mlx5_wqe(txq, txq->wqe_pi);
- elts_tail = ctrl->ctrl3;
- assert(elts_tail < (1 << txq->wqe_n));
- /* Free buffers. */
- while (elts_free != elts_tail) {
- struct rte_mbuf *elt = (*txq->elts)[elts_free];
- unsigned int elts_free_next =
- (elts_free + 1) & (elts_n - 1);
- struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next];
-
-#ifndef NDEBUG
- /* Poisoning. */
- memset(&(*txq->elts)[elts_free],
- 0x66,
- sizeof((*txq->elts)[elts_free]));
-#endif
- RTE_MBUF_PREFETCH_TO_FREE(elt_next);
- /* Only one segment needs to be freed. */
- rte_pktmbuf_free_seg(elt);
- elts_free = elts_free_next;
- }
- txq->cq_ci = cq_ci;
- txq->elts_tail = elts_tail;
- /* Update the consumer index. */
- rte_wmb();
- *txq->cq_db = htonl(cq_ci);
-}
-
-/**
- * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
- *
- * @param buf
- * Pointer to mbuf.
- *
- * @return
- * Memory pool where data is located for given mbuf.
- */
-static struct rte_mempool *
-txq_mb2mp(struct rte_mbuf *buf)
-{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
-}
-
-/**
- * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
- * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
- * remove an entry first.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- *
- * @return
- * mr->lkey on success, (uint32_t)-1 on failure.
- */
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
-{
- unsigned int i;
- uint32_t lkey = (uint32_t)-1;
-
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mp == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i].mp == mp) {
- assert(txq->mp2mr[i].lkey != (uint32_t)-1);
- assert(htonl(txq->mp2mr[i].mr->lkey) ==
- txq->mp2mr[i].lkey);
- lkey = txq->mp2mr[i].lkey;
- break;
- }
- }
- if (unlikely(lkey == (uint32_t)-1))
- lkey = txq_mp2mr_reg(txq, mp, i);
- return lkey;
-}
-
-/**
- * Ring TX queue doorbell.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe
- * Pointer to the last WQE posted in the NIC.
- */
-static inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
-{
- uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
- volatile uint64_t *src = ((volatile uint64_t *)wqe);
-
- rte_wmb();
- *txq->qp_db = htonl(txq->wqe_ci);
- /* Ensure ordering between DB record and BF copy. */
- rte_wmb();
- *dst = *src;
-}
-
-/**
* DPDK callback to check the status of a tx descriptor.
*
* @param tx_queue
@@ -415,12 +267,10 @@ int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
struct txq *txq = tx_queue;
- const unsigned int elts_n = 1 << txq->elts_n;
- const unsigned int elts_cnt = elts_n - 1;
- unsigned int used;
+ uint16_t used;
- txq_complete(txq);
- used = (txq->elts_head - txq->elts_tail) & elts_cnt;
+ mlx5_tx_complete(txq);
+ used = txq->elts_head - txq->elts_tail;
if (offset < used)
return RTE_ETH_TX_DESC_FULL;
return RTE_ETH_TX_DESC_DONE;
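/*
 * Illustrative sketch, not part of the patch: why the unmasked subtraction
 * "used = txq->elts_head - txq->elts_tail" above is safe once both fields are
 * free-running uint16_t counters.  Unsigned arithmetic wraps modulo 2^16, so
 * the distance stays correct across the wrap point.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint16_t head = 5;	/* wrapped past 65535 back to 5 */
	uint16_t tail = 65530;	/* not yet wrapped */
	uint16_t used = head - tail;

	assert(used == 11);	/* (65536 + 5) - 65530 */
	return 0;
}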
@@ -494,11 +344,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
unsigned int k = 0;
- unsigned int max;
+ uint16_t max_elts;
unsigned int max_inline = txq->max_inline;
const unsigned int inline_en = !!max_inline && txq->inline_en;
uint16_t max_wqe;
@@ -514,10 +365,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Prefetch first packet cacheline. */
rte_prefetch0(*pkts);
/* Start processing. */
- txq_complete(txq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -533,6 +382,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ehdr;
uint8_t cs_flags = 0;
uint64_t tso = 0;
+ uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
@@ -545,9 +395,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max < segs_n + 1)
+ if (max_elts < segs_n)
break;
- max -= segs_n;
+ max_elts -= segs_n;
--segs_n;
if (unlikely(--max_wqe == 0))
break;
@@ -566,7 +416,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (length < (MLX5_WQE_DWORD_SIZE + 2))
break;
/* Update element. */
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head & elts_m] = buf;
/* Prefetch next buffer data. */
if (pkts_n - i > 1)
rte_prefetch0(
@@ -628,6 +478,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
tso_header_sz = buf->l2_len + vlan_sz +
buf->l3_len + buf->l4_len;
+ tso_segsz = buf->tso_segsz;
if (is_tunneled && txq->tunnel_en) {
tso_header_sz += buf->outer_l2_len +
@@ -762,7 +613,7 @@ use_dseg:
naddr = htonll(addr);
*dseg = (rte_v128u32_t){
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
@@ -801,12 +652,11 @@ next_seg:
naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
- elts_head = (elts_head + 1) & (elts_n - 1);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[++elts_head & elts_m] = buf;
++sg;
/* Advance counter only if all segs are successfully posted. */
if (sg < segs_n)
@@ -814,7 +664,7 @@ next_seg:
else
j += sg;
next_pkt:
- elts_head = (elts_head + 1) & (elts_n - 1);
+ ++elts_head;
++pkts;
++i;
/* Initialize known and common part of the WQE structure. */
@@ -827,7 +677,7 @@ next_pkt:
};
wqe->eseg = (rte_v128u32_t){
0,
- cs_flags | (htons(buf->tso_segsz) << 16),
+ cs_flags | (htons(tso_segsz) << 16),
0,
(ehdr << 16) | htons(tso_header_sz),
};
@@ -857,7 +707,7 @@ next_wqe:
/* Take a shortcut if nothing must be sent. */
if (unlikely((i + k) == 0))
return 0;
- txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1);
+ txq->elts_head += (i + j);
/* Check whether completion threshold has been reached. */
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
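/*
 * Illustrative sketch, not part of the patch: the free-running head counter
 * pattern adopted above.  The ring size is a power of two, so
 * "counter & (size - 1)" selects the slot while the counter itself keeps
 * incrementing monotonically; the size used here is a placeholder.
 */
#include <stdint.h>

#define RING_SIZE 16u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ptr_ring {
	void *slot[RING_SIZE];
	uint16_t head;			/* free running, wraps naturally */
	uint16_t tail;			/* advanced on completion */
};

static inline void
ptr_ring_push(struct ptr_ring *r, void *obj)
{
	r->slot[r->head++ & RING_MASK] = obj;
}

static inline uint16_t
ptr_ring_used(const struct ptr_ring *r)
{
	return r->head - r->tail;	/* modulo-2^16 distance */
}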
@@ -964,10 +814,11 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
- unsigned int max;
+ uint16_t max_elts;
uint16_t max_wqe;
unsigned int comp;
struct mlx5_mpw mpw = {
@@ -980,16 +831,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
- txq_complete(txq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
do {
struct rte_mbuf *buf = *(pkts++);
- unsigned int elts_head_next;
uint32_t length;
unsigned int segs_n = buf->nb_segs;
uint32_t cs_flags = 0;
@@ -999,12 +847,12 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max < segs_n + 1)
+ if (max_elts < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
if (segs_n > MLX5_MPW_DSEG_MAX)
break;
- max -= segs_n;
+ max_elts -= segs_n;
--pkts_n;
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
@@ -1040,17 +888,15 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_wqe_data_seg *dseg;
uintptr_t addr;
- elts_head_next = (elts_head + 1) & (elts_n - 1);
assert(buf);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
.addr = htonll(addr),
};
- elts_head = elts_head_next;
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
#endif
@@ -1061,7 +907,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
assert(length == mpw.len);
if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
mlx5_mpw_close(txq, &mpw);
- elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += length;
@@ -1179,10 +1024,11 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
- unsigned int max;
+ uint16_t max_elts;
uint16_t max_wqe;
unsigned int comp;
unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
@@ -1208,13 +1054,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
- txq_complete(txq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
do {
struct rte_mbuf *buf = *(pkts++);
- unsigned int elts_head_next;
uintptr_t addr;
uint32_t length;
unsigned int segs_n = buf->nb_segs;
@@ -1225,12 +1068,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max < segs_n + 1)
+ if (max_elts < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
if (segs_n > MLX5_MPW_DSEG_MAX)
break;
- max -= segs_n;
+ max_elts -= segs_n;
--pkts_n;
/*
* Compute max_wqe in case less WQE were consumed in previous
@@ -1291,18 +1134,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
do {
volatile struct mlx5_wqe_data_seg *dseg;
- elts_head_next =
- (elts_head + 1) & (elts_n - 1);
assert(buf);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
.addr = htonll(addr),
};
- elts_head = elts_head_next;
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
#endif
@@ -1319,9 +1159,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
assert(length <= inline_room);
assert(length == DATA_LEN(buf));
- elts_head_next = (elts_head + 1) & (elts_n - 1);
addr = rte_pktmbuf_mtod(buf, uintptr_t);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
/* Maximum number of bytes before wrapping. */
max = ((((uintptr_t)(txq->wqes)) +
(1 << txq->wqe_n) *
@@ -1358,7 +1197,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
inline_room -= length;
}
}
- elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += length;
@@ -1480,10 +1318,11 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = 1 << txq->elts_n;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
unsigned int i = 0;
unsigned int j = 0;
- unsigned int max_elts;
+ uint16_t max_elts;
uint16_t max_wqe;
unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
unsigned int mpw_room = 0;
@@ -1496,10 +1335,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(!pkts_n))
return 0;
/* Start processing. */
- txq_complete(txq);
+ mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- if (max_elts > elts_n)
- max_elts -= elts_n;
/* A CQE slot must always be available. */
assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
@@ -1507,7 +1344,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
return 0;
do {
struct rte_mbuf *buf = *(pkts++);
- unsigned int elts_head_next;
uintptr_t addr;
uint64_t naddr;
unsigned int n;
@@ -1521,7 +1357,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* that one ring entry remains unused.
*/
assert(segs_n);
- if (max_elts - j < segs_n + 1)
+ if (max_elts - j < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
if (segs_n > MLX5_MPW_DSEG_MAX)
@@ -1605,18 +1441,15 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
do {
volatile struct mlx5_wqe_data_seg *dseg;
- elts_head_next =
- (elts_head + 1) & (elts_n - 1);
assert(buf);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = mlx5_tx_mb2mr(txq, buf),
.addr = htonll(addr),
};
- elts_head = elts_head_next;
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
#endif
@@ -1667,7 +1500,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* No need to get completion as the entire packet is
* copied to WQ. Free the buf right away.
*/
- elts_head_next = elts_head;
rte_pktmbuf_free_seg(buf);
mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
/* Add pad in the next packet if any. */
@@ -1690,8 +1522,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
dseg = (volatile void *)
((uintptr_t)mpw.data.raw +
inl_pad);
- elts_head_next = (elts_head + 1) & (elts_n - 1);
- (*txq->elts)[elts_head] = buf;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
addr = rte_pktmbuf_mtod(buf, uintptr_t);
for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
rte_prefetch2((void *)(addr +
@@ -1699,7 +1530,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
naddr = htonll(addr);
*dseg = (rte_v128u32_t) {
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
@@ -1710,7 +1541,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
mpw_room -= (inl_pad + sizeof(*dseg));
inl_pad = 0;
}
- elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += length;
@@ -1764,30 +1594,20 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
{
- uint32_t pkt_type;
- uint16_t flags = ntohs(cqe->hdr_type_etc);
+ uint8_t idx;
+ uint8_t pinfo = cqe->pkt_info;
+ uint16_t ptype = cqe->hdr_type_etc;
- if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) {
- pkt_type =
- TRANSPOSE(flags,
- MLX5_CQE_RX_IPV4_PACKET,
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- MLX5_CQE_RX_IPV6_PACKET,
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
- pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ?
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
- } else {
- pkt_type =
- TRANSPOSE(flags,
- MLX5_CQE_L3_HDR_TYPE_IPV6,
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- MLX5_CQE_L3_HDR_TYPE_IPV4,
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
- }
- return pkt_type;
+ /*
+ * The index to the array should have:
+ * bit[1:0] = l3_hdr_type
+ * bit[4:2] = l4_hdr_type
+ * bit[5] = ip_frag
+ * bit[6] = tunneled
+ * bit[7] = outer_l3_type
+ */
+ idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
+ return mlx5_ptype_table[idx];
}
/**
@@ -1819,7 +1639,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
if (zip->ai) {
volatile struct mlx5_mini_cqe8 (*mc)[8] =
(volatile struct mlx5_mini_cqe8 (*)[8])
- (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt]);
+ (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
len = ntohl((*mc)[zip->ai & 7].byte_cnt);
*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
@@ -1867,7 +1687,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
volatile struct mlx5_mini_cqe8 (*mc)[8] =
(volatile struct mlx5_mini_cqe8 (*)[8])
(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
- cqe_cnt]);
+ cqe_cnt].pkt_info);
/* Fix endianness. */
zip->cqe_cnt = ntohl(cqe->byte_cnt);
@@ -2018,7 +1838,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt = seg;
assert(len >= (rxq->crc_present << 2));
/* Update packet information. */
- pkt->packet_type = 0;
+ pkt->packet_type = rxq_cq_to_pkt_type(cqe);
pkt->ol_flags = 0;
if (rss_hash_res && rxq->rss_hash) {
pkt->hash.rss = rss_hash_res;
@@ -2036,10 +1856,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
mlx5_flow_mark_get(mark);
}
}
- if (rxq->csum | rxq->csum_l2tun) {
- pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ if (rxq->csum | rxq->csum_l2tun)
pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
- }
if (rxq->vlan_strip &&
(cqe->hdr_type_etc &
htons(MLX5_CQE_VLAN_STRIPPED))) {
@@ -2054,9 +1872,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
DATA_LEN(rep) = DATA_LEN(seg);
PKT_LEN(rep) = PKT_LEN(seg);
SET_DATA_OFF(rep, DATA_OFF(seg));
- NB_SEGS(rep) = NB_SEGS(seg);
PORT(rep) = PORT(seg);
- NEXT(rep) = NULL;
(*rxq->elts)[idx] = rep;
/*
* Fill NIC descriptor with the new buffer. The lkey and size
@@ -2151,75 +1967,70 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
return 0;
}
-/**
- * DPDK callback for rx queue interrupt enable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rx_queue_id
- * RX queue number
- *
- * @return
- * 0 on success, negative on failure.
+/*
+ * Vectorized Rx/Tx routines are not compiled in when required vector
+ * instructions are not supported on a target architecture. The following null
+ * stubs are needed for linkage when those are not included outside of this file
+ * (e.g. mlx5_rxtx_vec_sse.c for x86).
*/
-int
-mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+
+uint16_t __attribute__((weak))
+mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
-#ifdef HAVE_UPDATE_CQ_CI
- struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct ibv_cq *cq = rxq_ctrl->cq;
- uint16_t ci = rxq->cq_ci;
- int ret = 0;
-
- ibv_mlx5_exp_update_cq_ci(cq, ci);
- ret = ibv_req_notify_cq(cq, 0);
-#else
- int ret = -1;
- (void)dev;
- (void)rx_queue_id;
-#endif
- if (ret)
- WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
- return ret;
+ (void)dpdk_txq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
}
-/**
- * DPDK callback for rx queue interrupt disable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rx_queue_id
- * RX queue number
- *
- * @return
- * 0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+uint16_t __attribute__((weak))
+mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
-#ifdef HAVE_UPDATE_CQ_CI
- struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct ibv_cq *cq = rxq_ctrl->cq;
- struct ibv_cq *ev_cq;
- void *ev_ctx;
- int ret = 0;
-
- ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
- if (ret || ev_cq != cq)
- ret = -1;
- else
- ibv_ack_cq_events(cq, 1);
-#else
- int ret = -1;
- (void)dev;
- (void)rx_queue_id;
-#endif
- if (ret)
- WARN("unable to disable interrupt on rx queue %d",
- rx_queue_id);
- return ret;
+ (void)dpdk_txq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ (void)dpdk_rxq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
+
+int __attribute__((weak))
+priv_check_raw_vec_tx_support(struct priv *priv)
+{
+ (void)priv;
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+priv_check_vec_tx_support(struct priv *priv)
+{
+ (void)priv;
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+rxq_check_vec_support(struct rxq *rxq)
+{
+ (void)rxq;
+ return -ENOTSUP;
+}
+
+int __attribute__((weak))
+priv_check_vec_rx_support(struct priv *priv)
+{
+ (void)priv;
+ return -ENOTSUP;
+}
+
+void __attribute__((weak))
+priv_prep_vec_rx_function(struct priv *priv)
+{
+ (void)priv;
}
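/*
 * Illustrative sketch, not part of the patch: the weak-symbol technique used
 * by the null stubs above.  A weak definition is kept by the linker only when
 * no strong definition of the same symbol (e.g. from an arch-specific object
 * such as mlx5_rxtx_vec_sse.o) is present.  Names here are placeholders.
 */
#include <stdio.h>

int __attribute__((weak))
burst_vec(void)
{
	return 0;	/* fallback when the vectorized object is not built */
}

/*
 * An optimized translation unit may provide the strong definition:
 *
 *	int burst_vec(void) { return 32; }
 *
 * in which case the linker silently drops the weak stub above.
 */
int
main(void)
{
	printf("burst_vec() = %d\n", burst_vec());
	return 0;
}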
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 8db8eb14..7de1d108 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -115,10 +115,13 @@ struct rxq {
unsigned int port_id:8;
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
- unsigned int :8; /* Remaining bits. */
+ unsigned int pending_err:1; /* CQE error needs to be handled. */
+ unsigned int trim_elts:1; /* Whether elts needs clean-up. */
+ unsigned int :6; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t rq_ci;
+ uint16_t rq_pi;
uint16_t cq_ci;
volatile struct mlx5_wqe_data_seg(*wqes)[];
volatile struct mlx5_cqe(*cqes)[];
@@ -126,6 +129,8 @@ struct rxq {
struct rte_mbuf *(*elts)[];
struct rte_mempool *mp;
struct mlx5_rxq_stats stats;
+ uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
+ struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
} __rte_cache_aligned;
/* RX queue control descriptor. */
@@ -240,10 +245,10 @@ struct hash_rxq {
};
/* TX queue descriptor. */
-RTE_STD_C11
+__extension__
struct txq {
- uint16_t elts_head; /* Current index in (*elts)[]. */
- uint16_t elts_tail; /* First element awaiting completion. */
+ uint16_t elts_head; /* Current counter in (*elts)[]. */
+ uint16_t elts_tail; /* Counter of first element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
@@ -261,16 +266,19 @@ struct txq {
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
+ uint32_t flags; /* Flags for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register. */
struct {
- const struct rte_mempool *mp; /* Cached Memory Pool. */
+ uintptr_t start; /* Start address of MR */
+ uintptr_t end; /* End address of MR */
struct ibv_mr *mr; /* Memory Region (for mp). */
uint32_t lkey; /* htonl(mr->lkey) */
} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+ uint16_t mr_cache_idx; /* Index of last hit entry. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
@@ -298,19 +306,17 @@ int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
-int priv_intr_efd_enable(struct priv *priv);
-void priv_intr_efd_disable(struct priv *priv);
-int priv_create_intr_vec(struct priv *priv);
-void priv_destroy_intr_vec(struct priv *priv);
void rxq_cleanup(struct rxq_ctrl *);
-int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
-int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
- unsigned int, const struct rte_eth_rxconf *,
- struct rte_mempool *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
+int priv_rx_intr_vec_enable(struct priv *priv);
+void priv_rx_intr_vec_disable(struct priv *priv);
+#ifdef HAVE_UPDATE_CQ_CI
+int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+#endif /* HAVE_UPDATE_CQ_CI */
/* mlx5_txq.c */
@@ -324,6 +330,9 @@ uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
/* mlx5_rxtx.c */
+extern uint32_t mlx5_ptype_table[];
+
+void mlx5_set_ptype_table(void);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
@@ -333,8 +342,16 @@ uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
-int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+/* Vectorized version of mlx5_rxtx.c */
+int priv_check_raw_vec_tx_support(struct priv *);
+int priv_check_vec_tx_support(struct priv *);
+int rxq_check_vec_support(struct rxq *);
+int priv_check_vec_rx_support(struct priv *);
+void priv_prep_vec_rx_function(struct priv *);
+uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
/* mlx5_mr.c */
@@ -342,4 +359,254 @@ struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
void txq_mp2mr_iter(struct rte_mempool *, void *);
uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
+#ifndef NDEBUG
+/**
+ * Verify or set magic value in CQE.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ *
+ * @return
+ * 0 the first time.
+ */
+static inline int
+check_cqe_seen(volatile struct mlx5_cqe *cqe)
+{
+ static const uint8_t magic[] = "seen";
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
+ int ret = 1;
+ unsigned int i;
+
+ for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
+ if (!ret || (*buf)[i] != magic[i]) {
+ ret = 0;
+ (*buf)[i] = magic[i];
+ }
+ return ret;
+}
+#endif /* NDEBUG */
+
+/**
+ * Check whether CQE is valid.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ * @param cqes_n
+ * Size of completion queue.
+ * @param ci
+ * Consumer index.
+ *
+ * @return
+ * 0 on success, 1 on failure.
+ */
+static __rte_always_inline int
+check_cqe(volatile struct mlx5_cqe *cqe,
+ unsigned int cqes_n, const uint16_t ci)
+{
+ uint16_t idx = ci & cqes_n;
+ uint8_t op_own = cqe->op_own;
+ uint8_t op_owner = MLX5_CQE_OWNER(op_own);
+ uint8_t op_code = MLX5_CQE_OPCODE(op_own);
+
+ if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
+ return 1; /* No CQE. */
+#ifndef NDEBUG
+ if ((op_code == MLX5_CQE_RESP_ERR) ||
+ (op_code == MLX5_CQE_REQ_ERR)) {
+ volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
+ uint8_t syndrome = err_cqe->syndrome;
+
+ if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
+ (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
+ return 0;
+ if (!check_cqe_seen(cqe))
+ ERROR("unexpected CQE error %u (0x%02x)"
+ " syndrome 0x%02x",
+ op_code, op_code, syndrome);
+ return 1;
+ } else if ((op_code != MLX5_CQE_RESP_SEND) &&
+ (op_code != MLX5_CQE_REQ)) {
+ if (!check_cqe_seen(cqe))
+ ERROR("unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
+ return 1;
+ }
+#endif /* NDEBUG */
+ return 0;
+}
+
+/**
+ * Return the address of the WQE.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe_ci
+ * WQE consumer index.
+ *
+ * @return
+ * WQE address.
+ */
+static inline uintptr_t *
+tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+{
+ ci &= ((1 << txq->wqe_n) - 1);
+ return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+}
+
+/**
+ * Manage TX completions.
+ *
+ * When sending a burst, mlx5_tx_burst() posts several WRs.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ */
+static __rte_always_inline void
+mlx5_tx_complete(struct txq *txq)
+{
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int cqe_n = 1 << txq->cqe_n;
+ const unsigned int cqe_cnt = cqe_n - 1;
+ uint16_t elts_free = txq->elts_tail;
+ uint16_t elts_tail;
+ uint16_t cq_ci = txq->cq_ci;
+ volatile struct mlx5_cqe *cqe = NULL;
+ volatile struct mlx5_wqe_ctrl *ctrl;
+ struct rte_mbuf *m, *free[elts_n];
+ struct rte_mempool *pool = NULL;
+ unsigned int blk_n = 0;
+
+ cqe = &(*txq->cqes)[cq_ci & cqe_cnt];
+ if (unlikely(check_cqe(cqe, cqe_n, cq_ci)))
+ return;
+#ifndef NDEBUG
+ if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
+ (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
+ if (!check_cqe_seen(cqe))
+ ERROR("unexpected error CQE, TX stopped");
+ return;
+ }
+#endif /* NDEBUG */
+ ++cq_ci;
+ txq->wqe_pi = ntohs(cqe->wqe_counter);
+ ctrl = (volatile struct mlx5_wqe_ctrl *)
+ tx_mlx5_wqe(txq, txq->wqe_pi);
+ elts_tail = ctrl->ctrl3;
+ assert((elts_tail & elts_m) < (1 << txq->wqe_n));
+ /* Free buffers. */
+ while (elts_free != elts_tail) {
+ m = rte_pktmbuf_prefree_seg((*txq->elts)[elts_free++ & elts_m]);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == pool)) {
+ free[blk_n++] = m;
+ } else {
+ if (likely(pool != NULL))
+ rte_mempool_put_bulk(pool,
+ (void *)free,
+ blk_n);
+ free[0] = m;
+ pool = m->pool;
+ blk_n = 1;
+ }
+ }
+ }
+ if (blk_n)
+ rte_mempool_put_bulk(pool, (void *)free, blk_n);
+#ifndef NDEBUG
+ elts_free = txq->elts_tail;
+ /* Poisoning. */
+ while (elts_free != elts_tail) {
+ memset(&(*txq->elts)[elts_free & elts_m],
+ 0x66,
+ sizeof((*txq->elts)[elts_free & elts_m]));
+ ++elts_free;
+ }
+#endif
+ txq->cq_ci = cq_ci;
+ txq->elts_tail = elts_tail;
+ /* Update the consumer index. */
+ rte_wmb();
+ *txq->cq_db = htonl(cq_ci);
+}
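/*
 * Illustrative sketch, not part of the patch: the "group by mempool, then
 * bulk free" pattern used by mlx5_tx_complete() above, in generic form.
 * rte_pktmbuf_prefree_seg() returns NULL when the mbuf is still referenced
 * elsewhere and must not be recycled.  The batch size is a placeholder.
 */
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static void
bulk_free_segs(struct rte_mbuf **segs, unsigned int n)
{
	struct rte_mempool *pool = NULL;
	void *batch[64];
	unsigned int blk = 0;
	unsigned int i;

	for (i = 0; i < n; ++i) {
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(segs[i]);

		if (m == NULL)
			continue;
		if (m->pool != pool || blk == RTE_DIM(batch)) {
			/* Flush the batch before switching pools. */
			if (blk)
				rte_mempool_put_bulk(pool, batch, blk);
			pool = m->pool;
			blk = 0;
		}
		batch[blk++] = m;
	}
	if (blk)
		rte_mempool_put_bulk(pool, batch, blk);
}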
+
+/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
+ * the cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx5_tx_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
+/**
+ * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
+ * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
+ * remove an entry first.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param[in] mp
+ * Memory Pool for which a Memory Region lkey must be returned.
+ *
+ * @return
+ * mr->lkey on success, (uint32_t)-1 on failure.
+ */
+static __rte_always_inline uint32_t
+mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+{
+ uint16_t i = txq->mr_cache_idx;
+ uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
+
+ assert(i < RTE_DIM(txq->mp2mr));
+ if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr))
+ return txq->mp2mr[i].lkey;
+ for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
+ if (unlikely(txq->mp2mr[i].mr == NULL)) {
+ /* Unknown MP, add a new MR for it. */
+ break;
+ }
+ if (txq->mp2mr[i].start <= addr &&
+ txq->mp2mr[i].end >= addr) {
+ assert(txq->mp2mr[i].lkey != (uint32_t)-1);
+ assert(htonl(txq->mp2mr[i].mr->lkey) ==
+ txq->mp2mr[i].lkey);
+ txq->mr_cache_idx = i;
+ return txq->mp2mr[i].lkey;
+ }
+ }
+ txq->mr_cache_idx = 0;
+ return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+}
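/*
 * Illustrative sketch, not part of the patch: the single-entry "last hit"
 * cache placed in front of a small address-range table, as mlx5_tx_mb2mr()
 * does above with txq->mr_cache_idx.  Types, sizes and the miss handling are
 * placeholders.
 */
#include <stdint.h>

#define RANGE_N 8

struct range_entry {
	uintptr_t start;
	uintptr_t end;
	uint32_t key;
};

struct range_cache {
	struct range_entry tbl[RANGE_N];
	uint16_t last_hit;
};

static uint32_t
range_lookup(struct range_cache *c, uintptr_t addr)
{
	uint16_t i = c->last_hit;

	/* Fast path: most lookups hit the range used last time. */
	if (c->tbl[i].start <= addr && addr < c->tbl[i].end)
		return c->tbl[i].key;
	for (i = 0; i < RANGE_N; ++i) {
		if (c->tbl[i].start <= addr && addr < c->tbl[i].end) {
			c->last_hit = i;	/* remember for next time */
			return c->tbl[i].key;
		}
	}
	c->last_hit = 0;
	return (uint32_t)-1;	/* a real caller would register a new range */
}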
+
+/**
+ * Ring TX queue doorbell.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the last WQE posted in the NIC.
+ */
+static __rte_always_inline void
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+{
+ uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+ volatile uint64_t *src = ((volatile uint64_t *)wqe);
+
+ rte_wmb();
+ *txq->qp_db = htonl(txq->wqe_ci);
+ /* Ensure ordering between DB record and BF copy. */
+ rte_wmb();
+ *dst = *src;
+}
+
#endif /* RTE_PMD_MLX5_RXTX_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
new file mode 100644
index 00000000..8560f745
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -0,0 +1,1417 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <smmintrin.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5_hw.h>
+#include <infiniband/arch.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+/* DPDK headers don't like -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/**
+ * Fill in buffer descriptors in a multi-packet send descriptor.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param dseg
+ * Pointer to buffer descriptor to be written.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param n
+ * Number of packets to be filled.
+ */
+static inline void
+txq_wr_dseg_v(struct txq *txq, __m128i *dseg,
+ struct rte_mbuf **pkts, unsigned int n)
+{
+ unsigned int pos;
+ uintptr_t addr;
+ const __m128i shuf_mask_dseg =
+ _mm_set_epi8(8, 9, 10, 11, /* addr, bswap64 */
+ 12, 13, 14, 15,
+ 7, 6, 5, 4, /* lkey */
+ 0, 1, 2, 3 /* length, bswap32 */);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t tx_byte = 0;
+#endif
+
+ for (pos = 0; pos < n; ++pos, ++dseg) {
+ __m128i desc;
+ struct rte_mbuf *pkt = pkts[pos];
+
+ addr = rte_pktmbuf_mtod(pkt, uintptr_t);
+ desc = _mm_set_epi32(addr >> 32,
+ addr,
+ mlx5_tx_mb2mr(txq, pkt),
+ DATA_LEN(pkt));
+ desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
+ _mm_store_si128(dseg, desc);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tx_byte += DATA_LEN(pkt);
+#endif
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.obytes += tx_byte;
+#endif
+}
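/*
 * Illustrative sketch, not part of the patch: the byte-shuffle trick behind
 * the shuf_mask_* constants above.  Each mask byte selects a source byte, so
 * a per-lane 3,2,1,0 pattern byte-swaps every 32-bit lane (host little-endian
 * to the big-endian layout the device expects).  Requires SSSE3 or later.
 */
#include <assert.h>
#include <stdint.h>
#include <smmintrin.h>

int
main(void)
{
	const __m128i bswap32_mask =
		_mm_set_epi8(12, 13, 14, 15,	/* lane 3 */
			     8, 9, 10, 11,	/* lane 2 */
			     4, 5, 6, 7,	/* lane 1 */
			     0, 1, 2, 3);	/* lane 0 */
	__m128i v = _mm_set_epi32(0, 0, 0, 0x11223344);
	uint32_t out[4];

	v = _mm_shuffle_epi8(v, bswap32_mask);
	_mm_storeu_si128((__m128i *)out, v);
	assert(out[0] == 0x44332211);	/* bytes of lane 0 reversed */
	return 0;
}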
+
+/**
+ * Count the number of continuous single segment packets. The first packet must
+ * be a single segment packet.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ * Number of continuous single segment packets.
+ */
+static inline unsigned int
+txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ assert(NB_SEGS(pkts[0]) == 1);
+ /* Count the number of continuous single segment packets. */
+ for (pos = 1; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) > 1)
+ break;
+ return pos;
+}
+
+/**
+ * Count the number of packets having same ol_flags and calculate cs_flags.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ * @param cs_flags
+ * Pointer of flags to be returned.
+ *
+ * @return
+ * Number of packets having same ol_flags.
+ */
+static inline unsigned int
+txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint8_t *cs_flags)
+{
+ unsigned int pos;
+ const uint64_t ol_mask =
+ PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of packets having same ol_flags. */
+ for (pos = 1; pos < pkts_n; ++pos)
+ if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
+ break;
+ /* Should open another MPW session for the rest. */
+ if (pkts[0]->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ const uint64_t is_tunneled =
+ pkts[0]->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ *cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
+ }
+ return pos;
+}
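/*
 * Illustrative sketch, not part of the patch: the XOR-and-mask test used by
 * txq_calc_offload() above to find how many leading packets share the same
 * offload bits as the first one.  The flag values are arbitrary examples.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int
same_flags_prefix(const uint64_t *flags, unsigned int n, uint64_t mask)
{
	unsigned int i;

	if (!n)
		return 0;
	for (i = 1; i < n; ++i)
		if ((flags[i] ^ flags[0]) & mask)
			break;		/* first packet that differs */
	return i;
}

int
main(void)
{
	const uint64_t f[] = { 0x5, 0x5, 0xd, 0x5 };

	assert(same_flags_prefix(f, 4, 0x3) == 4);	/* bit 0x8 ignored */
	assert(same_flags_prefix(f, 4, 0xf) == 2);	/* f[2] differs */
	return 0;
}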
+
+/**
+ * Send multi-segmented packets until it encounters a single segment packet in
+ * the pkts list.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static uint16_t
+txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n;
+ volatile struct mlx5_wqe *wqe = NULL;
+
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ if (unlikely(!pkts_n))
+ return 0;
+ for (n = 0; n < pkts_n; ++n) {
+ struct rte_mbuf *buf = pkts[n];
+ unsigned int segs_n = buf->nb_segs;
+ unsigned int ds = nb_dword_in_hdr;
+ unsigned int len = PKT_LEN(buf);
+ uint16_t wqe_ci = txq->wqe_ci;
+ const __m128i shuf_mask_ctrl =
+ _mm_set_epi8(15, 14, 13, 12,
+ 8, 9, 10, 11, /* bswap32 */
+ 4, 5, 6, 7, /* bswap32 */
+ 0, 1, 2, 3 /* bswap32 */);
+ uint8_t cs_flags = 0;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ __m128i *t_wqe, *dseg;
+ __m128i ctrl;
+
+ assert(segs_n);
+ max_elts = elts_n - (elts_head - txq->elts_tail);
+ max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
+ /*
+ * A MPW session consumes 2 WQEs at most to
+ * include MLX5_MPW_DSEG_MAX pointers.
+ */
+ if (segs_n == 1 ||
+ max_elts < segs_n || max_wqe < 2)
+ break;
+ wqe = &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[wqe_ci & wq_mask].hdr;
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ const uint64_t is_tunneled = buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
+ }
+ /* Title WQEBB pointer. */
+ t_wqe = (__m128i *)wqe;
+ dseg = (__m128i *)(wqe + 1);
+ do {
+ if (!(ds++ % nb_dword_per_wqebb)) {
+ dseg = (__m128i *)
+ &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[++wqe_ci & wq_mask];
+ }
+ txq_wr_dseg_v(txq, dseg++, &buf, 1);
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ buf = buf->next;
+ } while (--segs_n);
+ ++wqe_ci;
+ /* Fill CTRL in the header. */
+ ctrl = _mm_set_epi32(0, 0, txq->qp_num_8s | ds,
+ MLX5_OPC_MOD_MPW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
+ ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
+ _mm_store_si128(t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ _mm_store_si128(t_wqe + 1,
+ _mm_set_epi16(0, 0, 0, 0,
+ htons(len), cs_flags,
+ 0, 0));
+ txq->wqe_ci = wqe_ci;
+ }
+ if (!n)
+ return 0;
+ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
+ txq->elts_head = elts_head;
+ if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[3] = txq->elts_head;
+ txq->elts_comp = 0;
+ ++txq->cq_pi;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += n;
+#endif
+ mlx5_tx_dbrec(txq, wqe);
+ return n;
+}
+
+/**
+ * Send a burst of packets with Enhanced MPW. If a multi-seg packet is
+ * encountered, stop and return so txq_scatter_v() can process it. All packets in
+ * the pkts list should be single segment packets having same offload flags.
+ * This must be checked by txq_check_multiseg() and txq_calc_offload().
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
+ * @param cs_flags
+ * Checksum offload flags to be written in the descriptor.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static inline uint16_t
+txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint8_t cs_flags)
+{
+ struct rte_mbuf **elts;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n = 0;
+ unsigned int pos;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ uint32_t comp_req = 0;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ uint16_t wq_idx = txq->wqe_ci & wq_mask;
+ volatile struct mlx5_wqe64 *wq =
+ &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
+ volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
+ const __m128i shuf_mask_ctrl =
+ _mm_set_epi8(15, 14, 13, 12,
+ 8, 9, 10, 11, /* bswap32 */
+ 4, 5, 6, 7, /* bswap32 */
+ 0, 1, 2, 3 /* bswap32 */);
+ __m128i *t_wqe, *dseg;
+ __m128i ctrl;
+
+ /* Make sure all packets can fit into a single WQE. */
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ if (unlikely(!pkts_n))
+ return 0;
+ elts = &(*txq->elts)[elts_head & elts_m];
+ /* Loop for available tailroom first. */
+ n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
+ for (pos = 0; pos < (n & -2); pos += 2)
+ _mm_storeu_si128((__m128i *)&elts[pos],
+ _mm_loadu_si128((__m128i *)&pkts[pos]));
+ if (n & 1)
+ elts[pos] = pkts[pos];
+ /* Check if it crosses the end of the queue. */
+ if (unlikely(n < pkts_n)) {
+ elts = &(*txq->elts)[0];
+ for (pos = 0; pos < pkts_n - n; ++pos)
+ elts[pos] = pkts[n + pos];
+ }
+ txq->elts_head += pkts_n;
+ /* Save title WQEBB pointer. */
+ t_wqe = (__m128i *)wqe;
+ dseg = (__m128i *)(wqe + 1);
+ /* Calculate the number of entries to the end. */
+ n = RTE_MIN(
+ (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
+ pkts_n);
+ /* Fill DSEGs. */
+ txq_wr_dseg_v(txq, dseg, pkts, n);
+ /* Check if it crosses the end of the queue. */
+ if (n < pkts_n) {
+ dseg = (__m128i *)txq->wqes;
+ txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
+ }
+ if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
+ txq->elts_comp += pkts_n;
+ } else {
+ /* Request a completion. */
+ txq->elts_comp = 0;
+ ++txq->cq_pi;
+ comp_req = 8;
+ }
+ /* Fill CTRL in the header. */
+ ctrl = _mm_set_epi32(txq->elts_head, comp_req,
+ txq->qp_num_8s | (pkts_n + 2),
+ MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
+ ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
+ _mm_store_si128(t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ _mm_store_si128(t_wqe + 1,
+ _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, cs_flags,
+ 0, 0, 0, 0));
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += pkts_n;
+#endif
+ txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
+ nb_dword_per_wqebb;
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq, wqe);
+ return pkts_n;
+}
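
As a side note on the index handling used throughout this path: elts_head, elts_tail, wqe_ci and wqe_pi are free-running 16-bit counters that are only masked with a power-of-two ring mask when a slot is actually touched, so unsigned subtraction gives the fill level even across wraparound. A minimal standalone sketch of that bookkeeping (ring size and variable names are illustrative, not taken from the driver):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint16_t elts_n = 1 << 4;     /* Ring size, power of two. */
	const uint16_t elts_m = elts_n - 1; /* Index mask. */
	uint16_t head = 65534;              /* Free-running producer index. */
	uint16_t tail = 65529;              /* Free-running consumer index. */

	/* Unsigned subtraction stays correct when head wraps past 65535. */
	uint16_t used = head - tail;
	uint16_t room = elts_n - used;

	assert(used == 5 && room == 11);
	/* The actual slot is obtained by masking, e.g. head & elts_m. */
	printf("head %u maps to slot %u\n", head, head & elts_m);
	head += 3; /* Wraps to 1; the used count is still head - tail = 8. */
	assert((uint16_t)(head - tail) == 8);
	return 0;
}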
+
+/**
+ * DPDK callback for vectorized TX.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint16_t n;
+ uint16_t ret;
+
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * DPDK callback for vectorized TX with multi-seg packets and offload.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint8_t cs_flags = 0;
+ uint16_t n;
+ uint16_t ret;
+
+ /* Transmit multi-seg packets in the head of pkts list. */
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+ NB_SEGS(pkts[nb_tx]) > 1)
+ nb_tx += txq_scatter_v(txq,
+ &pkts[nb_tx],
+ pkts_n - nb_tx);
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+ n = txq_check_multiseg(&pkts[nb_tx], n);
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * Copy mbuf pointers from the RX SW ring to the array of packets to return.
+ *
+ * @param rxq
+ *   Pointer to RX queue structure.
+ * @param pkts
+ *   Pointer to array of packets to be filled.
+ * @param n
+ *   Number of packets to be copied.
+ */
+static inline void
+rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
+{
+ const uint16_t q_mask = (1 << rxq->elts_n) - 1;
+ struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
+ unsigned int pos;
+ uint16_t p = n & -2;
+
+ for (pos = 0; pos < p; pos += 2) {
+ __m128i mbp;
+
+ mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
+ _mm_storeu_si128((__m128i *)&pkts[pos], mbp);
+ }
+ if (n & 1)
+ pkts[pos] = elts[pos];
+}
+
+/**
+ * Replenish buffers for RX in bulk.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param n
+ * Number of buffers to be replenished.
+ */
+static inline void
+rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
+{
+ const uint16_t q_n = 1 << rxq->elts_n;
+ const uint16_t q_mask = q_n - 1;
+ const uint16_t elts_idx = rxq->rq_ci & q_mask;
+ struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
+ volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
+ unsigned int i;
+
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+ assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to cross queue end. */
+ n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
+ if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
+ rxq->stats.rx_nombuf += n;
+ return;
+ }
+ for (i = 0; i < n; ++i)
+ wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ rxq->rq_ci += n;
+ rte_wmb();
+ *rxq->rq_db = htonl(rxq->rq_ci);
+}
+
+/**
+ * Decompress a compressed completion and fill in mbufs in RX SW ring with data
+ * extracted from the title completion descriptor.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cq
+ * Pointer to completion array having a compressed completion at first.
+ * @param elts
+ * Pointer to SW ring to be filled. The first mbuf has to be pre-built from
+ * the title completion descriptor to be copied to the rest of mbufs.
+ */
+static inline void
+rxq_cq_decompress_v(struct rxq *rxq,
+ volatile struct mlx5_cqe *cq,
+ struct rte_mbuf **elts)
+{
+ volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
+ struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
+ unsigned int pos;
+ unsigned int i;
+ unsigned int inv = 0;
+ /* Mask to shuffle from extracted mini CQE to mbuf. */
+ const __m128i shuf_mask1 =
+ _mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */
+ -1, -1, /* skip vlan_tci */
+ 6, 7, /* data_len, bswap16 */
+ -1, -1, 6, 7, /* pkt_len, bswap16 */
+ -1, -1, -1, -1 /* skip packet_type */);
+ const __m128i shuf_mask2 =
+ _mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */
+ -1, -1, /* skip vlan_tci */
+ 14, 15, /* data_len, bswap16 */
+ -1, -1, 14, 15, /* pkt_len, bswap16 */
+ -1, -1, -1, -1 /* skip packet_type */);
+ /* Restore the compressed count. Must be 16 bits. */
+ const uint16_t mcqe_n = t_pkt->data_len +
+ (rxq->crc_present * ETHER_CRC_LEN);
+ const __m128i rearm =
+ _mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
+ const __m128i rxdf =
+ _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
+ const __m128i crc_adj =
+ _mm_set_epi16(0, 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0, 0);
+ const uint32_t flow_tag = t_pkt->hash.fdir.hi;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ones = _mm_cmpeq_epi32(zero, zero);
+ uint32_t rcvd_byte = 0;
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const __m128i len_shuf_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 14, 15, 6, 7,
+ 10, 11, 2, 3);
+#endif
+
+ /* Compile time sanity check for this function. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+ /*
+ * A. load mCQEs into a 128bit register.
+ * B. store rearm data to mbuf.
+ * C. combine data from mCQEs with rx_descriptor_fields1.
+ * D. store rx_descriptor_fields1.
+ * E. store flow tag (rte_flow mark).
+ */
+ for (pos = 0; pos < mcqe_n; ) {
+ __m128i mcqe1, mcqe2;
+ __m128i rxdf1, rxdf2;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ __m128i byte_cnt, invalid_mask;
+#endif
+
+ if (!(pos & 0x7) && pos + 8 < mcqe_n)
+ rte_prefetch0((void *)(cq + pos + 8));
+ /* A.1 load mCQEs into a 128bit register. */
+ mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
+ mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
+ /* B.1 store rearm data to mbuf. */
+ _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
+ rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
+ rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
+ rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
+ rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
+ rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
+ /* D.1 store rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)
+ &elts[pos]->rx_descriptor_fields1,
+ rxdf1);
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 1]->rx_descriptor_fields1,
+ rxdf2);
+ /* B.1 store rearm data to mbuf. */
+ _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
+ rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
+ rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
+ rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
+ rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
+ rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
+ /* D.1 store rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 2]->rx_descriptor_fields1,
+ rxdf1);
+ _mm_storeu_si128((__m128i *)
+ &elts[pos + 3]->rx_descriptor_fields1,
+ rxdf2);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ invalid_mask = _mm_set_epi64x(0,
+ (mcqe_n - pos) *
+ sizeof(uint16_t) * 8);
+ invalid_mask = _mm_sll_epi64(ones, invalid_mask);
+ mcqe1 = _mm_srli_si128(mcqe1, 4);
+ byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
+ byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
+ byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
+ byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
+ rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
+#endif
+ if (rxq->mark) {
+ /* E.1 store flow tag (rte_flow mark). */
+ elts[pos]->hash.fdir.hi = flow_tag;
+ elts[pos + 1]->hash.fdir.hi = flow_tag;
+ elts[pos + 2]->hash.fdir.hi = flow_tag;
+ elts[pos + 3]->hash.fdir.hi = flow_tag;
+ }
+ pos += MLX5_VPMD_DESCS_PER_LOOP;
+ /* Move to next CQE and invalidate consumed CQEs. */
+ if (!(pos & 0x7) && pos < mcqe_n) {
+ mcq = (void *)(cq + pos);
+ for (i = 0; i < 8; ++i)
+ cq[inv++].op_own = MLX5_CQE_INVALIDATE;
+ }
+ }
+ /* Invalidate the rest of CQEs. */
+ for (; inv < mcqe_n; ++inv)
+ cq[inv].op_own = MLX5_CQE_INVALIDATE;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += mcqe_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ rxq->cq_ci += mcqe_n;
+}
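
The shuf_mask1/shuf_mask2 constants above rely on _mm_shuffle_epi8 semantics: a negative mask byte zeroes the destination byte, otherwise its low four bits select a source byte, so a single instruction both extracts and byte-swaps big-endian CQE fields. A self-contained illustration of that idiom (not driver code; build with SSSE3 enabled, e.g. gcc -mssse3):

#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>

int
main(void)
{
	/* Source bytes 0..15 hold the values 0..15 (byte 0 is the lowest). */
	const __m128i src = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
					 7, 6, 5, 4, 3, 2, 1, 0);
	/*
	 * Destination bytes 0..3 take source bytes 3..0 (a 32-bit byte
	 * swap); every other destination byte is zeroed by the -1 entries.
	 */
	const __m128i mask = _mm_set_epi8(-1, -1, -1, -1,
					  -1, -1, -1, -1,
					  -1, -1, -1, -1,
					  0, 1, 2, 3);
	const __m128i out = _mm_shuffle_epi8(src, mask);

	/* Source dword 0 is 0x03020100; the shuffle yields 0x00010203. */
	printf("0x%08x\n", (uint32_t)_mm_cvtsi128_si32(out));
	return 0;
}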
+
+/**
+ * Calculate packet type and offload flag for mbuf and store it.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cqes[4]
+ *   Array of four 16-byte completions extracted from the original completion
+ * descriptor.
+ * @param op_err
+ * Opcode vector having responder error status. Each field is 4B.
+ * @param pkts
+ * Pointer to array of packets to be filled.
+ */
+static inline void
+rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
+ struct rte_mbuf **pkts)
+{
+ __m128i pinfo0, pinfo1;
+ __m128i pinfo, ptype;
+ __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH);
+ __m128i cv_flags;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ptype_mask =
+ _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
+ const __m128i ptype_ol_mask =
+ _mm_set_epi32(0x106, 0x106, 0x106, 0x106);
+ const __m128i pinfo_mask =
+ _mm_set_epi32(0x3, 0x3, 0x3, 0x3);
+ const __m128i cv_flag_sel =
+ _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
+ (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
+ PKT_RX_L4_CKSUM_GOOD) >> 1),
+ 0,
+ (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+ 0,
+ (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
+ (uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
+ 0);
+ const __m128i cv_mask =
+ _mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ const __m128i mbuf_init =
+ _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ /* Extract pkt_info field. */
+ pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
+ pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
+ pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
+ /* Extract hdr_type_etc field. */
+ pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
+ pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
+ ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
+ if (rxq->mark) {
+ const __m128i pinfo_ft_mask =
+ _mm_set_epi32(0xffffff00, 0xffffff00,
+ 0xffffff00, 0xffffff00);
+ const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
+ const __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
+ __m128i flow_tag, invalid_mask;
+
+ flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
+ /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
+ ol_flags = _mm_or_si128(ol_flags,
+ _mm_andnot_si128(invalid_mask,
+ fdir_flags));
+ /* Mask out invalid entries. */
+ flow_tag = _mm_andnot_si128(invalid_mask, flow_tag);
+ /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
+ ol_flags = _mm_or_si128(ol_flags,
+ _mm_andnot_si128(
+ _mm_cmpeq_epi32(flow_tag,
+ pinfo_ft_mask),
+ fdir_id_flags));
+ }
+ /*
+ * Merge the two fields to generate the following:
+ * bit[1] = l3_ok
+ * bit[2] = l4_ok
+ * bit[8] = cv
+ * bit[11:10] = l3_hdr_type
+ * bit[14:12] = l4_hdr_type
+ * bit[15] = ip_frag
+ * bit[16] = tunneled
+ * bit[17] = outer_l3_type
+ */
+ ptype = _mm_and_si128(ptype, ptype_mask);
+ pinfo = _mm_and_si128(pinfo, pinfo_mask);
+ pinfo = _mm_slli_epi32(pinfo, 16);
+ /* Make pinfo has merged fields for ol_flags calculation. */
+ pinfo = _mm_or_si128(ptype, pinfo);
+ ptype = _mm_srli_epi32(pinfo, 10);
+ ptype = _mm_packs_epi32(ptype, zero);
+ /* Errored packets will have RTE_PTYPE_ALL_MASK. */
+ op_err = _mm_srli_epi16(op_err, 8);
+ ptype = _mm_or_si128(ptype, op_err);
+ pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
+ pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
+ pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
+ pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
+ /* Fill flags for checksum and VLAN. */
+ pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
+ pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
+ /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
+ cv_flags = _mm_slli_epi32(pinfo, 9);
+ cv_flags = _mm_or_si128(pinfo, cv_flags);
+ /* Move back flags to start from byte[0]. */
+ cv_flags = _mm_srli_epi32(cv_flags, 8);
+ /* Mask out garbage bits. */
+ cv_flags = _mm_and_si128(cv_flags, cv_mask);
+ /* Merge to ol_flags. */
+ ol_flags = _mm_or_si128(ol_flags, cv_flags);
+ /* Merge mbuf_init and ol_flags. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
+ rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
+ /* Write 8B rearm_data and 8B ol_flags. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
+}
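
The rearm0..rearm3 computation above splices each packet's ol_flags word into the cached rearm template with _mm_blend_epi16, whose immediate selects 16-bit lanes from the second operand. A small standalone demonstration of the 0x30 immediate (lanes 4 and 5, i.e. bytes 8-11) used above; the values are arbitrary stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <smmintrin.h>	/* SSE4.1, e.g. gcc -msse4.1. */

int
main(void)
{
	const __m128i tmpl = _mm_set1_epi16(0x1111);	/* Stand-in template. */
	const __m128i flags = _mm_set1_epi16(0x2222);	/* Stand-in flags. */
	/* Immediate 0x30: take lanes 4 and 5 from flags, the rest from tmpl. */
	const __m128i merged = _mm_blend_epi16(tmpl, flags, 0x30);
	uint16_t lanes[8];
	unsigned int i;

	_mm_storeu_si128((__m128i *)lanes, merged);
	for (i = 0; i < 8; i++)
		printf("lane %u: 0x%04x\n", i, lanes[i]); /* 4 and 5: 0x2222. */
	return 0;
}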
+
+/**
+ * Skip error packets.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+static uint16_t
+rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t n = 0;
+ unsigned int i;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t err_bytes = 0;
+#endif
+
+ for (i = 0; i < pkts_n; ++i) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ err_bytes += PKT_LEN(pkt);
+#endif
+ rte_pktmbuf_free_seg(pkt);
+ } else {
+ pkts[n++] = pkt;
+ }
+ }
+ rxq->stats.idropped += (pkts_n - n);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Correct counters of errored completions. */
+ rxq->stats.ipackets -= (pkts_n - n);
+ rxq->stats.ibytes -= err_bytes;
+#endif
+ rxq->pending_err = 0;
+ return n;
+}
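
rxq_handle_pending_error() above uses the usual in-place compaction pattern: walk the array once, free the marked entries and slide the survivors down with a second write index. A generic sketch of the same pattern on plain integers (names and data are illustrative):

#include <stdio.h>

/* Keep non-negative values in place and return how many were kept. */
static unsigned int
compact(int *vals, unsigned int n)
{
	unsigned int i;
	unsigned int kept = 0;

	for (i = 0; i < n; ++i) {
		if (vals[i] < 0)
			continue;	/* A marked entry would be freed here. */
		vals[kept++] = vals[i];
	}
	return kept;
}

int
main(void)
{
	int vals[] = { 1, -1, 2, 3, -1, 4 };
	unsigned int n = compact(vals, 6);
	unsigned int i;

	for (i = 0; i < n; ++i)
		printf("%d ", vals[i]);	/* Prints: 1 2 3 4 */
	printf("(%u kept)\n", n);
	return 0;
}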
+
+/**
+ * Receive a burst of packets. An errored completion also consumes an mbuf, but
+ * the packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
+ * before returning to the application.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets received including errors (<= pkts_n).
+ */
+static inline uint16_t
+rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ const uint16_t q_n = 1 << rxq->cqe_n;
+ const uint16_t q_mask = q_n - 1;
+ volatile struct mlx5_cqe *cq;
+ struct rte_mbuf **elts;
+ unsigned int pos;
+ uint64_t n;
+ uint16_t repl_n;
+ uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
+ uint16_t nocmp_n = 0;
+ uint16_t rcvd_pkt = 0;
+ unsigned int cq_idx = rxq->cq_ci & q_mask;
+ unsigned int elts_idx;
+ unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
+ const __m128i owner_check =
+ _mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
+ const __m128i opcode_check =
+ _mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
+ const __m128i format_check =
+ _mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
+ const __m128i resp_err_check =
+ _mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t rcvd_byte = 0;
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const __m128i len_shuf_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 12, 13, 8, 9,
+ 4, 5, 0, 1);
+#endif
+ /* Mask to shuffle from extracted CQE to mbuf. */
+ const __m128i shuf_mask =
+ _mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */
+ 12, 13, 14, 15, /* rss, bswap32 */
+ 10, 11, /* vlan_tci, bswap16 */
+ 4, 5, /* data_len, bswap16 */
+ -1, -1, /* zero out 2nd half of pkt_len */
+ 4, 5 /* pkt_len, bswap16 */);
+ /* Mask to blend from the last Qword to the first DQword. */
+ const __m128i blend_mask =
+ _mm_set_epi8(-1, -1, -1, -1,
+ -1, -1, -1, -1,
+ 0, 0, 0, 0,
+ 0, 0, 0, -1);
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i ones = _mm_cmpeq_epi32(zero, zero);
+ const __m128i crc_adj =
+ _mm_set_epi16(0, 0, 0, 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN,
+ 0,
+ rxq->crc_present * ETHER_CRC_LEN);
+ const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
+
+ /* Compile time sanity check for this function. */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, pkt_info) != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rx_hash_res) !=
+ offsetof(struct mlx5_cqe, pkt_info) + 12);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd1) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd1) !=
+ offsetof(struct mlx5_cqe, hdr_type_etc));
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, vlan_info) !=
+ offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd2) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd2) !=
+ offsetof(struct mlx5_cqe, byte_cnt));
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, sop_drop_qpn) !=
+ RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
+ RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, op_own) !=
+ offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
+ assert(rxq->sges_n == 0);
+ assert(rxq->cqe_n == rxq->elts_n);
+ cq = &(*rxq->cqes)[cq_idx];
+ rte_prefetch0(cq);
+ rte_prefetch0(cq + 1);
+ rte_prefetch0(cq + 2);
+ rte_prefetch0(cq + 3);
+ pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
+ /*
+ * Order of indexes:
+ * rq_ci >= cq_ci >= rq_pi
+ * Definition of indexes:
+ * rq_ci - cq_ci := # of buffers owned by HW (posted).
+ * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
+ * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
+ */
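+	/*
+	 * Worked example with assumed values: q_n = 512, rq_ci = 700,
+	 * cq_ci = 690, rq_pi = 680 gives 10 buffers owned by HW, 10
+	 * decompressed but not yet returned, and 512 - 20 = 492 to
+	 * replenish below.
+	 */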
+ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ rxq_replenish_bulk_mbuf(rxq, repl_n);
+	/* See if there are unreturned mbufs from compressed CQEs. */
+ rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
+ if (rcvd_pkt > 0) {
+ rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
+ rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
+ rxq->rq_pi += rcvd_pkt;
+ pkts += rcvd_pkt;
+ }
+ elts_idx = rxq->rq_pi & q_mask;
+ elts = &(*rxq->elts)[elts_idx];
+ /* Not to overflow pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Not to cross queue end. */
+ pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+ if (!pkts_n)
+ return rcvd_pkt;
+	/* At this point, there shouldn't be any remaining packets. */
+ assert(rxq->rq_pi == rxq->cq_ci);
+ /*
+	 * A. load first Qword (8 bytes) in one loop.
+	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
+	 * C. load remaining CQE data and extract necessary fields.
+	 *    Final 16-byte cqes[] extracted from original 64-byte CQE has the
+ * following structure:
+ * struct {
+ * uint8_t pkt_info;
+ * uint8_t flow_tag[3];
+ * uint16_t byte_cnt;
+ * uint8_t rsvd4;
+ * uint8_t op_own;
+ * uint16_t hdr_type_etc;
+ * uint16_t vlan_info;
+	 *        uint32_t rx_hash_res;
+ * } c;
+ * D. fill in mbuf.
+ * E. get valid CQEs.
+ * F. find compressed CQE.
+ */
+ for (pos = 0;
+ pos < pkts_n;
+ pos += MLX5_VPMD_DESCS_PER_LOOP) {
+ __m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
+ __m128i cqe_tmp1, cqe_tmp2;
+ __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
+ __m128i op_own, op_own_tmp1, op_own_tmp2;
+ __m128i opcode, owner_mask, invalid_mask;
+ __m128i comp_mask;
+ __m128i mask;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ __m128i byte_cnt;
+#endif
+ __m128i mbp1, mbp2;
+ __m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
+ unsigned int p1, p2, p3;
+
+ /* Prefetch next 4 CQEs. */
+ if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
+ rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
+ }
+ /* A.0 do not cross the end of CQ. */
+ mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ p = _mm_andnot_si128(mask, p);
+ /* A.1 load cqes. */
+ p3 = _mm_extract_epi16(p, 3);
+ cqes[3] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p3].sop_drop_qpn);
+ rte_compiler_barrier();
+ p2 = _mm_extract_epi16(p, 2);
+ cqes[2] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p2].sop_drop_qpn);
+ rte_compiler_barrier();
+ /* B.1 load mbuf pointers. */
+ mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
+ mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
+ /* A.1 load a block having op_own. */
+ p1 = _mm_extract_epi16(p, 1);
+ cqes[1] = _mm_loadl_epi64((__m128i *)
+ &cq[pos + p1].sop_drop_qpn);
+ rte_compiler_barrier();
+ cqes[0] = _mm_loadl_epi64((__m128i *)
+ &cq[pos].sop_drop_qpn);
+ /* B.2 copy mbuf pointers. */
+ _mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
+ _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
+ rte_compiler_barrier();
+		/* C.1 load remaining CQE data and extract necessary fields. */
+ cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
+ cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
+ cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
+ cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
+ cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
+ cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
+ cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
+ cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
+ cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
+ cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
+ /* C.2 generate final structure for mbuf with swapping bytes. */
+ pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
+ pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
+ /* C.3 adjust CRC length. */
+ pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
+ pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
+ /* C.4 adjust flow mark. */
+ pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
+ pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
+ /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ _mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
+ _mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
+ /* E.1 extract op_own field. */
+ op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
+		/* C.1 load remaining CQE data and extract necessary fields. */
+ cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
+ cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
+ cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
+ cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
+ cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
+ cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
+ cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
+ cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
+ cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
+ cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
+ /* C.2 generate final structure for mbuf with swapping bytes. */
+ pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
+ pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
+ /* C.3 adjust CRC length. */
+ pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
+ pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
+ /* C.4 adjust flow mark. */
+ pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
+ pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
+ /* E.1 extract op_own byte. */
+ op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
+ op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
+ /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ _mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
+ _mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
+ /* E.2 flip owner bit to mark CQEs from last round. */
+ owner_mask = _mm_and_si128(op_own, owner_check);
+ if (ownership)
+ owner_mask = _mm_xor_si128(owner_mask, owner_check);
+ owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
+ owner_mask = _mm_packs_epi32(owner_mask, zero);
+ /* E.3 get mask for invalidated CQEs. */
+ opcode = _mm_and_si128(op_own, opcode_check);
+ invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
+ invalid_mask = _mm_packs_epi32(invalid_mask, zero);
+ /* E.4 mask out beyond boundary. */
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* E.5 merge invalid_mask with invalid owner. */
+ invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
+ /* F.1 find compressed CQE format. */
+ comp_mask = _mm_and_si128(op_own, format_check);
+ comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
+ comp_mask = _mm_packs_epi32(comp_mask, zero);
+ /* F.2 mask out invalid entries. */
+ comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
+ comp_idx = _mm_cvtsi128_si64(comp_mask);
+ /* F.3 get the first compressed CQE. */
+ comp_idx = comp_idx ?
+ __builtin_ctzll(comp_idx) /
+ (sizeof(uint16_t) * 8) :
+ MLX5_VPMD_DESCS_PER_LOOP;
+ /* E.6 mask out entries after the compressed CQE. */
+ mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* E.7 count non-compressed valid CQEs. */
+ n = _mm_cvtsi128_si64(invalid_mask);
+ n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
+ MLX5_VPMD_DESCS_PER_LOOP;
+ nocmp_n += n;
+ /* D.2 get the final invalid mask. */
+ mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
+ mask = _mm_sll_epi64(ones, mask);
+ invalid_mask = _mm_or_si128(invalid_mask, mask);
+ /* D.3 check error in opcode. */
+ opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
+ opcode = _mm_packs_epi32(opcode, zero);
+ opcode = _mm_andnot_si128(invalid_mask, opcode);
+ /* D.4 mark if any error is set */
+ rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
+ /* D.5 fill in mbuf - rearm_data and packet_type. */
+ rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Add up received bytes count. */
+ byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
+ byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
+ byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
+ rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
+#endif
+ /*
+		 * Break the loop unless more valid CQEs are expected, or if
+		 * there is a compressed CQE.
+ */
+ if (n != MLX5_VPMD_DESCS_PER_LOOP)
+ break;
+ }
+ /* If no new CQE seen, return without updating cq_db. */
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ return rcvd_pkt;
+ /* Update the consumer indexes for non-compressed CQEs. */
+ assert(nocmp_n <= pkts_n);
+ rxq->cq_ci += nocmp_n;
+ rxq->rq_pi += nocmp_n;
+ rcvd_pkt += nocmp_n;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += nocmp_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ /* Decompress the last CQE if compressed. */
+ if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
+ assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+ /* Return more packets if needed. */
+ if (nocmp_n < pkts_n) {
+ uint16_t n = rxq->cq_ci - rxq->rq_pi;
+
+ n = RTE_MIN(n, pkts_n - nocmp_n);
+ rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
+ rxq->rq_pi += n;
+ rcvd_pkt += n;
+ }
+ }
+ rte_wmb();
+ *rxq->cq_db = htonl(rxq->cq_ci);
+ return rcvd_pkt;
+}
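
Step F.3 above finds the first compressed CQE by moving the packed 16-bit comparison result into a scalar and counting trailing zero bits. A standalone sketch of that trick with an assumed lane pattern:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * Four 16-bit lanes packed into one 64-bit word, each lane either
	 * all ones (condition met) or all zeros.  Here lanes 0 and 1 are
	 * zero and lane 2 is the first lane that matched.
	 */
	const uint64_t lanes = 0xffffffff00000000ULL;
	const unsigned int descs_per_loop = 4;	/* Stand-in for the constant. */
	unsigned int first;

	first = lanes ?
		__builtin_ctzll(lanes) / (sizeof(uint16_t) * 8) :
		descs_per_loop;
	printf("first matching lane: %u\n", first);	/* Prints 2. */
	return 0;
}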
+
+/**
+ * DPDK callback for vectorized RX.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct rxq *rxq = dpdk_rxq;
+ uint16_t nb_rx;
+
+ nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
+ if (unlikely(rxq->pending_err))
+ nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
+ return nb_rx;
+}
+
+/**
+ * Check whether Tx queue flags are set for raw vectorized Tx.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_raw_vec_tx_support(struct priv *priv)
+{
+ uint16_t i;
+
+	/* All the configured queues should support it. */
+ for (i = 0; i < priv->txqs_n; ++i) {
+ struct txq *txq = (*priv->txqs)[i];
+
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
+ !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ break;
+ }
+ if (i != priv->txqs_n)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether a device can support vectorized TX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_vec_tx_support(struct priv *priv)
+{
+ if (!priv->tx_vec_en ||
+ priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ priv->mps != MLX5_MPW_ENHANCED ||
+ priv->tso)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether an RX queue can support vectorized RX.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+rxq_check_vec_support(struct rxq *rxq)
+{
+ struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+
+ if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether a device can support vectorized RX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_vec_rx_support(struct priv *priv)
+{
+ uint16_t i;
+
+ if (!priv->rx_vec_en)
+ return -ENOTSUP;
+	/* All the configured queues should support it. */
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+
+ if (rxq_check_vec_support(rxq) < 0)
+ break;
+ }
+ if (i != priv->rxqs_n)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Prepare for vectorized RX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_prep_vec_rx_function(struct priv *priv)
+{
+ uint16_t i;
+
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
+ const uint16_t desc = 1 << rxq->elts_n;
+ int j;
+
+ assert(rxq->elts_n == rxq->cqe_n);
+ /* Initialize default rearm_data for vPMD. */
+ mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_mbuf_refcnt_set(mbuf_init, 1);
+ mbuf_init->nb_segs = 1;
+ mbuf_init->port = rxq->port_id;
+ /*
+ * prevent compiler reordering:
+ * rearm_data covers previous fields.
+ */
+ rte_compiler_barrier();
+ rxq->mbuf_initializer =
+ *(uint64_t *)&mbuf_init->rearm_data;
+ /* Padding with a fake mbuf for vectorized Rx. */
+ for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
+ (*rxq->elts)[desc + j] = &rxq->fake_mbuf;
+		/* Mark that it needs to be cleaned up for rxq_alloc_elts(). */
+ rxq->trim_elts = 1;
+ }
+}
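
priv_prep_vec_rx_function() caches the 8 bytes covered by rearm_data so the Rx loop can reset several mbuf fields with one store. A minimal sketch of the same template trick over a hypothetical 8-byte layout (the real rte_mbuf layout differs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the fields covered by an 8-byte template. */
struct rearm_area {
	uint16_t data_off;
	uint16_t refcnt;
	uint16_t nb_segs;
	uint16_t port;
};

int
main(void)
{
	const struct rearm_area tmpl = {
		.data_off = 128,	/* Headroom. */
		.refcnt = 1,
		.nb_segs = 1,
		.port = 0,
	};
	uint64_t initializer;
	struct rearm_area pkt;

	/* Snapshot the template once at setup time... */
	memcpy(&initializer, &tmpl, sizeof(initializer));
	/* ...then rearm each buffer in the hot path with one 8-byte copy. */
	memcpy(&pkt, &initializer, sizeof(pkt));
	printf("data_off=%u refcnt=%u nb_segs=%u port=%u\n",
	       pkt.data_off, pkt.refcnt, pkt.nb_segs, pkt.port);
	return 0;
}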
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 8c5aa691..595a9e06 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -72,6 +72,9 @@ mlx5_dev_start(struct rte_eth_dev *dev)
priv_unlock(priv);
return 0;
}
+ /* Update Rx/Tx callback. */
+ priv_select_tx_function(priv);
+ priv_select_rx_function(priv);
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
err = priv_create_hash_rxqs(priv);
if (!err)
@@ -94,12 +97,13 @@ mlx5_dev_start(struct rte_eth_dev *dev)
(void *)priv, strerror(err));
goto error;
}
- priv_dev_interrupt_handler_install(priv, dev);
- if (dev->data->dev_conf.intr_conf.rxq) {
- err = priv_intr_efd_enable(priv);
- if (!err)
- err = priv_create_intr_vec(priv);
+ err = priv_rx_intr_vec_enable(priv);
+ if (err) {
+ ERROR("%p: RX interrupt vector creation failed",
+ (void *)priv);
+ goto error;
}
+ priv_dev_interrupt_handler_install(priv, dev);
priv_xstats_init(priv);
priv_unlock(priv);
return 0;
@@ -140,11 +144,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
priv_destroy_hash_rxqs(priv);
priv_fdir_disable(priv);
priv_flow_stop(priv);
+ priv_rx_intr_vec_disable(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
- if (priv->dev->data->dev_conf.intr_conf.rxq) {
- priv_destroy_intr_vec(priv);
- priv_intr_efd_disable(priv);
- }
priv->started = 0;
priv_unlock(priv);
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index bf72468d..98aaa7ca 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -64,7 +64,6 @@
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
/**
* Allocate TX queue elements.
@@ -103,9 +102,10 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
static void
txq_free_elts(struct txq_ctrl *txq_ctrl)
{
- unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
- unsigned int elts_head = txq_ctrl->txq.elts_head;
- unsigned int elts_tail = txq_ctrl->txq.elts_tail;
+ const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ uint16_t elts_head = txq_ctrl->txq.elts_head;
+ uint16_t elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
DEBUG("%p: freeing WRs", (void *)txq_ctrl);
@@ -114,18 +114,17 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
txq_ctrl->txq.elts_comp = 0;
while (elts_tail != elts_head) {
- struct rte_mbuf *elt = (*elts)[elts_tail];
+ struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
assert(elt != NULL);
rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
/* Poisoning. */
- memset(&(*elts)[elts_tail],
+ memset(&(*elts)[elts_tail & elts_m],
0x77,
- sizeof((*elts)[elts_tail]));
+ sizeof((*elts)[elts_tail & elts_m]));
#endif
- if (++elts_tail == elts_n)
- elts_tail = 0;
+ ++elts_tail;
}
}
@@ -149,9 +148,8 @@ txq_cleanup(struct txq_ctrl *txq_ctrl)
if (txq_ctrl->cq != NULL)
claim_zero(ibv_destroy_cq(txq_ctrl->cq));
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- if (txq_ctrl->txq.mp2mr[i].mp == NULL)
+ if (txq_ctrl->txq.mp2mr[i].mr == NULL)
break;
- assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
}
memset(txq_ctrl, 0, sizeof(*txq_ctrl));
@@ -244,7 +242,7 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
goto error;
}
- (void)conf; /* Thresholds configuration (ignored). */
+ tmpl.txq.flags = conf->txq_flags;
assert(desc > MLX5_TX_COMP_THRESH);
tmpl.txq.elts_n = log2above(desc);
if (priv->mps == MLX5_MPW_ENHANCED)
@@ -497,8 +495,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
DEBUG("%p: adding TX queue %p to list",
(void *)dev, (void *)txq_ctrl);
(*priv->txqs)[idx] = &txq_ctrl->txq;
- /* Update send callback. */
- priv_select_tx_function(priv);
}
priv_unlock(priv);
return -ret;
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 5479fb36..92b03c4c 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -39,8 +39,6 @@
* Netronome vNIC DPDK Poll-Mode Driver: Main entry point
*/
-#include <math.h>
-
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
@@ -647,7 +645,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
static int
nfp_net_start(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
@@ -772,7 +770,7 @@ nfp_net_close(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Close");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
/*
* We assume that the DPDK application is stopping all the
@@ -1081,7 +1079,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1173,7 +1171,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
* Other PMDs are just checking the DD bit in intervals of 4
* descriptors and counting all four if the first has the DD
* bit on. Of course, this is not accurate but can be good for
- * perfomance. But ideally that should be done in descriptors
+ * performance. But ideally that should be done in descriptors
* chunks belonging to the same cache line
*/
@@ -1201,7 +1199,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
int base = 0;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
base = 1;
@@ -1221,7 +1219,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
int base = 0;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
base = 1;
@@ -1235,7 +1233,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -1269,7 +1267,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
/* If MSI-X auto-masking is used, clear the entry */
@@ -1334,7 +1332,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
nfp_net_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
nfp_net_dev_link_status_print(dev);
@@ -1473,7 +1471,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
* of descriptors in log2 format
*/
nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
- nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
return 0;
}
@@ -1628,7 +1626,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
* of descriptors in log2 format
*/
nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
- nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));
+ nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
return 0;
}
@@ -2450,7 +2448,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index abf3ec75..5aef0591 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -49,7 +49,6 @@ static unsigned default_packet_copy;
static const char *valid_arguments[] = {
ETH_NULL_PACKET_SIZE_ARG,
ETH_NULL_PACKET_COPY_ARG,
- "driver",
NULL
};
@@ -139,8 +138,6 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
packet_size);
bufs[i]->data_len = (uint16_t)packet_size;
bufs[i]->pkt_len = packet_size;
- bufs[i]->nb_segs = 1;
- bufs[i]->next = NULL;
bufs[i]->port = h->internals->port_id;
}
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 3323914c..f03441d9 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -101,7 +101,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 3f895cd4..2603a8b3 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -16,6 +16,10 @@
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"
+/* Array of memzone pointers */
+static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
+/* Counter to track the number of memzones currently allocated */
+uint16_t ecore_mz_count;
unsigned long qede_log2_align(unsigned long n)
{
@@ -118,6 +122,13 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
@@ -134,9 +145,11 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
return OSAL_NULL;
}
*phys = mz->phys_addr;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n",
- mz->len, mz->phys_addr, mz->addr, socket_id);
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
@@ -148,6 +161,13 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
@@ -163,12 +183,30 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
return OSAL_NULL;
}
*phys = mz->phys_addr;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n",
- mz->len, mz->phys_addr, mz->addr, core_id);
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated aligned dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
+void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
+{
+ uint16_t j;
+
+	for (j = 0; j < ecore_mz_count; j++) {
+ if (phys == ecore_mz_mapping[j]->phys_addr) {
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Free memzone %s\n", ecore_mz_mapping[j]->name);
+ rte_memzone_free(ecore_mz_mapping[j]);
+ return;
+ }
+ }
+
+ DP_ERR(p_dev, "Unexpected memory free request\n");
+}
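
osal_dma_free_mem() resolves a physical address back to its memzone by scanning a fixed-size tracking array filled at allocation time. A generic sketch of that lookup pattern (the names, sizes and printf calls stand in for the rte_memzone API):

#include <stdint.h>
#include <stdio.h>

#define MAX_ZONES 8	/* Stand-in for RTE_MAX_MEMZONE. */

struct zone {
	uint64_t phys;
	const char *name;
};

static struct zone zones[MAX_ZONES];
static unsigned int zone_count;

static int
zone_track(uint64_t phys, const char *name)
{
	if (zone_count >= MAX_ZONES)
		return -1;	/* Allocation count exceeded. */
	zones[zone_count].phys = phys;
	zones[zone_count].name = name;
	zone_count++;
	return 0;
}

static void
zone_free(uint64_t phys)
{
	unsigned int i;

	for (i = 0; i < zone_count; i++) {
		if (zones[i].phys == phys) {
			printf("freeing %s\n", zones[i].name);
			return;
		}
	}
	printf("unexpected free request\n");
}

int
main(void)
{
	zone_track(0x1000, "rx_ring");
	zone_track(0x2000, "tx_ring");
	zone_free(0x2000);	/* Found: prints "freeing tx_ring". */
	zone_free(0x3000);	/* Not tracked: prints the error line. */
	return 0;
}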
+
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
@@ -211,11 +249,46 @@ qede_get_mcp_proto_stats(struct ecore_dev *edev,
if (type == ECORE_MCP_LAN_STATS) {
ecore_get_vport_stats(edev, &lan_stats);
- stats->lan_stats.ucast_rx_pkts = lan_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = lan_stats.tx_ucast_pkts;
+
+ /* @DPDK */
+ stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
+
stats->lan_stats.fcs_err = -1;
} else {
DP_INFO(edev, "Statistics request type %d not supported\n",
type);
}
}
+
+void
+qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
+{
+ char err_str[64];
+
+ switch (err_type) {
+ case ECORE_HW_ERR_FAN_FAIL:
+ strcpy(err_str, "Fan Failure");
+ break;
+ case ECORE_HW_ERR_MFW_RESP_FAIL:
+ strcpy(err_str, "MFW Response Failure");
+ break;
+ case ECORE_HW_ERR_HW_ATTN:
+ strcpy(err_str, "HW Attention");
+ break;
+ case ECORE_HW_ERR_DMAE_FAIL:
+ strcpy(err_str, "DMAE Failure");
+ break;
+ case ECORE_HW_ERR_RAMROD_FAIL:
+ strcpy(err_str, "Ramrod Failure");
+ break;
+ case ECORE_HW_ERR_FW_ASSERT:
+ strcpy(err_str, "FW Assertion");
+ break;
+ default:
+ strcpy(err_str, "Unknown");
+ }
+
+ DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
+ ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
+}
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 32c9b251..3acf8f7c 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -27,6 +27,7 @@ struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
enum ecore_mcp_protocol_type;
union ecore_mcp_protocol_stats;
+enum ecore_hw_err_type;
void qed_link_update(struct ecore_hwfn *hwfn);
@@ -107,14 +108,16 @@ void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
size_t, int);
+void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
+
#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
osal_dma_alloc_coherent(dev, phys, size)
#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
osal_dma_alloc_coherent_aligned(dev, phys, size, align)
-/* TODO: */
-#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \
+ osal_dma_free_mem(dev, phys)
/* HW reads/writes */
@@ -348,6 +351,8 @@ u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
struct ecore_vf_acquire_sw_info *);
+void qede_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \
qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info)
@@ -356,7 +361,8 @@ void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
/* TODO: */
#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
-#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) \
+ qede_hw_err_notify(hwfn, err_type)
#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
#define OSAL_NUM_ACTIVE_CPU() 0
@@ -421,6 +427,9 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+#define OSAL_CRC32(crc, buf, length) 0
+#define OSAL_CRC8_POPULATE(table, polynomial) nothing
+#define OSAL_CRC8(table, pdata, nbytes, crc) 0
#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index cbcde227..bfe50e1f 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -97,8 +97,8 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 18
-#define FW_REVISION_VERSION 9
+#define FW_MINOR_VERSION 20
+#define FW_REVISION_VERSION 0
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -913,24 +913,25 @@ struct db_l2_dpm_data {
__le16 bd_prod /* bd producer value to update */;
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
-#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type)
*/
-#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
-#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
-#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
-#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
/* size of the packet to be transmitted in bytes */
-#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
-#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
-#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
/* In DPM_L2_BD mode: the number of SGE-s */
-#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
-#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+/* Flag indicating whether to enable GFS search */
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
};
/*
@@ -989,26 +990,29 @@ struct db_pwm_addr {
struct db_rdma_dpm_params {
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
/* opcode for RDMA operation */
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
/* the size of the WQE payload in bytes */
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
/* RoCE completion flag */
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+/* Connection type is iWARP */
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
/*
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 80b11a4c..0d68a9bc 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -31,7 +31,7 @@
#define ECORE_MAJOR_VERSION 8
#define ECORE_MINOR_VERSION 18
#define ECORE_REVISION_VERSION 7
-#define ECORE_ENGINEERING_VERSION 0
+#define ECORE_ENGINEERING_VERSION 1
#define ECORE_VERSION \
((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
@@ -770,7 +770,7 @@ struct ecore_dev {
bool attn_clr_en;
/* Indicates whether allowing the MFW to collect a crash dump */
- bool mdump_en;
+ bool allow_mdump;
/* Indicates if the reg_fifo is checked after any register access */
bool chk_reg_fifo;
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 865103c6..65b89b8f 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1080,7 +1080,7 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ "Sending final cleanup for PFVF[%d] [Command %08x]\n",
id, command);
ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
@@ -1776,13 +1776,6 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* perform debug configuration when chip is out of reset */
OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
- /* Cleanup chip from previous driver if such remains exist */
- rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc != ECORE_SUCCESS) {
- ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
- return rc;
- }
-
/* PF Init sequence */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
@@ -1866,17 +1859,17 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
}
-static enum _ecore_status_t
-ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 enable)
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable)
{
- u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
- /* Change PF in PXP */
+ /* Configure the PF's internal FID_enable for master transactions */
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
- /* wait until value is set - try for 1 second every 50us */
+ /* Wait until value is set - try for 1 second every 50us */
for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
@@ -1918,14 +1911,21 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
struct ecore_load_req_params load_req_params;
- u32 load_code, param, drv_mb_param;
+ u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
- enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
@@ -1942,7 +1942,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ p_hwfn = &p_dev->hwfns[i];
/* If management didn't provide a default, set one of our own */
if (!p_hwfn->hw_info.mtu) {
@@ -1955,11 +1955,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
continue;
}
- /* Enable DMAE in PXP */
- rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
- if (rc != ECORE_SUCCESS)
- return rc;
-
rc = ecore_calc_hw_mode(p_hwfn);
if (rc != ECORE_SUCCESS)
return rc;
@@ -2009,6 +2004,30 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
qm_lock_init = true;
}
+		/* Clean up chip from a previous driver if any remains exist.
+ * This is not needed when the PF is the first one on the
+ * engine, since afterwards we are going to init the FW.
+ */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+ rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->rel_pf_id, false);
+ if (rc != ECORE_SUCCESS) {
+ ecore_hw_err_notify(p_hwfn,
+ ECORE_HW_ERR_RAMROD_FAIL);
+ goto load_err;
+ }
+ }
+
+ /* Log and clean previous pglue_b errors if such exist */
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Enable the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ true);
+ if (rc != ECORE_SUCCESS)
+ goto load_err;
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -2037,35 +2056,28 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
break;
}
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
"init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
+ goto load_err;
+ }
- /* ACK mfw regardless of success or failure of initialization */
- mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_LOAD_DONE,
- 0, &load_code, &param);
+ rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
return rc;
- if (mfw_rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "Failed sending a LOAD_DONE command\n");
- return mfw_rc;
- }
-
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
- mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
- &load_code, &param);
- if (mfw_rc != ECORE_SUCCESS) {
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
"Failed to send DCBX attention request\n");
- return mfw_rc;
+ return rc;
}
p_hwfn->hw_init_done = true;
@@ -2076,7 +2088,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
drv_mb_param = STORM_FW_VERSION;
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
- drv_mb_param, &load_code, &param);
+ drv_mb_param, &resp, &param);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
@@ -2094,6 +2106,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
return rc;
+
+load_err:
+ /* The MFW load lock should be released regardless of success or failure
+ * of initialization.
+ * TODO: replace this with an attempt to send cancel_load.
+ */
+ ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ return rc;
}
#define ECORE_HW_STOP_RETRY_LIMIT (10)
@@ -2261,18 +2281,20 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
}
} /* hwfn loop */
- if (IS_PF(p_dev)) {
+ if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
- */
- rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+ /* Clear the PF's internal FID_enable in the PXP.
+ * In CMT this should only be done for first hw-function, and
+ * only after all transactions have stopped for all active
+ * hw-functions.
+ */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ false);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "ecore_change_pci_hwfn failed. rc = %d.\n",
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
}
@@ -2370,9 +2392,8 @@ static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
- /* Clean Previous errors if such exist */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+ /* Clean previous pglue_b errors if such exist */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* enable internal target-read */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -3565,6 +3586,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
enum _ecore_status_t rc;
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ p_dev->allow_mdump = p_params->allow_mdump;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -3718,7 +3740,7 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
if (!p_chain->b_external_pbl)
OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table, pbl_size);
- out:
+out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@@ -3994,92 +4016,182 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter)
+static enum _ecore_status_t
+ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
{
- u32 high, low, en;
+ u32 en;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return ECORE_SUCCESS;
-
- high = p_filter[1] | (p_filter[0] << 8);
- low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-
/* Find a free entry and utilize it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
if (en)
continue;
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), low);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), high);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 1);
break;
}
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
- DP_NOTICE(p_hwfn, false,
- "Failed to find an empty LLH filter to utilize\n");
- return ECORE_INVAL;
- }
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
- p_filter[0], p_filter[1], p_filter[2],
- p_filter[3], p_filter[4], p_filter[5], i);
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
return ECORE_SUCCESS;
}
-void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter)
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
{
- u32 high, low;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return;
+ return ECORE_SUCCESS;
high = p_filter[1] | (p_filter[0] << 8);
low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
+{
+ int i;
/* Find the entry and clean it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32)) != low)
continue;
if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32)) != high)
continue;
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), 0);
break;
}
+
if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
+ low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Tried to remove a non-configured filter\n");
+ return;
+ }
+
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+}
+
+static enum _ecore_status_t
+ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ u32 en;
+ int i;
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 1 << type);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
}
enum _ecore_status_t
@@ -4089,14 +4201,15 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
u16 dest_port,
enum ecore_llh_port_filter_type_t type)
{
- u32 high, low, en;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return ECORE_SUCCESS;
high = 0;
low = 0;
+
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
high = source_port_or_eth_type;
@@ -4118,67 +4231,109 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
"Non valid LLH protocol filter type %d\n", type);
return ECORE_INVAL;
}
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
- if (en)
- continue;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), low);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), high);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 1 << type);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to find an empty LLH filter to utilize\n");
- return ECORE_NORESOURCES;
+ return rc;
}
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"ETH type %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_TCP_SRC_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"TCP src port %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_UDP_SRC_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"UDP src port %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_TCP_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP dst port %x is added at %d\n", dest_port, i);
+ "TCP dst port %x is added at %d\n", dest_port,
+ entry_num);
break;
case ECORE_LLH_FILTER_UDP_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP dst port %x is added at %d\n", dest_port, i);
+ "UDP dst port %x is added at %d\n", dest_port,
+ entry_num);
break;
case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"TCP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
+ source_port_or_eth_type, dest_port, entry_num);
break;
case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"UDP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
+ source_port_or_eth_type, dest_port, entry_num);
break;
}
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ int i;
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!(ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32)) & (1 << type)))
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
return ECORE_SUCCESS;
}
@@ -4189,14 +4344,15 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
u16 dest_port,
enum ecore_llh_port_filter_type_t type)
{
- u32 high, low;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return;
high = 0;
low = 0;
+
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
high = source_port_or_eth_type;
@@ -4219,49 +4375,24 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
return;
}
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
- continue;
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
- continue;
- if (!(ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32)) & (1 << type)))
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), 0);
- break;
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
+ type, source_port_or_eth_type, dest_port);
+ return;
}
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- DP_NOTICE(p_hwfn, false,
- "Tried to remove a non-configured filter\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
+ type, source_port_or_eth_type, dest_port, entry_num);
}
-void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
int i;
@@ -4270,16 +4401,27 @@ void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), 0);
}
}
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
+}
+
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
@@ -4396,7 +4538,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
goto out;
- out:
+out:
return rc;
}
@@ -4434,7 +4576,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
- out:
+out:
return rc;
}
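
The reworked LLH helpers above keep packing the 6-byte MAC address into a 32-bit high/low pair before writing it to the NIG filter registers. A minimal, self-contained sketch of that packing (not part of the patch; llh_mac_to_high_low() and the demo main() are hypothetical helpers used only for illustration):

#include <stdint.h>
#include <stdio.h>

static void llh_mac_to_high_low(const uint8_t *p_filter,
				uint32_t *p_high, uint32_t *p_low)
{
	/* Bytes 0-1 of the MAC form the high word and bytes 2-5 the low
	 * word, mirroring ecore_llh_add_mac_filter() above.
	 */
	*p_high = p_filter[1] | ((uint32_t)p_filter[0] << 8);
	*p_low = p_filter[5] | ((uint32_t)p_filter[4] << 8) |
		 ((uint32_t)p_filter[3] << 16) | ((uint32_t)p_filter[2] << 24);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t high, low;

	llh_mac_to_high_low(mac, &high, &low);
	/* Prints high=0x00000011 low=0x22334455 */
	printf("high=0x%08x low=0x%08x\n", high, low);
	return 0;
}
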
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index e64a768d..9126cf95 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -186,6 +186,9 @@ struct ecore_hw_prepare_params {
/* The OS Epoch time in seconds */
u32 epoch;
+ /* Allow the MFW to collect a crash dump */
+ bool allow_mdump;
+
/* Allow prepare to pass even if some initializations are failing.
* If set, the `p_prepare_res' field would be set with the return,
* and might allow probe to pass even if there are certain issues.
@@ -238,7 +241,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#ifndef __EXTRACT__LINUX__
-struct ecore_eth_stats {
+struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -270,11 +273,6 @@ struct ecore_eth_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -291,14 +289,8 @@ struct ecore_eth_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
- u64 tx_1519_to_2047_byte_packets;
- u64 tx_2048_to_4095_byte_packets;
- u64 tx_4096_to_9216_byte_packets;
- u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
- u64 tx_lpi_entry_count;
- u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
@@ -312,6 +304,33 @@ struct ecore_eth_stats {
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
};
+
+struct ecore_eth_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct ecore_eth_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct ecore_eth_stats {
+ struct ecore_eth_stats_common common;
+ union {
+ struct ecore_eth_stats_bb bb;
+ struct ecore_eth_stats_ah ah;
+ };
+};
#endif
enum ecore_dmae_address_type_t {
@@ -581,4 +600,16 @@ enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
u16 tx_coal, void *p_handle);
+/**
+ * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_enable - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable);
#endif
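
ecore_eth_stats is now split into a common block plus a BB/AH union, so callers read shared counters from .common and chip-specific ones from the union member that matches the device family. A short usage sketch, assuming the base code's u64 typedef and ECORE_IS_BB() helper; eth_stats_rx_oversized() itself is hypothetical:

/* Total rx counters for frames larger than 1518 bytes, taken from the union
 * member that matches the device family. Counters shared by both families
 * would be read from p_stats->common instead.
 */
static u64 eth_stats_rx_oversized(struct ecore_dev *p_dev,
				  struct ecore_eth_stats *p_stats)
{
	if (ECORE_IS_BB(p_dev))
		return p_stats->bb.rx_1519_to_2047_byte_packets +
		       p_stats->bb.rx_2048_to_4095_byte_packets +
		       p_stats->bb.rx_4096_to_9216_byte_packets +
		       p_stats->bb.rx_9217_to_16383_byte_packets;

	return p_stats->ah.rx_1519_to_max_byte_packets;
}
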
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 3042ed55..5c2a08f9 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -856,7 +856,8 @@ struct core_rx_gsi_offload_cqe {
__le16 vlan /* 802.1q VLAN tag */;
__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
- u8 reserved1[2];
+/* These are the lower 16 bits of the QP id in the RoCE BTH header */
+ __le16 qp_id;
__le32 gid_dst[4] /* Gid destination address */;
};
@@ -998,11 +999,9 @@ struct core_tx_bd {
*/
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-/* Packet destination - Network, LB (use enum core_tx_dest) */
-#define CORE_TX_BD_TX_DST_MASK 0x1
+/* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
+#define CORE_TX_BD_TX_DST_MASK 0x3
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED_MASK 0x1
-#define CORE_TX_BD_RESERVED_SHIFT 15
};
@@ -1011,8 +1010,10 @@ struct core_tx_bd {
* Light L2 TX Destination
*/
enum core_tx_dest {
- CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
- CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
+ CORE_TX_DEST_NW /* TX Destination to the Network */,
+ CORE_TX_DEST_LB /* TX Destination to the Loopback */,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP /* TX Drop */,
MAX_CORE_TX_DEST
};
@@ -1337,20 +1338,14 @@ struct pf_start_tunnel_config {
* FW will use a default port
*/
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
-/* If set, enable l2 GENEVE tunnel in TX path. */
- u8 tx_enable_l2geneve;
-/* If set, enable IP GENEVE tunnel in TX path. */
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
- u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
-/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
+/* Rx classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
-/* Classification scheme for ip GENEVE tunnel. */
+/* Rx classification scheme for ip GENEVE tunnel. */
u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
- u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
+ u8 reserved;
/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
__le16 vxlan_udp_port;
/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
@@ -1366,6 +1361,7 @@ struct pf_start_ramrod_data {
struct regpair consolid_q_pbl_addr;
/* tunnel configuration. */
struct pf_start_tunnel_config tunnel_config;
+ __le32 reserved;
__le16 event_ring_sb_id /* Status block ID */;
/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
u8 base_vf_id;
@@ -1425,19 +1421,10 @@ struct pf_update_tunnel_config {
* unicast outer MAC in NPAR mode.
*/
u8 update_rx_def_non_ucast_clss;
-/* Update TX per PF tunnel classification scheme. used by pf update. */
- u8 update_tx_pf_clss;
/* Update VXLAN tunnel UDP destination port. */
u8 set_vxlan_udp_port_flg;
/* Update GENEVE tunnel UDP destination port. */
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
-/* If set, enable l2 GENEVE tunnel in TX path. */
- u8 tx_enable_l2geneve;
-/* If set, enable IP GENEVE tunnel in TX path. */
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
/* Classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
@@ -1447,7 +1434,7 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
- __le16 reserved[2];
+ __le16 reserved;
};
/*
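
Widening CORE_TX_BD_TX_DST from one bit to two lets the BD carry any of the four core_tx_dest values (network, loopback, reserved, drop) in bits 15:14. A small sketch of the mask/shift packing; the plain uint16_t and the DEMO_* names stand in for the real BD bitfield word:

#include <stdint.h>

#define DEMO_TX_BD_TX_DST_MASK	0x3	/* same width as the new mask above */
#define DEMO_TX_BD_TX_DST_SHIFT	14

/* Clear the 2-bit destination field and insert a core_tx_dest value */
static uint16_t demo_tx_bd_set_dst(uint16_t bd_flags, unsigned int tx_dst)
{
	bd_flags &= (uint16_t)~(DEMO_TX_BD_TX_DST_MASK <<
				DEMO_TX_BD_TX_DST_SHIFT);
	bd_flags |= (uint16_t)((tx_dst & DEMO_TX_BD_TX_DST_MASK) <<
			       DEMO_TX_BD_TX_DST_SHIFT);
	return bd_flags;
}
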
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 917e8f4c..7443ff9d 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -67,6 +67,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PTLD = 0x590000,
+ GRCBASE_YPLD = 0x5b0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -80,6 +82,10 @@ enum block_addr {
GRCBASE_TCFC = 0x2d0000,
GRCBASE_IGU = 0x180000,
GRCBASE_CAU = 0x1c0000,
+ GRCBASE_RGFS = 0xf00000,
+ GRCBASE_RGSRC = 0x320000,
+ GRCBASE_TGFS = 0xd00000,
+ GRCBASE_TGSRC = 0x322000,
GRCBASE_UMAC = 0x51000,
GRCBASE_XMAC = 0x210000,
GRCBASE_DBG = 0x10000,
@@ -93,12 +99,6 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_RGFS = 0x1fa0000,
- GRCBASE_RGSRC = 0x1fa8000,
- GRCBASE_TGFS = 0x1fb0000,
- GRCBASE_TGSRC = 0x1fb8000,
- GRCBASE_PTLD = 0x1fc0000,
- GRCBASE_YPLD = 0x1fe0000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -159,6 +159,8 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -172,6 +174,10 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
BLOCK_DBG,
@@ -185,12 +191,6 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
- BLOCK_RGFS,
- BLOCK_RGSRC,
- BLOCK_TGFS,
- BLOCK_TGSRC,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index fca74791..48b0048f 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -15,6 +15,10 @@
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
+/* Size of CRC8 lookup table */
+#ifndef LINUX_REMOVE
+#define CRC8_TABLE_SIZE 256
+#endif
/*
* BRB RAM init requirements
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 004ab351..b5ef173e 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1590,7 +1590,8 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
/* Filters are per PF!! */
SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
@@ -1644,8 +1645,9 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
i * REG_SIZE, *(ramLinePointer + i));
/* Set default profile so that no filter match will happen */
- ramLine.lo = 0xffff;
- ramLine.hi = 0xffff;
+ ramLine.lo = 0xffffffff;
+ ramLine.hi = 0x3ff;
+
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
@@ -1722,40 +1724,30 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
return offset;
}
-/* Calculate CRC8 of first 4 bytes in buf */
-static u8 ecore_calc_crc8(const u8 *buf)
-{
- u32 i, j, crc = 0xff << 8;
-
- /* CRC-8 polynomial */
- #define POLY 0x1070
-
- for (j = 0; j < 4; j++, buf++) {
- crc ^= (*buf << 8);
- for (i = 0; i < 8; i++) {
- if (crc & 0x8000)
- crc ^= (POLY << 3);
-
- crc <<= 1;
- }
- }
-
- return (u8)(crc >> 8);
-}
+#ifndef LINUX_REMOVE
+#define CRC8_INIT_VALUE 0xFF
+#define CRC8_TABLE_SIZE 256
+#endif
+static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
-/* Calculate and return CDU validation byte per conneciton type / region /
+/* Calculate and return CDU validation byte per connection type / region /
* cid
*/
-static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
- u32 cid)
+static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
+ u8 conn_type,
+ u8 region, u32 cid)
{
const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+
+	static u8 crc8_table_valid; /* automatically initialized to 0 */
u8 crc, validation_byte = 0;
u32 validation_string = 0;
- const u8 *data_to_crc_rev;
- u8 data_to_crc[4];
+ u32 data_to_crc;
- data_to_crc_rev = (const u8 *)&validation_string;
+ if (crc8_table_valid == 0) {
+ OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
/*
* The CRC is calculated on the String-to-compress:
@@ -1772,13 +1764,22 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
validation_string |= (conn_type & 0xF);
- /* Convert to big-endian (ntoh())*/
- data_to_crc[0] = data_to_crc_rev[3];
- data_to_crc[1] = data_to_crc_rev[2];
- data_to_crc[2] = data_to_crc_rev[1];
- data_to_crc[3] = data_to_crc_rev[0];
-
- crc = ecore_calc_crc8(data_to_crc);
+ /* Convert to big-endian and calculate CRC8*/
+ data_to_crc = OSAL_BE32_TO_CPU(validation_string);
+
+ crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
+ CRC8_INIT_VALUE);
+
+ /* The validation byte [7:0] is composed:
+ * for type A validation
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
+ *
+ * for type B validation
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
+ */
validation_byte |= ((validation_cfg >>
CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
@@ -1793,8 +1794,9 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
}
 /* Calculate and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 cid)
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
@@ -1805,14 +1807,14 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+ *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
}
 /* Calculate and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
@@ -1821,7 +1823,8 @@ void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
+ 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
@@ -1847,8 +1850,7 @@ void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
}
/* Memset task context to 0 while preserving validation bytes */
-void ecore_memset_task_ctx(void *p_ctx_mem, const u32 ctx_size,
- const u8 ctx_type)
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
u8 *p_ctx, *region1_val_ptr;
u8 region1_val;
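
The rewritten validation-byte path builds a 256-entry CRC-8 lookup table once (polynomial 0x07, MSB first) and then runs the table-driven CRC over the big-endian validation string, seeded with CRC8_INIT_VALUE (0xFF). A self-contained sketch of what OSAL_CRC8_POPULATE()/OSAL_CRC8() are assumed to boil down to; the function names below are local stand-ins, not the real OSAL implementation:

#include <stdint.h>
#include <stddef.h>

#define DEMO_CRC8_TABLE_SIZE 256

/* table[i] = CRC-8 of the single byte i, MSB first, zero initial value */
static void demo_crc8_populate_msb(uint8_t table[DEMO_CRC8_TABLE_SIZE],
				   uint8_t poly)
{
	int i, j;

	for (i = 0; i < DEMO_CRC8_TABLE_SIZE; i++) {
		uint8_t crc = (uint8_t)i;

		for (j = 0; j < 8; j++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ poly)
					   : (uint8_t)(crc << 1);
		table[i] = crc;
	}
}

/* Table-driven CRC-8 over an arbitrary buffer.
 * Usage in the spirit of the code above:
 *   crc = demo_crc8(table, (const uint8_t *)&be_string, sizeof(uint32_t), 0xFF);
 */
static uint8_t demo_crc8(const uint8_t table[DEMO_CRC8_TABLE_SIZE],
			 const uint8_t *data, size_t len, uint8_t crc)
{
	while (len--)
		crc = table[crc ^ *data++];
	return crc;
}
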
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 4da3fc29..488dc005 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -424,48 +424,54 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
 * @brief ecore_calc_session_ctx_validation - Calculate validation byte for
- * session context.
+ * session context.
*
- *
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param cid - context cid.
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
*/
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 cid);
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 cid);
/**
 * @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
- * context.
- *
+ * context.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param tid - context tid.
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
*/
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 tid);
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 tid);
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
- * preserving validation bytes.
+ * preserving validation bytes.
*
- *
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
*/
-void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size,
+void ecore_memset_session_ctx(void *p_ctx_mem,
+ u32 ctx_size,
u8 ctx_type);
/**
- * @brief ecore_memset_task_ctx - Memset session context to 0 while preserving
- * validation bytes.
- *
+ * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
*/
-void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size,
+void ecore_memset_task_ctx(void *p_ctx_mem,
+ u32 ctx_size,
u8 ctx_type);
#endif
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 8dc4d150..b57c510c 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -284,122 +284,119 @@ out:
#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
-static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 tmp;
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal write by chip to [%08x:%08x] blocked."
- "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
- " Details2 %08x [Was_error %02x BME deassert %02x"
- " FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
- ? 1 : 0), tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
- 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0));
+ DP_NOTICE(p_hwfn, false,
+ "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal read by chip from [%08x:%08x] blocked."
- " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
- " Details2 %08x [Was_error %02x BME deassert %02x"
- " FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
- ? 1 : 0), tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
- 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0));
+ DP_NOTICE(p_hwfn, false,
+ "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
- DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+ DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp);
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
u32 addr_hi, addr_lo;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
- DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
- tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, false,
+ "ICPL erorr - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
u32 addr_hi, addr_lo, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_DETAILS);
- DP_INFO(p_hwfn,
- "ILT error - Details %08x Details2 %08x"
- " [Address %08x:%08x]\n",
- details, tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, false,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
}
/* Clear the indications */
- ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
return ECORE_SUCCESS;
}
+static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+}
+
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
DP_NOTICE(p_hwfn, false, "FW assertion!\n");
@@ -505,7 +502,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{ /* After Invert 2 */
{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
+ {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
BLOCK_PGLUE_B},
{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@@ -827,8 +824,9 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
ATTN_TYPE_INTERRUPT, !b_fatal);
}
+ /* @DPDK */
/* Reach assertion if attention is fatal */
- if (b_fatal) {
+ if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
p_bit_name);
@@ -842,7 +840,7 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
u32 mask = ~bitmask;
val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
- DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
p_bit_name);
}
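
The attention handler above repeatedly applies the same "(details & MASK) >> SHIFT" decode to the PGLUE error-details registers before logging PFID/VFID/VF_VALID. A tiny sketch of that pattern; the DEMO_* field layout is a placeholder for illustration only, not the real ECORE_PGLUE_ATTENTION_DETAILS_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout, chosen only so the example runs */
#define DEMO_DETAILS_PFID_MASK	0x000f0000
#define DEMO_DETAILS_PFID_SHIFT	16
#define DEMO_DETAILS_VF_VALID	(1u << 19)

static unsigned int get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (unsigned int)((reg & mask) >> shift);
}

int main(void)
{
	uint32_t details = 0x000a0000;	/* PFID = 0xa, VF_VALID bit set */

	printf("PFID %02x, VF_VALID %u\n",
	       get_field(details, DEMO_DETAILS_PFID_MASK,
			 DEMO_DETAILS_PFID_SHIFT),
	       (details & DEMO_DETAILS_VF_VALID) ? 1 : 0);
	return 0;
}
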
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 0c8929e3..067ed605 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -208,4 +208,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
#endif
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 6764bfa6..bc8df8f8 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -27,7 +27,7 @@ static const struct iro iro_arr[49] = {
/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
{ 0x84, 0x8, 0x0, 0x0, 0x2},
/* XSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ { 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3df0, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
@@ -37,13 +37,13 @@ static const struct iro iro_arr[49] = {
/* MSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4990, 0x0, 0x0, 0x0, 0x78},
/* USTORM_INTEG_TEST_DATA_OFFSET */
- { 0x7e48, 0x0, 0x0, 0x0, 0x78},
+ { 0x7f48, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0x61f8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0xb820, 0x30, 0x0, 0x0, 0x30},
+ { 0xbd20, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
{ 0x95b8, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
@@ -57,9 +57,9 @@ static const struct iro iro_arr[49] = {
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x8050, 0x40, 0x0, 0x0, 0x30},
+ { 0x8150, 0x40, 0x0, 0x0, 0x30},
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xe770, 0x60, 0x0, 0x0, 0x60},
+ { 0xec70, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
@@ -89,7 +89,7 @@ static const struct iro iro_arr[49] = {
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12988, 0x10, 0x0, 0x0, 0x8},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x11aa0, 0x38, 0x0, 0x0, 0x18},
+ { 0x11fa0, 0x38, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xa8c0, 0x38, 0x0, 0x0, 0x10},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 4ab8fd5f..e58b8fa0 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1714,13 +1714,20 @@ static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&pstats, 0, sizeof(pstats));
ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
- p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
@@ -1746,10 +1753,10 @@ static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&tstats, 0, sizeof(tstats));
ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
- p_stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- p_stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1783,12 +1790,18 @@ static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&ustats, 0, sizeof(ustats));
ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
- p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1822,23 +1835,27 @@ static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&mstats, 0, sizeof(mstats));
ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
- p_stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
- p_stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- p_stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
- p_stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_eth_stats *p_stats)
{
+ struct ecore_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
int j;
@@ -1849,54 +1866,75 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.eth.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
- p_stats->rx_crc_errors += port_stats.eth.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
- p_stats->rx_pause_frames += port_stats.eth.rxpf;
- p_stats->rx_pfc_frames += port_stats.eth.rxpp;
- p_stats->rx_align_errors += port_stats.eth.raln;
- p_stats->rx_carrier_errors += port_stats.eth.rfcr;
- p_stats->rx_oversize_packets += port_stats.eth.rovr;
- p_stats->rx_jabbers += port_stats.eth.rjbr;
- p_stats->rx_undersize_packets += port_stats.eth.rund;
- p_stats->rx_fragments += port_stats.eth.rfrg;
- p_stats->tx_64_byte_packets += port_stats.eth.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
- p_stats->tx_pause_frames += port_stats.eth.txpf;
- p_stats->tx_pfc_frames += port_stats.eth.txpp;
- p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
- p_stats->tx_total_collisions += port_stats.eth.tncl;
- p_stats->rx_mac_bytes += port_stats.eth.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
- p_stats->tx_mac_bytes += port_stats.eth.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
- p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
- p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
- p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
- p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets =
+ port_stats.eth.u1.ah1.t1519_to_max;
}
}
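The hunk above splits the accumulated MAC statistics into a common block plus a BB- or AH-specific block chosen by the device-family check. A minimal standalone sketch of that layout (struct and field names follow the hunk; the sizes, the is_bb flag, and the helper are simplified assumptions, not the driver's code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified mirror of the split used in the hunk above: counters shared
 * by both device families live in a common block, while the family
 * specific packet-size ranges live in a union selected at run time.
 */
struct eth_stats_common {
	uint64_t rx_1024_to_1518_byte_packets;
	uint64_t tx_pause_frames;
};

struct eth_stats_bb {		/* BB: fine-grained 1519+ byte ranges */
	uint64_t rx_1519_to_1522_byte_packets;
	uint64_t rx_1519_to_2047_byte_packets;
};

struct eth_stats_ah {		/* AH: single catch-all range */
	uint64_t rx_1519_to_max_byte_packets;
};

struct eth_stats {
	struct eth_stats_common common;
	union {
		struct eth_stats_bb bb;
		struct eth_stats_ah ah;
	} u;
};

/* is_bb stands in for the ECORE_IS_BB() device-family check above */
static void accumulate(struct eth_stats *s, int is_bb,
		       uint64_t r1518, uint64_t r1522, uint64_t r1519_to_max)
{
	s->common.rx_1024_to_1518_byte_packets += r1518;
	if (is_bb)
		s->u.bb.rx_1519_to_1522_byte_packets += r1522;
	else
		s->u.ah.rx_1519_to_max_byte_packets += r1519_to_max;
}

int main(void)
{
	struct eth_stats s = { 0 };

	accumulate(&s, 1, 10, 3, 0);
	printf("bb r1522 = %" PRIu64 "\n",
	       s.u.bb.rx_1519_to_1522_byte_packets);
	return 0;
}

Keeping the family-specific ranges in a union means the common counters stay valid on both families while only the 1519+ byte ranges differ per family.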
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index a834ac74..88c5ceb0 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -893,6 +893,30 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn, false,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -1556,10 +1580,9 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false,
"Received a critical error notification from the MFW!\n");
- if (p_hwfn->p_dev->mdump_en) {
+ if (p_hwfn->p_dev->allow_mdump) {
DP_NOTICE(p_hwfn, false,
"Not acknowledging the notification to allow the MFW crash dump\n");
- p_hwfn->p_dev->mdump_en = false;
return;
}
@@ -2894,6 +2917,27 @@ struct ecore_resc_alloc_out_params {
u32 flags;
};
+#define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
+
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ enum _ecore_status_t rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
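ecore_mcp_load_done() above treats the mailbox response parameter as advisory: a set FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR bit only emits a notice and the load still succeeds. A standalone sketch of that decision, with the mailbox call stubbed out (only the bit definition comes from the hunk; the stub and return codes are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Bit definition taken from the hunk above */
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)

enum status { STATUS_SUCCESS = 0, STATUS_FAIL = -1 };

/* Stand-in for the mailbox command: pretend the MFW acknowledged
 * LOAD_DONE and flagged a DID mismatch between nvm-cfg and efuse.
 */
static enum status fake_mcp_cmd(uint32_t *resp, uint32_t *param)
{
	*resp = 1;
	*param = FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR;
	return STATUS_SUCCESS;
}

static enum status load_done(void)
{
	uint32_t resp = 0, param = 0;
	enum status rc = fake_mcp_cmd(&resp, &param);

	if (rc != STATUS_SUCCESS) {
		fprintf(stderr, "LOAD_DONE command failed\n");
		return rc;
	}

	/* A DID/efuse mismatch is reported but does not fail the load */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		fprintf(stderr,
			"warning: device configuration is not supported on this board type\n");

	return STATUS_SUCCESS;
}

int main(void)
{
	return load_done() == STATUS_SUCCESS ? 0 : 1;
}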
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 37d1835f..77fb5a3c 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -171,6 +171,17 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_load_req_params *p_params);
/**
+ * @brief Sends a LOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 190c1352..abc190c9 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -736,6 +736,17 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
+ * @brief A recovery handler must call this function as its first step.
+ * It is assumed that the handler is not run from an interrupt context.
+ *
+ * @param p_dev
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev);
+
+/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 846dc6d1..c9c23096 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -94,359 +94,358 @@
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29804
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29805
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29806
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29807
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29936
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29937
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29938
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29953
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29954
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29957
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29958
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29959
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29960
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_38_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_39_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30023
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30024
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30039
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30040
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30051
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30052
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30053
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30054
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30310
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30566
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30822
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30823
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30824
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30825
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30841
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30857
+#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30873
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30874
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30875
+#define QM_REG_RLPFENABLE_RT_OFFSET 30871
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30891
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30907
+#define QM_REG_WFQPFCRD_RT_OFFSET 30905
#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31163
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31164
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31165
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31677
+#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32189
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32701
+#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33213
+#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33725
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34045
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34081
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34120
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34121
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34122
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34123
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34124
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34128
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34132
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34136
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34137
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34169
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34185
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34201
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34217
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34233
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34234
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34235
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34236
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34237
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34245
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34246
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34247
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34248
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34249
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34250
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34251
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34252
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34253
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34254
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34255
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34256
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34257
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34258
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34259
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34260
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34261
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34262
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34263
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34264
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34265
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34266
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34267
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34268
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34269
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34270
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34271
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34272
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34273
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34274
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34275
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34276
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34277
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34278
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34279
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34280
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34281
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34282
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34283
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34284
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34285
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34286
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34287
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34288
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34289
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34290
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34291
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34292
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34293
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34294
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34295
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34296
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34297
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34298
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34299
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34300
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34301
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34302
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34303
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34304
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34305
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34306
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34307
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34308
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34309
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34310
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
-#define RUNTIME_ARRAY_SIZE 34311
+#define RUNTIME_ARRAY_SIZE 34309
#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 8fd64d7a..d6e4b9e0 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -165,23 +165,19 @@ static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
}
static void
-__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type)
{
*p_tunn_cls = tun_type->tun_cls;
-
- if (tun_type->b_mode_enabled)
- *p_enable_tx_clas = 1;
}
static void
-ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type,
u8 *p_update_port, __le16 *p_port,
struct ecore_tunn_update_udp_port *p_udp_port)
{
- __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
- tun_type);
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
if (p_udp_port->b_update_port) {
*p_update_port = 1;
*p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
@@ -200,33 +196,27 @@ ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
- p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
@@ -282,29 +272,24 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
}
@@ -345,7 +330,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
/* For easier debugging */
p_ramrod->dont_log_ramrods = 0;
- p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+ p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
switch (mode) {
case ECORE_MF_DEFAULT:
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 6dc969b0..45a0356d 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -79,6 +79,10 @@
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
+/* Minimum number of free BDs in RX ring, that guarantee receiving of at least
+ * one RX packet.
+ */
+#define ETH_RX_BD_THRESHOLD 12
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
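ETH_RX_BD_THRESHOLD above is documented as the minimum number of free RX BDs that still guarantees reception of at least one packet. A hedged sketch of how a ring producer might apply such a threshold (the ring bookkeeping below is hypothetical, not the qede datapath):

#include <stdbool.h>
#include <stdint.h>

/* Values from the hunks above */
#define ETH_RX_MAX_BUFF_PER_PKT	5
#define ETH_RX_BD_THRESHOLD	12

struct rx_ring {
	uint16_t nb_desc;	/* total BDs in the ring */
	uint16_t used;		/* BDs currently holding in-flight buffers */
};

/* Below ETH_RX_BD_THRESHOLD free BDs the ring can no longer guarantee
 * room for one more received packet, so it should be replenished before
 * the producer index is advanced further.
 */
static bool rx_ring_needs_refill(const struct rx_ring *ring)
{
	uint16_t free_bds = ring->nb_desc - ring->used;

	return free_bds < ETH_RX_BD_THRESHOLD;
}

int main(void)
{
	struct rx_ring ring = { .nb_desc = 512, .used = 503 };

	return rx_ring_needs_refill(&ring) ? 0 : 1;
}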
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index fcf98477..1ad8a962 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -132,13 +132,28 @@ struct eth_stats {
u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
/* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
u64 r1518;
+ union {
+ struct { /* bb */
/* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */
- u64 r1522;
- u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
- u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
- u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+ u64 r1522;
+/* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
+ u64 r2047;
+/* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
+ u64 r4095;
+/* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+ u64 r9216;
/* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */
- u64 r16383;
+ u64 r16383;
+ } bb0;
+ struct { /* ah */
+ u64 unused1;
+/* 0x07 (Offset 0x38 ) RX 1519 to max byte frame counter*/
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
@@ -156,19 +171,40 @@ struct eth_stats {
u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
/* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
u64 t1518;
+ union {
+ struct { /* bb */
/* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
- u64 t2047;
+ u64 t2047;
/* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
- u64 t4095;
+ u64 t4095;
/* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
- u64 t9216;
+ u64 t9216;
/* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */
- u64 t16383;
+ u64 t16383;
+ } bb1;
+ struct { /* ah */
+/* 0x47 (Offset 0xd8 ) TX 1519 to max byte frame counter */
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
- u64 tlpiec;
- u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ union {
+ struct { /* bb */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+ u64 tlpiec;
+/* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ u64 tncl;
+ } bb2;
+ struct { /* ah */
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
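The unions added to struct eth_stats keep the BB and AH counter blocks the same size, so the fields that follow them (rfcs at offset 0x58, and so on) stay at fixed offsets whichever arm is read. A compile-time check of that invariant on a trimmed-down mirror of the struct (field names from the hunk; the mirror and the assertions are illustrative only):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Trimmed-down mirror of the u0 union above: five u64 counters on BB,
 * one meaningful counter plus padding on AH, so both arms are 40 bytes.
 */
struct eth_stats_mini {
	uint64_t r1518;			/* last shared counter before u0 */
	union {
		struct {
			uint64_t r1522;
			uint64_t r2047;
			uint64_t r4095;
			uint64_t r9216;
			uint64_t r16383;
		} bb0;
		struct {
			uint64_t unused1;
			uint64_t r1519_to_max;
			uint64_t unused2;
			uint64_t unused3;
			uint64_t unused4;
		} ah0;
	} u0;
	uint64_t rfcs;			/* must land right after the union */
};

/* Both arms of the union are the same size ... */
static_assert(sizeof(((struct eth_stats_mini *)0)->u0.bb0) ==
	      sizeof(((struct eth_stats_mini *)0)->u0.ah0),
	      "BB and AH stat blocks must be the same size");

/* ... so rfcs keeps a fixed offset no matter which arm is read. */
static_assert(offsetof(struct eth_stats_mini, rfcs) ==
	      offsetof(struct eth_stats_mini, u0) + 5 * sizeof(uint64_t),
	      "rfcs offset must not depend on the union arm");

int main(void)
{
	return 0;
}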
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index f9920f37..60286545 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1200,3 +1200,8 @@
#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
+
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 0x501b00UL
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
deleted file mode 100644
index a3c0b137..00000000
--- a/drivers/net/qede/qede_eth_if.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#include "qede_ethdev.h"
-
-static int
-qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
-{
- int rc, i;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
- u8 tx_switching = 0;
- struct ecore_sp_vport_start_params start = { 0 };
-
- start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
- ECORE_TPA_MODE_NONE;
- start.remove_inner_vlan = p_params->remove_inner_vlan;
- start.tx_switching = tx_switching;
- start.only_untagged = false; /* untagged only */
- start.drop_ttl0 = p_params->drop_ttl0;
- start.concrete_fid = p_hwfn->hw_info.concrete_fid;
- start.opaque_fid = p_hwfn->hw_info.opaque_fid;
- start.concrete_fid = p_hwfn->hw_info.concrete_fid;
- start.handle_ptp_pkts = p_params->handle_ptp_pkts;
- start.vport_id = p_params->vport_id;
- start.mtu = p_params->mtu;
- /* @DPDK - Disable FW placement */
- start.zero_placement_offset = 1;
-
- rc = ecore_sp_vport_start(p_hwfn, &start);
- if (rc) {
- DP_ERR(edev, "Failed to start VPORT\n");
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started V-PORT %d with MTU %d\n",
- p_params->vport_id, p_params->mtu);
- }
-
- ecore_reset_vport_stats(edev);
-
- return 0;
-}
-
-static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
-{
- int rc, i;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_vport_stop(p_hwfn,
- p_hwfn->hw_info.opaque_fid, vport_id);
-
- if (rc) {
- DP_ERR(edev, "Failed to stop VPORT\n");
- return rc;
- }
- }
-
- return 0;
-}
-
-static int
-qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
-{
- struct ecore_sp_vport_update_params sp_params;
- struct ecore_rss_params sp_rss_params;
- int rc, i;
-
- memset(&sp_params, 0, sizeof(sp_params));
- memset(&sp_rss_params, 0, sizeof(sp_rss_params));
-
- /* Translate protocol params into sp params */
- sp_params.vport_id = params->vport_id;
- sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
- sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
- sp_params.vport_active_rx_flg = params->vport_active_flg;
- sp_params.vport_active_tx_flg = params->vport_active_flg;
- sp_params.update_inner_vlan_removal_flg =
- params->update_inner_vlan_removal_flg;
- sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
- sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
- sp_params.tx_switching_flg = params->tx_switching_flg;
- sp_params.accept_any_vlan = params->accept_any_vlan;
- sp_params.update_accept_any_vlan_flg =
- params->update_accept_any_vlan_flg;
- sp_params.mtu = params->mtu;
- sp_params.sge_tpa_params = params->sge_tpa_params;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
-
- sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_update(p_hwfn, &sp_params,
- ECORE_SPQ_MODE_EBLOCK, NULL);
- if (rc) {
- DP_ERR(edev, "Failed to update VPORT\n");
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Updated V-PORT %d: active_flag %d [update %d]\n",
- params->vport_id, params->vport_active_flg,
- params->update_vport_active_flg);
- }
-
- return 0;
-}
-
-static int
-qed_start_rxq(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- uint16_t bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size,
- struct ecore_rxq_start_ret_params *ret_params)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_num % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- p_params->queue_id = p_params->queue_id / edev->num_hwfns;
- p_params->stats_id = p_params->vport_id;
-
- rc = ecore_eth_rx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size,
- ret_params);
-
- if (rc) {
- DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
- p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
-
- return 0;
-}
-
-static int
-qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
- int rc, hwfn_index;
- struct ecore_hwfn *p_hwfn;
-
- hwfn_index = rss_id % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
- return rc;
- }
-
- return 0;
-}
-
-static int
-qed_start_txq(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- uint16_t pbl_size,
- struct ecore_txq_start_ret_params *ret_params)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_num % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- p_params->queue_id = p_params->queue_id / edev->num_hwfns;
- p_params->stats_id = p_params->vport_id;
-
- rc = ecore_eth_tx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params, 0 /* tc */,
- pbl_addr, pbl_size,
- ret_params);
-
- if (rc) {
- DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
- p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
-
- return 0;
-}
-
-static int
-qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_id % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
- return rc;
- }
-
- return 0;
-}
-
-static int
-qed_fp_cqe_completion(struct ecore_dev *edev,
- uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
-{
- return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
- cqe);
-}
-
-static int qed_fastpath_stop(struct ecore_dev *edev)
-{
- ecore_hw_stop_fastpath(edev);
-
- return 0;
-}
-
-static void qed_fastpath_start(struct ecore_dev *edev)
-{
- struct ecore_hwfn *p_hwfn;
- int i;
-
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- ecore_hw_start_fastpath(p_hwfn);
- }
-}
-
-static void
-qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
-{
- ecore_get_vport_stats(edev, stats);
-}
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
- enum qed_filter_rx_mode_type type)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_accept_flags flags;
-
- memset(&flags, 0, sizeof(flags));
-
- flags.update_rx_mode_config = 1;
- flags.update_tx_mode_config = 1;
- flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
- ECORE_ACCEPT_MCAST_MATCHED |
- ECORE_ACCEPT_BCAST;
-
- flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
- ECORE_ACCEPT_MCAST_MATCHED |
- ECORE_ACCEPT_BCAST;
-
- if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
- if (IS_VF(edev)) {
- flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
- DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
- }
- } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
- flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
- } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
- QED_FILTER_RX_MODE_TYPE_PROMISC)) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
- ECORE_ACCEPT_MCAST_UNMATCHED;
- }
-
- return ecore_filter_accept_cmd(edev, 0, flags, false, false,
- ECORE_SPQ_MODE_CB, NULL);
-}
-
-static const struct qed_eth_ops qed_eth_ops_pass = {
- INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
- INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
- INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
- INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
- INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
- INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
- INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
- INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
- INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
- INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
- INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
- INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
- INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
-};
-
-const struct qed_eth_ops *qed_get_eth_ops(void)
-{
- return &qed_eth_ops_pass;
-}
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
deleted file mode 100644
index 097e0257..00000000
--- a/drivers/net/qede/qede_eth_if.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#ifndef _QEDE_ETH_IF_H
-#define _QEDE_ETH_IF_H
-
-#include "qede_if.h"
-
-/*forward decl */
-struct eth_slow_path_rx_cqe;
-
-#define INIT_STRUCT_FIELD(field, value) .field = value
-
-#define QED_ETH_INTERFACE_VERSION 609
-
-#define QEDE_MAX_MCAST_FILTERS 64
-
-enum qed_filter_rx_mode_type {
- QED_FILTER_RX_MODE_TYPE_REGULAR,
- QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
- QED_FILTER_RX_MODE_TYPE_PROMISC,
-};
-
-enum qed_filter_type {
- QED_FILTER_TYPE_UCAST,
- QED_FILTER_TYPE_MCAST,
- QED_FILTER_TYPE_RX_MODE,
- QED_MAX_FILTER_TYPES,
-};
-
-struct qed_dev_eth_info {
- struct qed_dev_info common;
-
- uint8_t num_queues;
- uint8_t num_tc;
-
- struct ether_addr port_mac;
- uint16_t num_vlan_filters;
- uint32_t num_mac_filters;
-
- /* Legacy VF - this affects the datapath */
- bool is_legacy;
-};
-
-struct qed_update_vport_params {
- uint8_t vport_id;
- uint8_t update_vport_active_flg;
- uint8_t vport_active_flg;
- uint8_t update_inner_vlan_removal_flg;
- uint8_t inner_vlan_removal_flg;
- uint8_t update_tx_switching_flg;
- uint8_t tx_switching_flg;
- uint8_t update_accept_any_vlan_flg;
- uint8_t accept_any_vlan;
- uint8_t update_rss_flg;
- uint16_t mtu;
- struct ecore_sge_tpa_params *sge_tpa_params;
-};
-
-struct qed_start_vport_params {
- bool remove_inner_vlan;
- bool handle_ptp_pkts;
- bool enable_lro;
- bool drop_ttl0;
- uint8_t vport_id;
- uint16_t mtu;
- bool clear_stats;
-};
-
-struct qed_eth_ops {
- const struct qed_common_ops *common;
-
- int (*fill_dev_info)(struct ecore_dev *edev,
- struct qed_dev_eth_info *info);
-
- int (*vport_start)(struct ecore_dev *edev,
- struct qed_start_vport_params *params);
-
- int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
-
- int (*vport_update)(struct ecore_dev *edev,
- struct qed_update_vport_params *params);
-
- int (*q_rx_start)(struct ecore_dev *cdev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- uint16_t bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size,
- struct ecore_rxq_start_ret_params *ret_params);
-
- int (*q_rx_stop)(struct ecore_dev *edev,
- uint8_t rss_id, void *handle);
-
- int (*q_tx_start)(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- uint16_t pbl_size,
- struct ecore_txq_start_ret_params *ret_params);
-
- int (*q_tx_stop)(struct ecore_dev *edev,
- uint8_t rss_id, void *handle);
-
- int (*eth_cqe_completion)(struct ecore_dev *edev,
- uint8_t rss_id,
- struct eth_slow_path_rx_cqe *cqe);
-
- int (*fastpath_stop)(struct ecore_dev *edev);
-
- void (*fastpath_start)(struct ecore_dev *edev);
-
- void (*get_vport_stats)(struct ecore_dev *edev,
- struct ecore_eth_stats *stats);
-};
-
-/* externs */
-
-extern const struct qed_common_ops qed_common_ops_pass;
-
-const struct qed_eth_ops *qed_get_eth_ops(void);
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
- enum qed_filter_rx_mode_type type);
-
-#endif /* _QEDE_ETH_IF_H */
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 9fae40b6..0e059898 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -125,143 +125,199 @@ struct rte_qede_xstats_name_off {
};
static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
- {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+ {"rx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
{"rx_multicast_bytes",
- offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
{"rx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
- {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
+ {"rx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
{"rx_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
{"rx_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
- {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+ {"tx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
{"tx_multicast_bytes",
- offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
{"tx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
- {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
+ {"tx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
{"tx_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
{"tx_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
{"rx_64_byte_packets",
- offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
{"rx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_65_to_127_byte_packets)},
{"rx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_128_to_255_byte_packets)},
{"rx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_256_to_511_byte_packets)},
{"rx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_512_to_1023_byte_packets)},
{"rx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
- {"rx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
- {"rx_1519_to_2047_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
- {"rx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
- {"rx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
- {"rx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- rx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_1024_to_1518_byte_packets)},
{"tx_64_byte_packets",
- offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
{"tx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_65_to_127_byte_packets)},
{"tx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_128_to_255_byte_packets)},
{"tx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_256_to_511_byte_packets)},
{"tx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_512_to_1023_byte_packets)},
{"tx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
- {"trx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
- {"tx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
- {"tx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
- {"tx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- tx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_1024_to_1518_byte_packets)},
{"rx_mac_crtl_frames",
- offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
{"tx_mac_control_frames",
- offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
- {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
- {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
+ {"rx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
+ {"tx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
{"rx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
{"tx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
- {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
- {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+ {"rx_crc_errors",
+ offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
+ {"rx_align_errors",
+ offsetof(struct ecore_eth_stats_common, rx_align_errors)},
{"rx_carrier_errors",
- offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+ offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
{"rx_oversize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_oversize_packets)},
- {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+ offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
+ {"rx_jabber_errors",
+ offsetof(struct ecore_eth_stats_common, rx_jabbers)},
{"rx_undersize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_undersize_packets)},
- {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+ offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
+ {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
{"rx_host_buffer_not_available",
- offsetof(struct ecore_eth_stats, no_buff_discards)},
+ offsetof(struct ecore_eth_stats_common, no_buff_discards)},
/* Number of packets discarded because they are bigger than MTU */
{"rx_packet_too_big_discards",
- offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+ offsetof(struct ecore_eth_stats_common,
+ packet_too_big_discard)},
{"rx_ttl_zero_discards",
- offsetof(struct ecore_eth_stats, ttl0_discard)},
+ offsetof(struct ecore_eth_stats_common, ttl0_discard)},
{"rx_multi_function_tag_filter_discards",
- offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
{"rx_mac_filter_discards",
- offsetof(struct ecore_eth_stats, mac_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
{"rx_hw_buffer_truncates",
- offsetof(struct ecore_eth_stats, brb_truncates)},
+ offsetof(struct ecore_eth_stats_common, brb_truncates)},
{"rx_hw_buffer_discards",
- offsetof(struct ecore_eth_stats, brb_discards)},
- {"tx_lpi_entry_count",
- offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
- {"tx_total_collisions",
- offsetof(struct ecore_eth_stats, tx_total_collisions)},
+ offsetof(struct ecore_eth_stats_common, brb_discards)},
{"tx_error_drop_packets",
- offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
- {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+ {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
{"rx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
{"rx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
{"rx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
{"rx_mac_frames_ok",
- offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
- {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
+ {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
{"tx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
{"tx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
{"tx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
{"lro_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
{"lro_coalesced_events",
- offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
{"lro_aborts_num",
- offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+ offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
{"lro_not_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_not_coalesced_pkts)},
{"lro_coalesced_bytes",
- offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
+ {"rx_1519_to_1522_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_1522_byte_packets)},
+ {"rx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_2047_byte_packets)},
+ {"rx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_2048_to_4095_byte_packets)},
+ {"rx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_4096_to_9216_byte_packets)},
+ {"rx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_9217_to_16383_byte_packets)},
+
+ {"tx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_1519_to_2047_byte_packets)},
+ {"tx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_2048_to_4095_byte_packets)},
+ {"tx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_4096_to_9216_byte_packets)},
+ {"tx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_9217_to_16383_byte_packets)},
+
+ {"tx_lpi_entry_count",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
+ {"tx_total_collisions",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
+};
+
+static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
+ {"rx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ rx_1519_to_max_byte_packets)},
+ {"tx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ tx_1519_to_max_byte_packets)},
};
static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
@@ -294,7 +350,6 @@ static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
rte_memcpy(&qdev->dev_info, info, sizeof(*info));
- qdev->num_tc = qdev->dev_info.num_tc;
qdev->ops = qed_ops;
}
@@ -308,9 +363,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, "*********************************\n");
DP_INFO(edev, " DPDK version:%s\n", rte_version());
- DP_INFO(edev, " Chip details : %s%d\n",
+ DP_INFO(edev, " Chip details : %s %c%d\n",
ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
+ 'A' + edev->chip_rev,
+ (int)edev->chip_metal);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
@@ -329,6 +385,178 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
}
#endif
+static int
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_start_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ /* @DPDK - Disable FW placement */
+ params.zero_placement_offset = 1;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+ }
+ ecore_reset_vport_stats(edev);
+ DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+ return 0;
+}
+
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ uint8_t vport_id;
+ int rc;
+ int i;
+
+ vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* Activate or deactivate vport via vport-update */
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
+ int rc = -1;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_vport_active_rx_flg = 1;
+ params.update_vport_active_tx_flg = 1;
+ params.vport_active_rx_flg = flg;
+ params.vport_active_tx_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ break;
+ }
+ }
+ DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+ return rc;
+}
+
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
+{
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+	 * starts on a new BD, so there is one BD per segment.
+ */
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
+/* Enable/disable LRO via vport-update */
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_sge_tpa_params tpa_params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
+ params.vport_id = 0;
+ params.sge_tpa_params = &tpa_params;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update LRO\n");
+ return -1;
+ }
+ }
+
+ DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/* Update MTU via vport-update without doing a port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ params.vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+ }
+ }
+ DP_INFO(edev, "MTU updated to %u\n", mtu);
+
+ return 0;
+}
+
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -337,6 +565,43 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
/* ucast->assert_on_error = true; - For debug */
}
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_accept_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ flags.update_rx_mode_config = 1;
+ flags.update_tx_mode_config = 1;
+ flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (IS_VF(edev)) {
+ flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ }
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+ QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+ ECORE_SPQ_MODE_CB, NULL);
+}
static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
uint8_t clss, bool mode, bool mask)
{
@@ -363,6 +628,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
ETHER_ADDR_LEN) == 0) &&
+ ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
DP_ERR(edev, "Unicast MAC is already added"
" with vlan = %u, vni = %u\n",
@@ -565,49 +831,57 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_update_vport_params params = {
- .vport_id = 0,
- .accept_any_vlan = action,
- .update_accept_any_vlan_flg = 1,
- };
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- /* Proceed only if action actually needs to be performed */
- if (qdev->accept_any_vlan == action)
- return;
-
- rc = qdev->ops->vport_update(edev, &params);
- if (rc) {
- DP_ERR(edev, "Failed to %s accept-any-vlan\n",
- action ? "enable" : "disable");
- } else {
- DP_INFO(edev, "%s accept-any-vlan\n",
- action ? "enabled" : "disabled");
- qdev->accept_any_vlan = action;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to configure accept-any-vlan\n");
+ return;
+ }
}
+
+ DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}
-static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
- struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_inner_vlan_removal_flg = 1;
- vport_update_params.inner_vlan_removal_flg = set_stripping;
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_inner_vlan_removal_flg = 1;
+ params.inner_vlan_removal_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return -1;
+ }
}
- qdev->vlan_strip_flg = set_stripping;
+ DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
return 0;
}
@@ -741,33 +1015,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}
-static int qede_init_vport(struct qede_dev *qdev)
-{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_start_vport_params start = {0};
- int rc;
-
- start.remove_inner_vlan = 1;
- start.enable_lro = qdev->enable_lro;
- start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
- start.vport_id = 0;
- start.drop_ttl0 = false;
- start.clear_stats = 1;
- start.handle_ptp_pkts = 0;
-
- rc = qdev->ops->vport_start(edev, &start);
- if (rc) {
- DP_ERR(edev, "Start V-PORT failed %d\n", rc);
- return rc;
- }
-
- DP_INFO(edev,
- "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
- start.vport_id, ETHER_MTU);
-
- return 0;
-}
-
static void qede_prandom_bytes(uint32_t *buff)
{
uint8_t i;
@@ -818,33 +1065,119 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
return 0;
}
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ ecore_hw_start_fastpath(p_hwfn);
+ }
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Update MTU only if it has changed */
+ if (qdev->mtu != qdev->new_mtu) {
+ if (qede_update_mtu(eth_dev, qdev->new_mtu))
+ goto err;
+ qdev->mtu = qdev->new_mtu;
+ /* If MTU has changed then update TPA too */
+ if (qdev->enable_lro)
+ if (qede_enable_tpa(eth_dev, true))
+ goto err;
+ }
+
+ /* Start queues */
+ if (qede_start_queues(eth_dev))
+ goto err;
+
+ /* Newer SR-IOV PF driver expects RX/TX queues to be started before
+	 * enabling RSS. Hence RSS configuration is deferred up to this point.
+ * Also, we would like to retain similar behavior in PF case, so we
+ * don't do PF/VF specific check here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ goto err;
+
+	/* Enable vport */
+ if (qede_activate_vport(eth_dev, true))
+ goto err;
+
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
+ /* Start/resume traffic */
+ qede_fastpath_start(edev);
+
+ DP_INFO(edev, "Device started\n");
+
+ return 0;
+err:
+ DP_ERR(edev, "Device start fails\n");
+ return -1; /* common error code is < 0 */
+}
+
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Disable vport */
+ if (qede_activate_vport(eth_dev, false))
+ return;
+
+ if (qdev->enable_lro)
+ qede_enable_tpa(eth_dev, false);
+
+	/* TODO: Do we need to disable LRO or RSS? */
+ /* Stop queues */
+ qede_stop_queues(eth_dev);
+
+ /* Disable traffic */
+ ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+
+ DP_INFO(edev, "Device is stopped\n");
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc;
PMD_INIT_FUNC_TRACE(edev);
/* Check requirements for 100G mode */
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
+ eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
/* Sanity checks and throw warnings */
- if (rxmode->enable_scatter == 1)
+ if (rxmode->enable_scatter)
eth_dev->data->scattered_rx = 1;
if (!rxmode->hw_strip_crc)
@@ -852,83 +1185,77 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (!rxmode->hw_ip_checksum)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
- "in hw\n");
-
- if (rxmode->enable_lro) {
- qdev->enable_lro = true;
- /* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
+ "in hw\n");
+ if (rxmode->header_split)
+ DP_INFO(edev, "Header split enable is not supported\n");
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
+ ETH_MQ_RX_RSS)) {
+ DP_ERR(edev, "Unsupported multi-queue mode\n");
+ return -ENOTSUP;
}
+ /* Flow director mode check */
+ if (qede_check_fdir_support(eth_dev))
+ return -ENOTSUP;
- /* Check for the port restart case */
- if (qdev->state != QEDE_DEV_INIT) {
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- return rc;
+ /* Deallocate resources if held previously. It is needed only if the
+	 * queue count has changed from the previous configuration. If it is
+	 * going to change, then RX/TX queue setup will be called
+ * again and the fastpath pointers will be reinitialized there.
+ */
+ if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
+ qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
qede_dealloc_fp_resc(eth_dev);
+ /* Proceed with updated queue count */
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
}
- qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
- qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
- qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
- /* Fastpath status block should be initialized before sending
- * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
+	/* VF's MTU has to be set using vport-start, whereas
+ * PF's MTU can be updated via vport-update.
*/
- rc = qede_alloc_fp_resc(qdev);
- if (rc != 0)
- return rc;
-
- /* Issue VPORT-START with default config values to allow
- * other port configurations early on.
- */
- rc = qede_init_vport(qdev);
- if (rc != 0)
- return rc;
-
- if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
- rxmode->mq_mode == ETH_MQ_RX_NONE)) {
- DP_ERR(edev, "Unsupported RSS mode\n");
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
+ if (IS_VF(edev)) {
+ if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+ return -1;
+ } else {
+ if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+ return -1;
}
- /* Flow director mode check */
- rc = qede_check_fdir_support(eth_dev);
- if (rc) {
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
- }
- SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+ qdev->mtu = rxmode->max_rx_pkt_len;
+ qdev->new_mtu = qdev->mtu;
- SLIST_INIT(&qdev->vlan_list_head);
+ /* Configure TPA parameters */
+ if (rxmode->enable_lro) {
+ if (qede_enable_tpa(eth_dev, true))
+ return -EINVAL;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+ qdev->enable_lro = rxmode->enable_lro;
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
- qdev->state = QEDE_DEV_CONFIG;
-
- DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
- (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
- qdev->num_tc);
+ DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+ QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
return 0;
}
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
- .nb_max = NUM_RX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 128,
.nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
- .nb_max = NUM_TX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 256,
.nb_align = 256,
.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
@@ -946,7 +1273,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1103,44 +1430,34 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- int rc;
PMD_INIT_FUNC_TRACE(edev);
- qede_fdir_dealloc_resc(eth_dev);
-
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and device can be brought up newly
*/
- if (qdev->state != QEDE_DEV_STOP)
+ if (eth_dev->data->dev_started)
qede_dev_stop(eth_dev);
- else
- DP_INFO(edev, "Device is already stopped\n");
-
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- DP_ERR(edev, "Failed to stop VPORT\n");
+ qede_stop_vport(edev);
+ qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);
- qdev->ops->common->slowpath_stop(edev);
+ eth_dev->data->nb_rx_queues = 0;
+ eth_dev->data->nb_tx_queues = 0;
+ qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
-
rte_intr_disable(&pci_dev->intr_handle);
-
rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (edev->num_hwfns > 1)
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
-
- qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}
static void
@@ -1153,35 +1470,36 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
/* RX Stats */
- eth_stats->ipackets = stats.rx_ucast_pkts +
- stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+ eth_stats->ipackets = stats.common.rx_ucast_pkts +
+ stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
- eth_stats->ibytes = stats.rx_ucast_bytes +
- stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+ eth_stats->ibytes = stats.common.rx_ucast_bytes +
+ stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
- eth_stats->ierrors = stats.rx_crc_errors +
- stats.rx_align_errors +
- stats.rx_carrier_errors +
- stats.rx_oversize_packets +
- stats.rx_jabbers + stats.rx_undersize_packets;
+ eth_stats->ierrors = stats.common.rx_crc_errors +
+ stats.common.rx_align_errors +
+ stats.common.rx_carrier_errors +
+ stats.common.rx_oversize_packets +
+ stats.common.rx_jabbers + stats.common.rx_undersize_packets;
- eth_stats->rx_nombuf = stats.no_buff_discards;
+ eth_stats->rx_nombuf = stats.common.no_buff_discards;
- eth_stats->imissed = stats.mftag_filter_discards +
- stats.mac_filter_discards +
- stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+ eth_stats->imissed = stats.common.mftag_filter_discards +
+ stats.common.mac_filter_discards +
+ stats.common.no_buff_discards +
+ stats.common.brb_truncates + stats.common.brb_discards;
/* TX stats */
- eth_stats->opackets = stats.tx_ucast_pkts +
- stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+ eth_stats->opackets = stats.common.tx_ucast_pkts +
+ stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
- eth_stats->obytes = stats.tx_ucast_bytes +
- stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+ eth_stats->obytes = stats.common.tx_ucast_bytes +
+ stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
- eth_stats->oerrors = stats.tx_err_drop_pkts;
+ eth_stats->oerrors = stats.common.tx_err_drop_pkts;
/* Queue stats */
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
@@ -1195,38 +1513,34 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
" appropriately and retry.\n");
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
- eth_stats->q_ipackets[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rcv_pkts));
- eth_stats->q_errors[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_hw_errors)) +
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_alloc_errors));
- i++;
- }
+ for_each_rss(qid) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
if (i == rxq_stat_cntrs)
break;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
- txq = qdev->fp_array[(qid)].txqs[0];
- eth_stats->q_opackets[j] =
- *((uint64_t *)(uintptr_t)
- (((uint64_t)(uintptr_t)(txq)) +
- offsetof(struct qede_tx_queue,
- xmit_pkts)));
- j++;
- }
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
if (j == txq_stat_cntrs)
break;
}
@@ -1234,18 +1548,27 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
- return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ if (ECORE_IS_BB(&qdev->edev))
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_bb_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ else
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_ah_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
static int
-qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+qede_get_xstats_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int limit)
{
struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
unsigned int rxq_stat_cntrs;
@@ -1259,6 +1582,24 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_bb_xstats_strings[i].name);
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_ah_xstats_strings[i].name);
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
@@ -1290,7 +1631,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
if (n < num)
return num;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
@@ -1299,13 +1640,31 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_bb_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_ah_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for_each_rss(qid) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
+ ((char *)(qdev->fp_array[qid].rxq)) +
qede_rxq_xstats_strings[i].offset);
xstats[stat_idx].id = stat_idx;
stat_idx++;
@@ -1723,6 +2082,8 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
return 0;
}
+
+
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
@@ -1756,19 +2117,17 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
rte_delay_ms(1000);
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
- for_each_queue(i) {
+ for_each_rss(i) {
fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- bufsz = (uint16_t)rte_pktmbuf_data_room_size(
- fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
- }
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
}
qede_dev_start(dev);
if (frame_size > ETHER_MAX_LEN)
@@ -1910,6 +2269,8 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
uint16_t filter_type;
int rc, i;
+ PMD_INIT_FUNC_TRACE(edev);
+
filter_type = conf->filter_type | qdev->vxlan_filter_type;
/* First determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
@@ -2163,7 +2524,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
@@ -2177,8 +2538,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DP_NOTICE(edev, false,
- "Skipping device init from secondary process\n");
+ DP_ERR(edev, "Skipping device init from secondary process\n");
return 0;
}
@@ -2195,20 +2555,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
}
DP_INFO(edev, "Starting qede probe\n");
-
- rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
- dp_module, dp_level, is_vf);
-
+ rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+ dp_level, is_vf);
if (rc != 0) {
DP_ERR(edev, "qede probe failed rc %d\n", rc);
return -ENODEV;
}
-
qede_update_pf_params(edev);
-
rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
@@ -2306,8 +2661,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
ether_addr_copy(&eth_dev->data->mac_addrs[0],
&adapter->primary_mac);
} else {
- DP_NOTICE(edev, false,
- "No VF macaddr assigned\n");
+ DP_ERR(edev, "No VF macaddr assigned\n");
}
}
}
@@ -2321,17 +2675,28 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
do_once = false;
}
- adapter->state = QEDE_DEV_INIT;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->vlan_list_head);
+ SLIST_INIT(&adapter->uc_list_head);
+ adapter->mtu = ETHER_MTU;
+ adapter->new_mtu = ETHER_MTU;
+ if (!is_vf)
+ if (qede_start_vport(adapter, adapter->mtu))
+ return -1;
- DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
- adapter->primary_mac.addr_bytes[0],
- adapter->primary_mac.addr_bytes[1],
- adapter->primary_mac.addr_bytes[2],
- adapter->primary_mac.addr_bytes[3],
- adapter->primary_mac.addr_bytes[4],
- adapter->primary_mac.addr_bytes[5]);
+ DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ adapter->primary_mac.addr_bytes[0],
+ adapter->primary_mac.addr_bytes[1],
+ adapter->primary_mac.addr_bytes[2],
+ adapter->primary_mac.addr_bytes[3],
+ adapter->primary_mac.addr_bytes[4],
+ adapter->primary_mac.addr_bytes[5]);
- return rc;
+ DP_INFO(edev, "Device initialized\n");
+
+ return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2346,6 +2711,13 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index e4323a0d..a3254b12 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -41,8 +41,6 @@
#include "qede_logs.h"
#include "qede_if.h"
-#include "qede_eth_if.h"
-
#include "qede_rxtx.h"
#define qede_stringify1(x...) #x
@@ -51,8 +49,8 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 4
-#define QEDE_PMD_VERSION_REVISION 0
+#define QEDE_PMD_VERSION_MINOR 5
+#define QEDE_PMD_VERSION_REVISION 2
#define QEDE_PMD_VERSION_PATCH 1
#define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \
@@ -73,12 +71,8 @@
(edev)->dev_info.num_tc)
#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
-#define QEDE_RSS_COUNT(qdev) ((qdev)->num_queues - (qdev)->fp_num_tx)
-#define QEDE_TSS_COUNT(qdev) (((qdev)->num_queues - (qdev)->fp_num_rx) * \
- (qdev)->num_tc)
-
-#define QEDE_FASTPATH_TX (1 << 0)
-#define QEDE_FASTPATH_RX (1 << 1)
+#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)
+#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)
#define QEDE_DUPLEX_FULL 1
#define QEDE_DUPLEX_HALF 2
@@ -138,12 +132,12 @@ extern char fw_file[];
/* Maximum number of flowdir filters */
#define QEDE_RFS_MAX_FLTR (256)
-/* Port/function states */
-enum qede_dev_state {
- QEDE_DEV_INIT, /* Init the chip and Slowpath */
- QEDE_DEV_CONFIG, /* Create Vport/Fastpath resources */
- QEDE_DEV_START, /* Start RX/TX queues, enable traffic */
- QEDE_DEV_STOP, /* Deactivate vport and stop traffic */
+#define QEDE_MAX_MCAST_FILTERS (64)
+
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
};
struct qede_vlan_entry {
@@ -183,23 +177,20 @@ struct qede_fdir_info {
*/
struct qede_dev {
struct ecore_dev edev;
- uint8_t protocol;
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
struct ecore_sb_info *sb_array;
struct qede_fastpath *fp_array;
- uint8_t num_tc;
uint16_t mtu;
+ uint16_t new_mtu;
bool rss_enable;
struct rte_eth_rss_conf rss_conf;
uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
uint64_t rss_hf;
uint8_t rss_key_len;
bool enable_lro;
- uint16_t num_queues;
- uint8_t fp_num_tx;
- uint8_t fp_num_rx;
- enum qede_dev_state state;
+ uint8_t num_rx_queues;
+ uint8_t num_tx_queues;
SLIST_HEAD(vlan_list_head, qede_vlan_entry)vlan_list_head;
uint16_t configured_vlans;
bool accept_any_vlan;
@@ -248,4 +239,10 @@ uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg);
+
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
+
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
+
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 405c525e..9864bb44 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -50,14 +50,26 @@ struct qed_dev_info {
bool geneve_enable;
};
-enum qed_sb_type {
- QED_SB_TYPE_L2_QUEUE,
- QED_SB_TYPE_STORAGE,
- QED_SB_TYPE_CNQ,
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ uint8_t num_queues;
+ uint8_t num_tc;
+
+ struct ether_addr port_mac;
+ uint16_t num_vlan_filters;
+ uint32_t num_mac_filters;
+
+ /* Legacy VF - this affects the datapath */
+ bool is_legacy;
};
-enum qed_protocol {
- QED_PROTOCOL_ETH,
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+ int (*fill_dev_info)(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
};
struct qed_link_params {
@@ -99,64 +111,13 @@ struct qed_slowpath_params {
uint8_t name[NAME_SIZE];
};
-#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
-
-struct qed_eth_tlvs {
- u16 feat_flags;
- u8 mac[3][ETH_ALEN];
- u16 lso_maxoff;
- u16 lso_minseg;
- bool prom_mode;
- u16 num_txqs;
- u16 num_rxqs;
- u16 num_netqs;
- u16 flex_vlan;
- u32 tcp4_offloads;
- u32 tcp6_offloads;
- u16 tx_avg_qdepth;
- u16 rx_avg_qdepth;
- u8 txqs_empty;
- u8 rxqs_empty;
- u8 num_txqs_full;
- u8 num_rxqs_full;
-};
-
-struct qed_tunn_update_params {
- unsigned long tunn_mode_update_mask;
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
-};
-
struct qed_common_cb_ops {
void (*link_update)(void *dev, struct qed_link_output *link);
- void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
-};
-
-struct qed_selftest_ops {
-/**
- * @brief registers - Perform register tests
- *
- * @param edev
- *
- * @return 0 on success, error otherwise.
- */
- int (*registers)(struct ecore_dev *edev);
};
struct qed_common_ops {
int (*probe)(struct ecore_dev *edev,
struct rte_pci_device *pci_dev,
- enum qed_protocol protocol,
uint32_t dp_module, uint8_t dp_level, bool is_vf);
void (*set_name)(struct ecore_dev *edev, char name[]);
enum _ecore_status_t
@@ -196,7 +157,7 @@ struct qed_common_ops {
struct ecore_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr,
- uint16_t sb_id, enum qed_sb_type type);
+ uint16_t sb_id);
int (*get_sb_info)(struct ecore_dev *edev,
struct ecore_sb_info *sb, u16 qid,
@@ -210,4 +171,8 @@ struct qed_common_ops {
int (*send_drv_state)(struct ecore_dev *edev, bool active);
};
+/* Externs */
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+
#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 25c14d8b..15821151 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -16,16 +16,21 @@
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__)
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
- rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
- "[QEDE PMD: (%s)]%s:" fmt, \
- (p_dev)->name ? (p_dev)->name : "", \
- __func__, \
- ##__VA_ARGS__)
-#else
-#define DP_NOTICE(p_dev, fmt, ...) do { } while (0)
-#endif
+do { \
+ if (is_assert) \
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+ else \
+ rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+} while (0)
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_INFO(p_dev, fmt, ...) \
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 712c03fd..a6ff7af2 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -19,7 +19,7 @@
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.18.9.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.20.0.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -40,16 +40,14 @@ static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
static int
qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
- enum qed_protocol protocol, uint32_t dp_module,
- uint8_t dp_level, bool is_vf)
+ uint32_t dp_module, uint8_t dp_level, bool is_vf)
{
struct ecore_hw_prepare_params hw_prepare_params;
- struct qede_dev *qdev = (struct qede_dev *)edev;
int rc;
ecore_init_struct(edev);
edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
- qdev->protocol = protocol;
+ /* Protocol type is always fixed to PROTOCOL_ETH */
if (is_vf)
edev->b_is_vf = true;
@@ -62,6 +60,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
hw_prepare_params.drv_resc_alloc = false;
hw_prepare_params.chk_reg_fifo = false;
hw_prepare_params.initiate_pf_flr = true;
+ hw_prepare_params.allow_mdump = false;
hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
@@ -132,12 +131,12 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
fd = open(fw_file, O_RDONLY);
if (fd < 0) {
- DP_NOTICE(edev, false, "Can't open firmware file\n");
+ DP_ERR(edev, "Can't open firmware file\n");
return -ENOENT;
}
if (fstat(fd, &st) < 0) {
- DP_NOTICE(edev, false, "Can't stat firmware file\n");
+ DP_ERR(edev, "Can't stat firmware file\n");
close(fd);
return -1;
}
@@ -145,20 +144,20 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
edev->firmware = rte_zmalloc("qede_fw", st.st_size,
RTE_CACHE_LINE_SIZE);
if (!edev->firmware) {
- DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
+ DP_ERR(edev, "Can't allocate memory for firmware\n");
close(fd);
return -ENOMEM;
}
if (read(fd, edev->firmware, st.st_size) != st.st_size) {
- DP_NOTICE(edev, false, "Can't read firmware data\n");
+ DP_ERR(edev, "Can't read firmware data\n");
close(fd);
return -1;
}
edev->fw_len = st.st_size;
if (edev->fw_len < 104) {
- DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
+ DP_ERR(edev, "Invalid fw size: %" PRIu64 "\n",
edev->fw_len);
close(fd);
return -EINVAL;
@@ -262,8 +261,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(edev);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed to allocate stream memory\n");
+ DP_ERR(edev, "Failed to allocate stream memory\n");
goto err1;
}
}
@@ -303,8 +301,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed sending drv version command\n");
+ DP_ERR(edev, "Failed sending drv version command\n");
goto err3;
}
}
@@ -460,23 +457,14 @@ static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
static uint32_t
qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
- void *sb_virt_addr, dma_addr_t sb_phy_addr,
- uint16_t sb_id, enum qed_sb_type type)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, uint16_t sb_id)
{
struct ecore_hwfn *p_hwfn;
int hwfn_index;
uint16_t rel_sb_id;
- uint8_t n_hwfns;
+ uint8_t n_hwfns = edev->num_hwfns;
uint32_t rc;
- /* RoCE uses single engine and CMT uses two engines. When using both
- * we force only a single engine. Storage uses only engine 0 too.
- */
- if (type == QED_SB_TYPE_L2_QUEUE)
- n_hwfns = edev->num_hwfns;
- else
- n_hwfns = 1;
-
hwfn_index = sb_id % n_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
rel_sb_id = sb_id / n_hwfns;
@@ -617,7 +605,7 @@ static int qed_drain(struct ecore_dev *edev)
hwfn = &edev->hwfns[i];
ptt = ecore_ptt_acquire(hwfn);
if (!ptt) {
- DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
+ DP_ERR(hwfn, "Failed to drain NIG; No PTT\n");
return -EBUSY;
}
rc = ecore_mcp_drain(hwfn, ptt);
@@ -710,7 +698,7 @@ static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
ptt = ecore_ptt_acquire(hwfn);
if (!ptt) {
- DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
+ DP_ERR(hwfn, "Can't acquire PTT\n");
return -EAGAIN;
}
@@ -737,3 +725,13 @@ const struct qed_common_ops qed_common_ops_pass = {
INIT_STRUCT_FIELD(remove, &qed_remove),
INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
};
+
+const struct qed_eth_ops qed_eth_ops_pass = {
+ INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+ INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+ return &qed_eth_ops_pass;
+}
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index f5aa43dd..5c3613c7 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -37,64 +37,20 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
return 0;
}
-static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
-{
- uint16_t i;
-
- if (rxq->sw_rx_ring != NULL) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_rx_ring[i].mbuf != NULL) {
- rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
- rxq->sw_rx_ring[i].mbuf = NULL;
- }
- }
- }
-}
-
-void qede_rx_queue_release(void *rx_queue)
-{
- struct qede_rx_queue *rxq = rx_queue;
-
- if (rxq != NULL) {
- qede_rx_queue_release_mbufs(rxq);
- rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
- rte_free(rxq);
- rxq = NULL;
- }
-}
-
-static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
-{
- unsigned int i;
-
- PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
-
- if (txq->sw_tx_ring) {
- for (i = 0; i < txq->nb_tx_desc; i++) {
- if (txq->sw_tx_ring[i].mbuf) {
- rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
- txq->sw_tx_ring[i].mbuf = NULL;
- }
- }
- }
-}
-
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
__rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct qede_dev *qdev = dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
uint16_t max_rx_pkt_len;
uint16_t bufsz;
size_t size;
int rc;
- int i;
PMD_INIT_FUNC_TRACE(edev);
@@ -126,6 +82,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
+
max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
qdev->mtu = max_rx_pkt_len;
@@ -138,6 +95,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
dev->data->scattered_rx = 1;
}
}
+
if (dev->data->scattered_rx)
rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
@@ -153,11 +111,9 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_rx_ring) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for sw_rx_ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
+ " socket %u\n", socket_id);
rte_free(rxq);
- rxq = NULL;
return -ENOMEM;
}
@@ -172,13 +128,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for rxbd ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for RX BD ring"
+ " on socket %u\n", socket_id);
rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
rte_free(rxq);
- rxq = NULL;
return -ENOMEM;
}
@@ -193,50 +146,91 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for cqe ring on socket %u\n",
- socket_id);
- /* TBD: Freeing RX BD ring */
+ DP_ERR(edev, "Memory allocation fails for RX CQE ring"
+ " on socket %u\n", socket_id);
+ qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
rte_free(rxq);
return -ENOMEM;
}
- /* Allocate buffers for the Rx ring */
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- rc = qede_alloc_rx_buffer(rxq);
- if (rc) {
- DP_NOTICE(edev, false,
- "RX buffer allocation failed at idx=%d\n", i);
- goto err4;
- }
- }
-
dev->data->rx_queues[queue_idx] = rxq;
+ qdev->fp_array[queue_idx].rxq = rxq;
DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
queue_idx, nb_desc, qdev->mtu, socket_id);
return 0;
-err4:
- qede_rx_queue_release(rxq);
- return -ENOMEM;
}
-void qede_tx_queue_release(void *tx_queue)
+static void
+qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq)
{
- struct qede_tx_queue *txq = tx_queue;
+ DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
+ ecore_chain_reset(&rxq->rx_bd_ring);
+ ecore_chain_reset(&rxq->rx_comp_ring);
+ rxq->sw_rx_prod = 0;
+ rxq->sw_rx_cons = 0;
+ *rxq->hw_cons_ptr = 0;
+}
- if (txq != NULL) {
- qede_tx_queue_release_mbufs(txq);
- if (txq->sw_tx_ring) {
- rte_free(txq->sw_tx_ring);
- txq->sw_tx_ring = NULL;
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (rxq->sw_rx_ring) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_rx_ring[i].mbuf) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+ rxq->sw_rx_ring[i].mbuf = NULL;
+ }
}
- rte_free(txq);
}
- txq = NULL;
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+ struct qede_rx_queue *rxq = rx_queue;
+
+ if (rxq) {
+ qede_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_rx_ring);
+ rte_free(rxq);
+ }
+}
+
+/* Stops a given RX queue in the HW */
+static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_rx_queue *rxq;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
+ true, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
+ return -1;
+ }
+ qede_rx_queue_release_mbufs(rxq);
+ qede_rx_queue_reset(qdev, rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
}
int
@@ -305,6 +299,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
DP_ERR(edev,
"Unable to allocate memory for txbd ring on socket %u",
socket_id);
+ qdev->ops->common->chain_free(edev, &txq->tx_pbl);
qede_tx_queue_release(txq);
return -ENOMEM;
}
@@ -318,6 +313,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
(txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
dev->data->tx_queues[queue_idx] = txq;
+ qdev->fp_array[queue_idx].txq = txq;
DP_INFO(edev,
"txq %u num_desc %u tx_free_thresh %u socket %u\n",
@@ -326,71 +322,40 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
-static void qede_init_fp(struct qede_dev *qdev)
+static void
+qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_tx_queue *txq)
{
- struct qede_fastpath *fp;
- uint8_t i;
- int fp_rx = qdev->fp_num_rx;
-
- memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
- sizeof(*qdev->fp_array)));
- memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
- sizeof(*qdev->sb_array)));
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp_rx) {
- fp->type = QEDE_FASTPATH_RX;
- fp_rx--;
- } else{
- fp->type = QEDE_FASTPATH_TX;
- }
- fp->qdev = qdev;
- fp->id = i;
- fp->sb_info = &qdev->sb_array[i];
- snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
- }
-
+ DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
+ ecore_chain_reset(&txq->tx_pbl);
+ txq->sw_tx_cons = 0;
+ txq->sw_tx_prod = 0;
+ *txq->hw_cons_ptr = 0;
}
-void qede_free_fp_arrays(struct qede_dev *qdev)
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
- /* It asseumes qede_free_mem_load() is called before */
- if (qdev->fp_array != NULL) {
- rte_free(qdev->fp_array);
- qdev->fp_array = NULL;
- }
+ uint16_t i;
- if (qdev->sb_array != NULL) {
- rte_free(qdev->sb_array);
- qdev->sb_array = NULL;
+ if (txq->sw_tx_ring) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_tx_ring[i].mbuf) {
+ rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+ txq->sw_tx_ring[i].mbuf = NULL;
+ }
+ }
}
}
-static int qede_alloc_fp_array(struct qede_dev *qdev)
+void qede_tx_queue_release(void *tx_queue)
{
- struct ecore_dev *edev = &qdev->edev;
-
- qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
- sizeof(*qdev->fp_array),
- RTE_CACHE_LINE_SIZE);
-
- if (!qdev->fp_array) {
- DP_ERR(edev, "fp array allocation failed\n");
- return -ENOMEM;
- }
-
- qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
- sizeof(*qdev->sb_array),
- RTE_CACHE_LINE_SIZE);
+ struct qede_tx_queue *txq = tx_queue;
- if (!qdev->sb_array) {
- DP_ERR(edev, "sb array allocation failed\n");
- rte_free(qdev->fp_array);
- return -ENOMEM;
+ if (txq) {
+ qede_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_tx_ring);
+ rte_free(txq);
}
-
- return 0;
}
/* This function allocates fast-path status block memory */
@@ -398,24 +363,23 @@ static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
uint16_t sb_id)
{
- struct ecore_dev *edev = &qdev->edev;
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
- sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
-
+ sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
+ sizeof(struct status_block));
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
}
-
- rc = qdev->ops->common->sb_init(edev, sb_info,
- sb_virt, sb_phys, sb_id,
- QED_SB_TYPE_L2_QUEUE);
+ rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
+ sb_phys, sb_id);
if (rc) {
DP_ERR(edev, "Status block initialization failed\n");
- /* TBD: No dma_free_coherent possible */
+ OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
+ sizeof(struct status_block));
return rc;
}
@@ -427,9 +391,7 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
struct ecore_dev *edev = &qdev->edev;
struct qede_fastpath *fp;
uint32_t num_sbs;
- uint16_t i;
uint16_t sb_idx;
- int rc;
if (IS_VF(edev))
ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
@@ -442,25 +404,31 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
return -EINVAL;
}
- if (qdev->fp_array)
- qede_free_fp_arrays(qdev);
+ qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
+ sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
- rc = qede_alloc_fp_array(qdev);
- if (rc != 0)
- return rc;
+ if (!qdev->fp_array) {
+ DP_ERR(edev, "fp array allocation failed\n");
+ return -ENOMEM;
+ }
- qede_init_fp(qdev);
+ memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
+ sizeof(*qdev->fp_array));
- for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
- fp = &qdev->fp_array[i];
- if (IS_VF(edev))
- sb_idx = i % num_sbs;
- else
- sb_idx = i;
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+ fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!fp->sb_info) {
+ DP_ERR(edev, "FP sb_info allocation fails\n");
+ return -1;
+ }
if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
- qede_free_fp_arrays(qdev);
- return -ENOMEM;
+ DP_ERR(edev, "FP status block allocation fails\n");
+ return -1;
}
+ DP_INFO(edev, "sb_info idx 0x%x initialized\n",
+ fp->sb_info->igu_sb_id);
}
return 0;
@@ -469,9 +437,54 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_fastpath *fp;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txq;
+ uint16_t sb_idx;
+ uint8_t i;
- qede_free_mem_load(eth_dev);
- qede_free_fp_arrays(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
+
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+ DP_INFO(edev, "Free sb_info index 0x%x\n",
+ fp->sb_info->igu_sb_id);
+ if (fp->sb_info) {
+ OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
+ fp->sb_info->sb_phys,
+ sizeof(struct status_block));
+ rte_free(fp->sb_info);
+ fp->sb_info = NULL;
+ }
+ }
+
+ /* Free packet buffers and ring memories */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ if (eth_dev->data->rx_queues[i]) {
+ qede_rx_queue_release(eth_dev->data->rx_queues[i]);
+ rxq = eth_dev->data->rx_queues[i];
+ qdev->ops->common->chain_free(edev,
+ &rxq->rx_bd_ring);
+ qdev->ops->common->chain_free(edev,
+ &rxq->rx_comp_ring);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ if (eth_dev->data->tx_queues[i]) {
+ txq = eth_dev->data->tx_queues[i];
+ qede_tx_queue_release(eth_dev->data->tx_queues[i]);
+ qdev->ops->common->chain_free(edev,
+ &txq->tx_pbl);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ if (qdev->fp_array)
+ rte_free(qdev->fp_array);
+ qdev->fp_array = NULL;
}
static inline void
@@ -506,165 +519,297 @@ qede_update_rx_prod(__rte_unused struct qede_dev *edev,
PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}
-static void
-qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
- uint16_t mtu, bool enable)
-{
- /* Enable LRO in split mode */
- sge_tpa_params->tpa_ipv4_en_flg = enable;
- sge_tpa_params->tpa_ipv6_en_flg = enable;
- sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
- sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
- /* set if tpa enable changes */
- sge_tpa_params->update_tpa_en_flg = 1;
- /* set if tpa parameters should be handled */
- sge_tpa_params->update_tpa_param_flg = enable;
-
- sge_tpa_params->max_buffers_per_cqe = 20;
- /* Enable TPA in split mode. In this mode each TPA segment
- * starts on the new BD, so there is one BD per segment.
- */
- sge_tpa_params->tpa_pkt_split_flg = 1;
- sge_tpa_params->tpa_hdr_data_split_flg = 0;
- sge_tpa_params->tpa_gro_consistent_flg = 0;
- sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
- sge_tpa_params->tpa_max_size = 0x7FFF;
- sge_tpa_params->tpa_min_size_to_start = mtu / 2;
- sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
-}
-
-static int qede_start_queues(struct rte_eth_dev *eth_dev,
- __rte_unused bool clear_stats)
+/* Starts a given RX queue in HW */
+static int
+qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- struct ecore_queue_start_common_params q_params;
- struct qed_dev_info *qed_info = &qdev->dev_info.common;
- struct qed_update_vport_params vport_update_params;
- struct ecore_sge_tpa_params tpa_params;
- struct qede_tx_queue *txq;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_rxq_start_ret_params ret_params;
+ struct qede_rx_queue *rxq;
struct qede_fastpath *fp;
+ struct ecore_hwfn *p_hwfn;
dma_addr_t p_phys_table;
- int txq_index;
uint16_t page_cnt;
- int rc, tc, i;
-
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- struct ecore_rxq_start_ret_params ret_params;
-
- p_phys_table =
- ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
- page_cnt =
- ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
-
- memset(&ret_params, 0, sizeof(ret_params));
- memset(&q_params, 0, sizeof(q_params));
- q_params.queue_id = i;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = RX_PI;
-
- ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
-
- rc = qdev->ops->q_rx_start(edev, i, &q_params,
- fp->rxq->rx_buf_size,
- fp->rxq->rx_bd_ring.p_phys_addr,
- p_phys_table,
- page_cnt,
- &ret_params);
+ uint16_t j;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ fp = &qdev->fp_array[rx_queue_id];
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ /* Allocate buffers for the Rx ring */
+ for (j = 0; j < rxq->nb_rx_desc; j++) {
+ rc = qede_alloc_rx_buffer(rxq);
if (rc) {
- DP_ERR(edev, "Start rxq #%d failed %d\n",
- fp->rxq->queue_id, rc);
- return rc;
+ DP_ERR(edev, "RX buffer allocation failed"
+ " for rxq = %u\n", rx_queue_id);
+ return -ENOMEM;
}
+ }
+ /* disable interrupts */
+ ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+ /* Prepare ramrod */
+ memset(&params, 0, sizeof(params));
+ params.queue_id = rx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.sb = fp->sb_info->igu_sb_id;
+ DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
+ fp->rxq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = RX_PI;
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+ memset(&ret_params, 0, sizeof(ret_params));
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc) {
+ DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
+ rx_queue_id, rc);
+ return -1;
+ }
+ /* Update with the returned parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ qede_update_rx_prod(qdev, fp->rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
- /* Use the return parameters */
- fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
- fp->rxq->handle = ret_params.p_handle;
+ return rc;
+}
- fp->rxq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[RX_PI];
+static int
+qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_txq_start_ret_params ret_params;
+ struct ecore_hwfn *p_hwfn;
+ dma_addr_t p_phys_table;
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ uint16_t page_cnt;
+ int hwfn_index;
+ int rc;
- qede_update_rx_prod(qdev, fp->rxq);
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ fp = &qdev->fp_array[tx_queue_id];
+ memset(&params, 0, sizeof(params));
+ params.queue_id = tx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.sb = fp->sb_info->igu_sb_id;
+ DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
+ fp->txq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = TX_PI(0); /* tc = 0 */
+ p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ if (qdev->dev_info.is_legacy)
+ fp->txq->is_legacy = true;
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, 0 /* tc */,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
+ tx_queue_id, rc);
+ return -1;
}
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
+ DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
+ }
- if (!(fp->type & QEDE_FASTPATH_TX))
- continue;
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct ecore_txq_start_ret_params ret_params;
+ return rc;
+}
- txq = fp->txqs[tc];
- txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
+{
+ struct rte_mbuf *mbuf;
+ uint16_t nb_segs;
+ uint16_t idx;
- p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
- page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ idx = TX_CONS(txq);
+ mbuf = txq->sw_tx_ring[idx].mbuf;
+ if (mbuf) {
+ nb_segs = mbuf->nb_segs;
+ PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+ while (nb_segs) {
+ /* It's like consuming rxbuf in recv() */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ txq->sw_tx_cons++;
+ PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+ } else {
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ }
+}
- memset(&q_params, 0, sizeof(q_params));
- memset(&ret_params, 0, sizeof(ret_params));
- q_params.queue_id = txq->queue_id;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = TX_PI(tc);
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ uint16_t sw_tx_cons;
+#endif
- rc = qdev->ops->q_tx_start(edev, i, &q_params,
- p_phys_table,
- page_cnt, /* **pp_doorbell */
- &ret_params);
- if (rc) {
- DP_ERR(edev, "Start txq %u failed %d\n",
- txq_index, rc);
- return rc;
+ rte_compiler_barrier();
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+ abs(hw_bd_cons - sw_tx_cons));
+#endif
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
+ qede_free_tx_pkt(txq);
+}
+
+static int qede_drain_txq(struct qede_dev *qdev,
+ struct qede_tx_queue *txq, bool allow_drain)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ int rc, cnt = 1000;
+
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ qede_process_tx_compl(edev, txq);
+ if (!cnt) {
+ if (allow_drain) {
+ DP_ERR(edev, "Tx queue[%u] is stuck,"
+ "requesting MCP to drain\n",
+ txq->queue_id);
+ rc = qdev->ops->common->drain(edev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(qdev, txq, false);
}
+ DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
+ "PROD=%d, CONS=%d\n",
+ txq->queue_id, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -1;
+ }
+ cnt--;
+ DELAY(1000);
+ rte_compiler_barrier();
+ }
- txq->doorbell_addr = ret_params.p_doorbell;
- txq->handle = ret_params.p_handle;
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ DELAY(2000);
- txq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
- DB_AGG_CMD_SET);
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_ETH_TX_BD_PROD_CMD);
+ return 0;
+}
+
+/* Stops a given TX queue in the HW */
+static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_tx_queue *txq;
+ int hwfn_index;
+ int rc;
- txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ /* Drain txq */
+ if (qede_drain_txq(qdev, txq, true))
+ return -1; /* For the lack of retcodes */
+ /* Stop txq */
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
+ return -1;
}
+ qede_tx_queue_release_mbufs(txq);
+ qede_tx_queue_reset(qdev, txq);
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
}
- /* Prepare and send the vport enable */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- /* Update MTU via vport update */
- vport_update_params.mtu = qdev->mtu;
- vport_update_params.vport_id = 0;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 1;
-
- /* @DPDK */
- if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
- /* TBD: Check SRIOV enabled for VF */
- vport_update_params.update_tx_switching_flg = 1;
- vport_update_params.tx_switching_flg = 1;
+ return rc;
+}
+
+int qede_start_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+ int rc;
+
+ for_each_rss(id) {
+ rc = qede_rx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
}
- /* TPA */
- if (qdev->enable_lro) {
- DP_INFO(edev, "Enabling LRO\n");
- memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
- qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
- vport_update_params.sge_tpa_params = &tpa_params;
+ for_each_tss(id) {
+ rc = qede_tx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
}
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
+ return rc;
+}
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+
+ /* Stopping RX/TX queues */
+ for_each_tss(id) {
+ qede_tx_queue_stop(eth_dev, id);
}
- return 0;
+ for_each_rss(id) {
+ qede_rx_queue_stop(eth_dev, id);
+ }
}
static bool qede_tunn_exist(uint16_t flag)
@@ -937,7 +1082,7 @@ qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
pkt_len;
if (unlikely(!cur_size)) {
PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
- " left for mapping jumbo", num_segs);
+ " left for mapping jumbo\n", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}
@@ -961,7 +1106,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct qede_rx_queue *rxq = p_rxq;
struct qede_dev *qdev = rxq->qdev;
struct ecore_dev *edev = &qdev->edev;
- struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
uint16_t rx_pkt = 0;
union eth_rx_cqe *cqe;
@@ -1047,7 +1191,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto tpa_end;
case ETH_RX_CQE_TYPE_SLOW_PATH:
PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
- qdev->ops->eth_cqe_completion(edev, fp->id,
+ ecore_eth_cqe_completion(
+ &edev->hwfns[rxq->queue_id % edev->num_hwfns],
(struct eth_slow_path_rx_cqe *)cqe);
/* fall-thru */
default:
@@ -1235,53 +1380,6 @@ next_cqe:
return rx_pkt;
}
-static inline void
-qede_free_tx_pkt(struct qede_tx_queue *txq)
-{
- struct rte_mbuf *mbuf;
- uint16_t nb_segs;
- uint16_t idx;
-
- idx = TX_CONS(txq);
- mbuf = txq->sw_tx_ring[idx].mbuf;
- if (mbuf) {
- nb_segs = mbuf->nb_segs;
- PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
- while (nb_segs) {
- /* It's like consuming rxbuf in recv() */
- ecore_chain_consume(&txq->tx_pbl);
- txq->nb_tx_avail++;
- nb_segs--;
- }
- rte_pktmbuf_free(mbuf);
- txq->sw_tx_ring[idx].mbuf = NULL;
- txq->sw_tx_cons++;
- PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
- } else {
- ecore_chain_consume(&txq->tx_pbl);
- txq->nb_tx_avail++;
- }
-}
-
-static inline void
-qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
- struct qede_tx_queue *txq)
-{
- uint16_t hw_bd_cons;
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
- uint16_t sw_tx_cons;
-#endif
-
- rte_compiler_barrier();
- hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
- sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
- PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
- abs(hw_bd_cons - sw_tx_cons));
-#endif
- while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
- qede_free_tx_pkt(txq);
-}
/* Populate scatter gather buffer descriptor fields */
static inline uint8_t
@@ -1378,7 +1476,9 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
uint64_t ol_flags;
struct rte_mbuf *m;
uint16_t i;
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
int ret;
+#endif
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
@@ -1411,14 +1511,6 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
break;
}
#endif
- /* TBD: pseudo csum calcuation required iff
- * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
- */
- ret = rte_net_intel_cksum_prepare(m);
- if (ret != 0) {
- rte_errno = ret;
- break;
- }
}
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
@@ -1429,6 +1521,27 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
return i;
}
+#define MPLSINUDP_HDR_SIZE (12)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
+ struct qede_tx_queue *txq)
+{
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
+ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
+}
+#endif
+
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -1443,14 +1556,30 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t nb_frags;
uint16_t nb_pkt_sent = 0;
uint8_t nbds;
- bool ipv6_ext_flg;
bool lso_flg;
- bool tunn_flg;
+ bool mplsoudp_flg;
+ __rte_unused bool tunn_flg;
+ bool tunn_ipv6_ext_flg;
struct eth_tx_1st_bd *bd1;
struct eth_tx_2nd_bd *bd2;
struct eth_tx_3rd_bd *bd3;
uint64_t tx_ol_flags;
uint16_t hdr_size;
+ /* BD1 */
+ uint16_t bd1_bf;
+ uint8_t bd1_bd_flags_bf;
+ uint16_t vlan;
+ /* BD2 */
+ uint16_t bd2_bf1;
+ uint16_t bd2_bf2;
+ /* BD3 */
+ uint16_t mss;
+ uint16_t bd3_bf;
+
+ uint8_t tunn_l4_hdr_start_offset;
+ uint8_t tunn_hdr_size;
+ uint8_t inner_l2_hdr_size;
+ uint16_t inner_l4_hdr_offset;
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
@@ -1462,14 +1591,24 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
while (nb_tx_pkts--) {
/* Init flags/values */
- ipv6_ext_flg = false;
tunn_flg = false;
lso_flg = false;
nbds = 0;
+ vlan = 0;
bd1 = NULL;
bd2 = NULL;
bd3 = NULL;
hdr_size = 0;
+ bd1_bf = 0;
+ bd1_bd_flags_bf = 0;
+ bd2_bf1 = 0;
+ bd2_bf2 = 0;
+ mss = 0;
+ bd3_bf = 0;
+ mplsoudp_flg = false;
+ tunn_ipv6_ext_flg = false;
+ tunn_hdr_size = 0;
+ tunn_l4_hdr_start_offset = 0;
mbuf = *tx_pkts++;
assert(mbuf);
@@ -1479,36 +1618,177 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
break;
tx_ol_flags = mbuf->ol_flags;
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
-#define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
- if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
- ipv6_ext_flg = true;
-
- if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
+ /* TX prepare would have already checked supported tunnel Tx
+ * offloads. Don't rely on pkt_type marked by Rx, instead use
+ * tx_ol_flags to decide.
+ */
+ if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_VXLAN) ||
+ ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_MPLSINUDP)) {
+ /* Check against max which is Tunnel IPv6 + ext */
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+ break;
tunn_flg = true;
+ /* First indicate its a tunnel pkt */
+ bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ /* Legacy FW had flipped behavior in regard to this bit
+ * i.e. it needed to be set to prevent FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy)) {
+ bd1_bf ^= 1 <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ }
- if (tx_ol_flags & PKT_TX_TCP_SEG)
- lso_flg = true;
+ /* Outer IP checksum offload */
+ if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_OUTER_IPV4)) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ }
- if (lso_flg) {
+ /**
+ * Currently, only inner checksum offload in MPLS-in-UDP
+ * tunnel with one MPLS label is supported. Both outer
+ * and inner layers lengths need to be provided in
+ * mbuf.
+ */
+ if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_MPLSINUDP) {
+ mplsoudp_flg = true;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ qede_mpls_tunn_tx_sanity_check(mbuf, txq);
+#endif
+ /* Outer L4 offset in two byte words */
+ tunn_l4_hdr_start_offset =
+ (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
+ /* Tunnel header size in two byte words */
+ tunn_hdr_size = (mbuf->outer_l2_len +
+ mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2;
+ /* Inner L2 header size in two byte words */
+ inner_l2_hdr_size = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE) / 2;
+ /* Inner L4 header offset from the beginning
+ * of inner packet in two byte words
+ */
+ inner_l4_hdr_offset = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
+
+ /* Inner L2 size and address type */
+ bd2_bf1 |= (inner_l2_hdr_size &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
+ bd2_bf1 |= (UNICAST_ADDRESS &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
+ /* Treated as IPv6+Ext */
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
+
+ /* Mark inner IPv6 if present */
+ if (tx_ol_flags & PKT_TX_IPV6)
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
+
+ /* Inner L4 offsets */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM |
+ PKT_TX_TCP_CKSUM))) {
+ /* Determines if BD3 is needed */
+ tunn_ipv6_ext_flg = true;
+ if ((tx_ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_UDP_CKSUM) {
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+ }
+
+ /* TODO other pseudo checksum modes are
+ * not supported
+ */
+ bd2_bf1 |=
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+ bd2_bf2 |= (inner_l4_hdr_offset &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+ }
+ } /* End MPLSoUDP */
+ } /* End Tunnel handling */
+
+ if (tx_ol_flags & PKT_TX_TCP_SEG) {
+ lso_flg = true;
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_LSO_PKT))
break;
+ /* For LSO, packet header and payload must reside on
+ * buffers pointed by different BDs. Using BD1 for HDR
+ * and BD2 onwards for data.
+ */
+ hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ mss = rte_cpu_to_le_16(mbuf->tso_segsz);
+ /* Using one header BD */
+ bd3_bf |= rte_cpu_to_le_16(1 <<
+ ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
} else {
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
break;
+ bd1_bf |=
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}
- if (tunn_flg && ipv6_ext_flg) {
- if (unlikely(txq->nb_tx_avail <
- ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
- break;
+ /* Descriptor based VLAN insertion */
+ if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
- if (ipv6_ext_flg) {
- if (unlikely(txq->nb_tx_avail <
- ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
- break;
+
+ /* Offload the IP checksum in the hardware */
+ if (tx_ol_flags & PKT_TX_IP_CKSUM) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
+ }
+
+ /* L4 checksum offload (tcp or udp) */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
}
/* Fill the entry in the SW ring and the BDs in the FW ring */
@@ -1520,107 +1800,49 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
nbds++;
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
- /* FW 8.10.x specific change */
- if (!lso_flg) {
- bd1->data.bitfields |=
- (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
- << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
- /* Map MBUF linear data for DMA and set in the BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- mbuf->data_len);
- } else {
- /* For LSO, packet header and payload must reside on
- * buffers pointed by different BDs. Using BD1 for HDR
- * and BD2 onwards for data.
- */
- hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- hdr_size);
- }
+ /* Map MBUF linear data for DMA and set in the BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->data_len);
+ bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
+ bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
+ bd1->data.vlan = vlan;
- if (tunn_flg) {
- /* First indicate its a tunnel pkt */
- bd1->data.bitfields |=
- ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
- ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
- /* Legacy FW had flipped behavior in regard to this bit
- * i.e. it needed to set to prevent FW from touching
- * encapsulated packets when it didn't need to.
- */
- if (unlikely(txq->is_legacy))
- bd1->data.bitfields ^=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
- /* Outer IP checksum offload */
- if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
- bd1->data.bd_flags.bitfields |=
- ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
- ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
- }
-
- /* Outer UDP checksum offload */
- bd1->data.bd_flags.bitfields |=
- ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
- ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
- }
-
- /* Descriptor based VLAN insertion */
- if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
- }
-
- if (lso_flg)
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
-
- /* Offload the IP checksum in the hardware */
- if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-
- /* L4 checksum offload (tcp or udp) */
- if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
- PKT_TX_UDP_CKSUM)))
- /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
-
- /* BD2 */
- if (lso_flg || ipv6_ext_flg) {
+ if (lso_flg || mplsoudp_flg) {
bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
(&txq->tx_pbl);
memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
nbds++;
- QEDE_BD_SET_ADDR_LEN(bd2,
- (hdr_size +
- rte_mbuf_data_dma_addr(mbuf)),
- mbuf->data_len - hdr_size);
- /* TBD: check pseudo csum iff tx_prepare not called? */
- if (ipv6_ext_flg) {
- bd2->data.bitfields1 |=
- ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
- ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
- }
- }
- /* BD3 */
- if (lso_flg || ipv6_ext_flg) {
- bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
- (&txq->tx_pbl);
- memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
- nbds++;
- if (lso_flg) {
- bd3->data.lso_mss =
- rte_cpu_to_le_16(mbuf->tso_segsz);
- /* Using one header BD */
- bd3->data.bitfields |=
- rte_cpu_to_le_16(1 <<
- ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ /* BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ hdr_size);
+ /* BD2 */
+ QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
+ rte_mbuf_data_dma_addr(mbuf)),
+ mbuf->data_len - hdr_size);
+ bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
+ if (mplsoudp_flg) {
+ bd2->data.bitfields2 =
+ rte_cpu_to_le_16(bd2_bf2);
+ /* Outer L3 size */
+ bd2->data.tunn_ip_size =
+ rte_cpu_to_le_16(mbuf->outer_l3_len);
+ }
+ /* BD3 */
+ if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
+ bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nbds++;
+ bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
+ if (lso_flg)
+ bd3->data.lso_mss = mss;
+ if (mplsoudp_flg) {
+ bd3->data.tunn_l4_hdr_start_offset_w =
+ tunn_l4_hdr_start_offset;
+ bd3->data.tunn_hdr_size_w =
+ tunn_hdr_size;
+ }
}
}
@@ -1636,8 +1858,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
- PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
- lso_flg, tunn_flg, ipv6_ext_flg);
+ PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
#endif
nb_pkt_sent++;
txq->xmit_pkts++;
@@ -1659,290 +1880,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkt_sent;
}
-static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct qede_fastpath *fp;
- uint8_t i, txq_index, tc;
- int rxq = 0, txq = 0;
-
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- fp->rxq = eth_dev->data->rx_queues[i];
- fp->rxq->queue_id = rxq++;
- }
-
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
- fp->txqs[tc] =
- eth_dev->data->tx_queues[txq_index];
- fp->txqs[tc]->queue_id = txq_index;
- if (qdev->dev_info.is_legacy)
- fp->txqs[tc]->is_legacy = true;
- }
- txq++;
- }
- }
-}
-
-int qede_dev_start(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- int rc;
-
- DP_INFO(edev, "Device state is %d\n", qdev->state);
-
- if (qdev->state == QEDE_DEV_START) {
- DP_INFO(edev, "Port is already started\n");
- return 0;
- }
-
- if (qdev->state == QEDE_DEV_CONFIG)
- qede_init_fp_queue(eth_dev);
-
- rc = qede_start_queues(eth_dev, true);
- if (rc) {
- DP_ERR(edev, "Failed to start queues\n");
- /* TBD: free */
- return rc;
- }
-
- /* Newer SR-IOV PF driver expects RX/TX queues to be started before
- * enabling RSS. Hence RSS configuration is deferred upto this point.
- * Also, we would like to retain similar behavior in PF case, so we
- * don't do PF/VF specific check here.
- */
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
- if (qede_config_rss(eth_dev))
- return -1;
-
- /* Bring-up the link */
- qede_dev_set_link_state(eth_dev, true);
-
- /* Start/resume traffic */
- qdev->ops->fastpath_start(edev);
-
- qdev->state = QEDE_DEV_START;
-
- DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
-
- return 0;
-}
-
-static int qede_drain_txq(struct qede_dev *qdev,
- struct qede_tx_queue *txq, bool allow_drain)
-{
- struct ecore_dev *edev = &qdev->edev;
- int rc, cnt = 1000;
-
- while (txq->sw_tx_cons != txq->sw_tx_prod) {
- qede_process_tx_compl(edev, txq);
- if (!cnt) {
- if (allow_drain) {
- DP_ERR(edev, "Tx queue[%u] is stuck,"
- "requesting MCP to drain\n",
- txq->queue_id);
- rc = qdev->ops->common->drain(edev);
- if (rc)
- return rc;
- return qede_drain_txq(qdev, txq, false);
- }
- DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
- "PROD=%d, CONS=%d\n",
- txq->queue_id, txq->sw_tx_prod,
- txq->sw_tx_cons);
- return -1;
- }
- cnt--;
- DELAY(1000);
- rte_compiler_barrier();
- }
-
- /* FW finished processing, wait for HW to transmit all tx packets */
- DELAY(2000);
-
- return 0;
-}
-
-static int qede_stop_queues(struct qede_dev *qdev)
-{
- struct qed_update_vport_params vport_update_params;
- struct ecore_dev *edev = &qdev->edev;
- struct ecore_sge_tpa_params tpa_params;
- struct qede_fastpath *fp;
- int rc, tc, i;
-
- /* Disable the vport */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 0;
- vport_update_params.update_rss_flg = 0;
- /* Disable TPA */
- if (qdev->enable_lro) {
- DP_INFO(edev, "Disabling LRO\n");
- memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
- qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
- vport_update_params.sge_tpa_params = &tpa_params;
- }
-
- DP_INFO(edev, "Deactivate vport\n");
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Failed to update vport\n");
- return rc;
- }
-
- DP_INFO(edev, "Flushing tx queues\n");
-
- /* Flush Tx queues. If needed, request drain from MCP */
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
-
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
-
- rc = qede_drain_txq(qdev, txq, true);
- if (rc)
- return rc;
- }
- }
- }
-
- /* Stop all Queues in reverse order */
- for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
- fp = &qdev->fp_array[i];
-
- /* Stop the Tx Queue(s) */
- if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
- DP_INFO(edev, "Stopping tx queues\n");
- rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- i);
- return rc;
- }
- }
- }
-
- /* Stop the Rx Queue */
- if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
- DP_INFO(edev, "Stopping rx queues\n");
- rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
- return rc;
- }
- }
- }
- qede_reset_fp_rings(qdev);
-
- return 0;
-}
-
-int qede_reset_fp_rings(struct qede_dev *qdev)
-{
- struct qede_fastpath *fp;
- struct qede_tx_queue *txq;
- uint8_t tc;
- uint16_t id, i;
-
- for_each_queue(id) {
- fp = &qdev->fp_array[id];
-
- if (fp->type & QEDE_FASTPATH_RX) {
- DP_INFO(&qdev->edev,
- "Reset FP chain for RSS %u\n", id);
- qede_rx_queue_release_mbufs(fp->rxq);
- ecore_chain_reset(&fp->rxq->rx_bd_ring);
- ecore_chain_reset(&fp->rxq->rx_comp_ring);
- fp->rxq->sw_rx_prod = 0;
- fp->rxq->sw_rx_cons = 0;
- *fp->rxq->hw_cons_ptr = 0;
- for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
- if (qede_alloc_rx_buffer(fp->rxq)) {
- DP_ERR(&qdev->edev,
- "RX buffer allocation failed\n");
- return -ENOMEM;
- }
- }
- }
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- txq = fp->txqs[tc];
- qede_tx_queue_release_mbufs(txq);
- ecore_chain_reset(&txq->tx_pbl);
- txq->sw_tx_cons = 0;
- txq->sw_tx_prod = 0;
- *txq->hw_cons_ptr = 0;
- }
- }
- }
-
- return 0;
-}
-
-/* This function frees all memory of a single fp */
-void qede_free_mem_load(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct qede_fastpath *fp;
- uint16_t txq_idx;
- uint8_t id;
- uint8_t tc;
-
- for_each_queue(id) {
- fp = &qdev->fp_array[id];
- if (fp->type & QEDE_FASTPATH_RX) {
- if (!fp->rxq)
- continue;
- qede_rx_queue_release(fp->rxq);
- eth_dev->data->rx_queues[id] = NULL;
- } else {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- if (!fp->txqs[tc])
- continue;
- txq_idx = fp->txqs[tc]->queue_id;
- qede_tx_queue_release(fp->txqs[tc]);
- eth_dev->data->tx_queues[txq_idx] = NULL;
- }
- }
- }
-}
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
-
- DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
-
- if (qdev->state != QEDE_DEV_START) {
- DP_INFO(edev, "Device not yet started\n");
- return;
- }
-
- if (qede_stop_queues(qdev))
- DP_ERR(edev, "Didn't succeed to close queues\n");
-
- DP_INFO(edev, "Stopped queues\n");
-
- qdev->ops->fastpath_stop(edev);
-
- /* Bring the link down */
- qede_dev_set_link_state(eth_dev, false);
-
- qdev->state = QEDE_DEV_STOP;
-
- DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
-}
-
uint16_t
qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
__rte_unused struct rte_mbuf **pkts,
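
A note on the index arithmetic used throughout the new per-queue start/stop code above: a global queue id is split across hardware functions, so on CMT (two-engine) devices consecutive queues land on alternating engines. A minimal sketch of that mapping, with illustrative names, mirroring qede_rx_queue_start() and qede_tx_queue_stop():

#include <stdint.h>

/* Mirrors "hwfn_index = queue_id % edev->num_hwfns" and
 * "params.queue_id = queue_id / edev->num_hwfns" from the hunks above;
 * num_hwfns is 2 on CMT parts and 1 otherwise.
 */
static inline void example_map_queue(uint16_t queue_id, uint8_t num_hwfns,
				     int *hwfn_index, uint16_t *rel_queue_id)
{
	*hwfn_index = queue_id % num_hwfns;	/* which engine/hwfn */
	*rel_queue_id = queue_id / num_hwfns;	/* queue id within that hwfn */
}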
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 21f2dacd..b551fd6a 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -77,10 +77,10 @@
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
-#define MAX_NUM_TC 8
-
-#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
-
+#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
+#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
+#define QEDE_RXTX_MAX(qdev) \
+ (RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
/* Macros for non-tunnel packet types lkup table */
#define QEDE_PKT_TYPE_UNKNOWN 0x0
@@ -135,7 +135,8 @@
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_VXLAN)
+ PKT_TX_TUNNEL_VXLAN | \
+ PKT_TX_TUNNEL_MPLSINUDP)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
@@ -165,6 +166,7 @@ struct qede_rx_queue {
uint16_t *hw_cons_ptr;
void OSAL_IOMEM *hw_rxq_prod_addr;
struct qede_rx_entry *sw_rx_ring;
+ struct ecore_sb_info *sb_info;
uint16_t sw_rx_cons;
uint16_t sw_rx_prod;
uint16_t nb_rx_desc;
@@ -213,13 +215,9 @@ struct qede_tx_queue {
};
struct qede_fastpath {
- struct qede_dev *qdev;
- u8 type;
- uint8_t id;
struct ecore_sb_info *sb_info;
struct qede_rx_queue *rxq;
- struct qede_tx_queue *txqs[MAX_NUM_TC];
- char name[80];
+ struct qede_tx_queue *txq;
};
/*
@@ -240,16 +238,6 @@ void qede_rx_queue_release(void *rx_queue);
void qede_tx_queue_release(void *tx_queue);
-int qede_dev_start(struct rte_eth_dev *eth_dev);
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev);
-
-int qede_reset_fp_rings(struct qede_dev *qdev);
-
-void qede_free_fp_arrays(struct qede_dev *qdev);
-
-void qede_free_mem_load(struct rte_eth_dev *eth_dev);
-
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -259,9 +247,13 @@ uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
- __rte_unused struct rte_mbuf **pkts,
- __rte_unused uint16_t nb_pkts);
+uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
+
+int qede_start_queues(struct rte_eth_dev *eth_dev);
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index d4dce957..464d3d38 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -45,12 +45,23 @@
#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
#define ETH_RING_ACTION_CREATE "CREATE"
#define ETH_RING_ACTION_ATTACH "ATTACH"
+#define ETH_RING_INTERNAL_ARG "internal"
static const char *valid_arguments[] = {
ETH_RING_NUMA_NODE_ACTION_ARG,
+ ETH_RING_INTERNAL_ARG,
NULL
};
+struct ring_internal_args {
+ struct rte_ring * const *rx_queues;
+ const unsigned int nb_rx_queues;
+ struct rte_ring * const *tx_queues;
+ const unsigned int nb_tx_queues;
+ const unsigned int numa_node;
+ void *addr; /* self addr for sanity check */
+};
+
enum dev_action {
DEV_CREATE,
DEV_ATTACH
@@ -263,11 +274,14 @@ static int
do_eth_dev_ring_create(const char *name,
struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
- const unsigned numa_node, enum dev_action action)
+ const unsigned int numa_node, enum dev_action action,
+ struct rte_eth_dev **eth_dev_p)
{
struct rte_eth_dev_data *data = NULL;
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
+ void **rx_queues_local = NULL;
+ void **tx_queues_local = NULL;
unsigned i;
RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
@@ -282,16 +296,16 @@ do_eth_dev_ring_create(const char *name,
goto error;
}
- data->rx_queues = rte_zmalloc_socket(name,
+ rx_queues_local = rte_zmalloc_socket(name,
sizeof(void *) * nb_rx_queues, 0, numa_node);
- if (data->rx_queues == NULL) {
+ if (rx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
}
- data->tx_queues = rte_zmalloc_socket(name,
+ tx_queues_local = rte_zmalloc_socket(name,
sizeof(void *) * nb_tx_queues, 0, numa_node);
- if (data->tx_queues == NULL) {
+ if (tx_queues_local == NULL) {
rte_errno = ENOMEM;
goto error;
}
@@ -318,6 +332,10 @@ do_eth_dev_ring_create(const char *name,
/* NOTE: we'll replace the data element, of originally allocated eth_dev
* so the rings are local per-process */
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data->rx_queues = rx_queues_local;
+ data->tx_queues = tx_queues_local;
+
internals->action = action;
internals->max_rx_queues = nb_rx_queues;
internals->max_tx_queues = nb_tx_queues;
@@ -331,8 +349,6 @@ do_eth_dev_ring_create(const char *name,
}
data->dev_private = internals;
- data->port_id = eth_dev->data->port_id;
- memmove(data->name, eth_dev->data->name, sizeof(data->name));
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
@@ -342,20 +358,19 @@ do_eth_dev_ring_create(const char *name,
eth_dev->dev_ops = &ops;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
data->kdrv = RTE_KDRV_NONE;
- data->drv_name = pmd_ring_drv.driver.name;
data->numa_node = numa_node;
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_ring_rx;
eth_dev->tx_pkt_burst = eth_ring_tx;
+ *eth_dev_p = eth_dev;
+
return data->port_id;
error:
- if (data) {
- rte_free(data->rx_queues);
- rte_free(data->tx_queues);
- }
+ rte_free(rx_queues_local);
+ rte_free(tx_queues_local);
rte_free(data);
rte_free(internals);
@@ -369,6 +384,19 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
const unsigned nb_tx_queues,
const unsigned numa_node)
{
+ struct ring_internal_args args = {
+ .rx_queues = rx_queues,
+ .nb_rx_queues = nb_rx_queues,
+ .tx_queues = tx_queues,
+ .nb_tx_queues = nb_tx_queues,
+ .numa_node = numa_node,
+ .addr = &args,
+ };
+ char args_str[32] = { 0 };
+ char ring_name[32] = { 0 };
+ uint8_t port_id = RTE_MAX_ETHPORTS;
+ int ret;
+
/* do some parameter checking */
if (rx_queues == NULL && nb_rx_queues > 0) {
rte_errno = EINVAL;
@@ -383,8 +411,18 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
return -1;
}
- return do_eth_dev_ring_create(name, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, numa_node, DEV_ATTACH);
+ snprintf(args_str, 32, "%s=%p", ETH_RING_INTERNAL_ARG, &args);
+ snprintf(ring_name, 32, "net_ring_%s", name);
+
+ ret = rte_vdev_init(ring_name, args_str);
+ if (ret) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ rte_eth_dev_get_port_by_name(ring_name, &port_id);
+
+ return port_id;
}
int
@@ -396,7 +434,7 @@ rte_eth_from_ring(struct rte_ring *r)
static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
- enum dev_action action)
+ enum dev_action action, struct rte_eth_dev **eth_dev)
{
/* rx and tx are so-called from point of view of first port.
* They are inverted from the point of view of second port
@@ -418,7 +456,7 @@ eth_dev_ring_create(const char *name, const unsigned numa_node,
}
if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
- numa_node, action) < 0)
+ numa_node, action, eth_dev) < 0)
return -1;
return 0;
@@ -450,13 +488,14 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
ret = -EINVAL;
if (!name) {
- RTE_LOG(WARNING, PMD, "command line paramter is empty for ring pmd!\n");
+ RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
goto out;
}
node = strchr(name, ':');
if (!node) {
- RTE_LOG(WARNING, PMD, "could not parse node value from %s", name);
+ RTE_LOG(WARNING, PMD, "could not parse node value from %s\n",
+ name);
goto out;
}
@@ -465,7 +504,8 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
action = strchr(node, ':');
if (!action) {
- RTE_LOG(WARNING, PMD, "could not action value from %s", node);
+ RTE_LOG(WARNING, PMD, "could not parse action value from %s\n",
+ node);
goto out;
}
@@ -502,12 +542,31 @@ out:
}
static int
+parse_internal_args(const char *key __rte_unused, const char *value,
+ void *data)
+{
+ struct ring_internal_args **internal_args = data;
+ void *args;
+
+ sscanf(value, "%p", &args);
+
+ *internal_args = args;
+
+ if ((*internal_args)->addr != args)
+ return -1;
+
+ return 0;
+}
+
+static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
const char *name, *params;
struct rte_kvargs *kvlist = NULL;
int ret = 0;
struct node_action_list *info = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct ring_internal_args *internal_args;
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
@@ -515,30 +574,53 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
if (params == NULL || params[0] == '\0') {
- ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
+ ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
+ &eth_dev);
if (ret == -1) {
RTE_LOG(INFO, PMD,
"Attach to pmd_ring for %s\n", name);
ret = eth_dev_ring_create(name, rte_socket_id(),
- DEV_ATTACH);
+ DEV_ATTACH, &eth_dev);
}
- }
- else {
+ } else {
kvlist = rte_kvargs_parse(params, valid_arguments);
if (!kvlist) {
RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
" rings-backed ethernet device\n");
ret = eth_dev_ring_create(name, rte_socket_id(),
- DEV_CREATE);
+ DEV_CREATE, &eth_dev);
if (ret == -1) {
RTE_LOG(INFO, PMD,
"Attach to pmd_ring for %s\n",
name);
ret = eth_dev_ring_create(name, rte_socket_id(),
- DEV_ATTACH);
+ DEV_ATTACH, &eth_dev);
}
+
+ if (eth_dev)
+ eth_dev->device = &dev->device;
+
return ret;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
+ parse_internal_args,
+ &internal_args);
+ if (ret < 0)
+ goto out_free;
+
+ ret = do_eth_dev_ring_create(name,
+ internal_args->rx_queues,
+ internal_args->nb_rx_queues,
+ internal_args->tx_queues,
+ internal_args->nb_tx_queues,
+ internal_args->numa_node,
+ DEV_ATTACH,
+ &eth_dev);
+ if (ret >= 0)
+ ret = 0;
} else {
ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
info = rte_zmalloc("struct node_action_list",
@@ -560,7 +642,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
for (info->count = 0; info->count < info->total; info->count++) {
ret = eth_dev_ring_create(info->list[info->count].name,
info->list[info->count].node,
- info->list[info->count].action);
+ info->list[info->count].action,
+ &eth_dev);
if ((ret == -1) &&
(info->list[info->count].action == DEV_CREATE)) {
RTE_LOG(INFO, PMD,
@@ -568,12 +651,16 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
name);
ret = eth_dev_ring_create(name,
info->list[info->count].node,
- DEV_ATTACH);
+ DEV_ATTACH,
+ &eth_dev);
}
}
}
}
+ if (eth_dev)
+ eth_dev->device = &dev->device;
+
out_free:
rte_kvargs_free(kvlist);
rte_free(info);
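
Editor's note: the reworked rte_eth_from_rings() above no longer calls do_eth_dev_ring_create() directly. It encodes the address of a stack-allocated ring_internal_args structure into the devargs string ("%s=%p"), lets rte_vdev_init() trigger the probe synchronously, and has parse_internal_args() recover the pointer with sscanf("%p"), using the self-referencing addr field as a sanity check. The following is a minimal standalone sketch of that round trip; the struct, key name and helper names are illustrative, not the driver's own.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the driver's internal-args structure. */
struct demo_args {
	int nb_rx_queues;
	int nb_tx_queues;
	void *addr;		/* self-pointer, used to validate the parse */
};

/* Encode the address of 'args' into a key=value string. */
static void encode_args(char *buf, size_t len, const struct demo_args *args)
{
	snprintf(buf, len, "internal=%p", (const void *)args);
}

/* Decode the pointer back and check the self-reference. */
static struct demo_args *decode_args(const char *value)
{
	void *p = NULL;

	if (sscanf(value, "internal=%p", &p) != 1)
		return NULL;

	/* Reject the pointer if the self-check does not match. */
	if (p == NULL || ((struct demo_args *)p)->addr != p)
		return NULL;

	return p;
}

int main(void)
{
	struct demo_args args = {
		.nb_rx_queues = 1,
		.nb_tx_queues = 1,
	};
	char buf[64];

	args.addr = &args;		/* arm the self-check */
	encode_args(buf, sizeof(buf), &args);

	struct demo_args *parsed = decode_args(buf);
	printf("parsed %s\n", parsed == &args ? "OK" : "FAILED");
	return 0;
}

The trick is only safe because the probe runs synchronously in the same process and call stack, so the stack-allocated structure is still alive when the pointer is parsed back.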
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 4e241b22..6cecfc00 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -465,7 +465,7 @@ static int
sfc_mem_bar_init(struct sfc_adapter *sa)
{
struct rte_eth_dev *eth_dev = sa->eth_dev;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
efsys_bar_t *ebp = &sa->mem_bar;
unsigned int i;
struct rte_mem_resource *res;
@@ -666,7 +666,7 @@ sfc_detach(struct sfc_adapter *sa)
int
sfc_probe(struct sfc_adapter *sa)
{
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
efx_nic_t *enp;
int rc;
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index fad0ce04..286d1ac1 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -34,6 +34,7 @@
#include <stdbool.h>
+#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
@@ -46,9 +47,6 @@
extern "C" {
#endif
-#define SFC_DEV_TO_PCI(eth_dev) \
- RTE_DEV_TO_PCI((eth_dev)->device)
-
#if EFSYS_OPT_RX_SCALE
/** RSS key length (bytes) */
#define SFC_RSS_KEY_SIZE 40
@@ -153,6 +151,11 @@ struct sfc_port {
boolean_t flow_ctrl_autoneg;
size_t pdu;
+ /*
+ * Flow API isolated mode overrides promisc and allmulti settings;
+ * they won't be applied if isolated mode is active
+ */
+ boolean_t isolated;
boolean_t promisc;
boolean_t allmulti;
@@ -162,6 +165,7 @@ struct sfc_port {
rte_spinlock_t mac_stats_lock;
uint64_t *mac_stats_buf;
+ unsigned int mac_stats_nb_supported;
efsys_mem_t mac_stats_dma_mem;
boolean_t mac_stats_reset_pending;
uint16_t mac_stats_update_period_ms;
@@ -182,6 +186,8 @@ struct sfc_adapter {
*/
rte_spinlock_t lock;
enum sfc_adapter_state state;
+ struct rte_pci_addr pci_addr;
+ uint16_t port_id;
struct rte_eth_dev *eth_dev;
struct rte_kvargs *kvargs;
bool debug_init;
@@ -226,7 +232,18 @@ struct sfc_adapter {
uint8_t rss_key[SFC_RSS_KEY_SIZE];
#endif
+ /*
+ * Shared memory copy of the Rx datapath name to be used by
+ * the secondary process to find Rx datapath to be used.
+ */
+ char *dp_rx_name;
const struct sfc_dp_rx *dp_rx;
+
+ /*
+ * Shared memory copy of the Tx datapath name to be used by
+ * the secondary process to find the Tx datapath to be used.
+ */
+ char *dp_tx_name;
const struct sfc_dp_tx *dp_tx;
};
diff --git a/drivers/net/sfc/sfc_debug.h b/drivers/net/sfc/sfc_debug.h
index c0b48677..92eba9c3 100644
--- a/drivers/net/sfc/sfc_debug.h
+++ b/drivers/net/sfc/sfc_debug.h
@@ -47,13 +47,12 @@
/* Log PMD message, automatically add prefix and \n */
#define sfc_panic(sa, fmt, args...) \
do { \
- const struct rte_eth_dev *_dev = (sa)->eth_dev; \
- const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ const struct sfc_adapter *_sa = (sa); \
\
rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
- _pci_dev->addr.domain, _pci_dev->addr.bus, \
- _pci_dev->addr.devid, _pci_dev->addr.function,\
- _dev->data->port_id, ##args); \
+ _sa->pci_addr.domain, _sa->pci_addr.bus, \
+ _sa->pci_addr.devid, _sa->pci_addr.function, \
+ _sa->port_id, ##args); \
} while (0)
#endif /* _SFC_DEBUG_H_ */
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index 9d05a4b3..a7b82784 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -161,6 +161,7 @@ struct sfc_dp_rx {
unsigned int features;
#define SFC_DP_RX_FEAT_SCATTER 0x1
+#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
sfc_dp_rx_qcreate_t *qcreate;
sfc_dp_rx_qdestroy_t *qdestroy;
sfc_dp_rx_qstart_t *qstart;
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index 2bb9a2e7..c1c34191 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -135,6 +135,7 @@ struct sfc_dp_tx {
#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1
#define SFC_DP_TX_FEAT_TSO 0x2
#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
+#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
sfc_dp_tx_qcreate_t *qcreate;
sfc_dp_tx_qdestroy_t *qdestroy;
sfc_dp_tx_qstart_t *qstart;
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 1484baba..60812cbe 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -699,7 +699,7 @@ struct sfc_dp_rx sfc_ef10_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = 0,
+ .features = SFC_DP_RX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_rx_qcreate,
.qdestroy = sfc_ef10_rx_qdestroy,
.qstart = sfc_ef10_rx_qstart,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index bac9baa9..da2b5edb 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -128,14 +128,10 @@ sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
return true;
}
-static void
-sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+static unsigned int
+sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
{
- const unsigned int old_read_ptr = txq->evq_read_ptr;
- const unsigned int ptr_mask = txq->ptr_mask;
- unsigned int completed = txq->completed;
- unsigned int pending = completed;
- const unsigned int curr_done = pending - 1;
+ const unsigned int curr_done = txq->completed - 1;
unsigned int anew_done = curr_done;
efx_qword_t tx_ev;
@@ -148,7 +144,18 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
/* Update the latest done descriptor */
anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
}
- pending += (anew_done - curr_done) & ptr_mask;
+ return (anew_done - curr_done) & txq->ptr_mask;
+}
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+
+ pending += sfc_ef10_tx_process_events(txq);
if (pending != completed) {
do {
@@ -349,6 +356,33 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return pktp - &tx_pkts[0];
}
+static void
+sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+
+ pending += sfc_ef10_tx_process_events(txq);
+
+ if (pending != completed) {
+ do {
+ struct sfc_ef10_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & ptr_mask];
+
+ rte_pktmbuf_free_seg(txd->mbuf);
+ } while (++completed != pending);
+
+ txq->completed = completed;
+ }
+
+ sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+ txq->evq_read_ptr);
+}
+
+
static uint16_t
sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -372,7 +406,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
if (reap_done) {
- sfc_ef10_tx_reap(txq);
+ sfc_ef10_simple_tx_reap(txq);
dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
(added - txq->completed);
}
@@ -401,7 +435,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
if (!reap_done)
- sfc_ef10_tx_reap(txq);
+ sfc_ef10_simple_tx_reap(txq);
#endif
return pktp - &tx_pkts[0];
@@ -516,12 +550,15 @@ static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
- unsigned int txds;
+ unsigned int completed;
+
+ for (completed = txq->completed; completed != txq->added; ++completed) {
+ struct sfc_ef10_tx_sw_desc *txd;
- for (txds = 0; txds <= txq->ptr_mask; ++txds) {
- if (txq->sw_ring[txds].mbuf != NULL) {
- rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
- txq->sw_ring[txds].mbuf = NULL;
+ txd = &txq->sw_ring[completed & txq->ptr_mask];
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
}
}
@@ -534,7 +571,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
.type = SFC_DP_TX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = SFC_DP_TX_FEAT_MULTI_SEG,
+ .features = SFC_DP_TX_FEAT_MULTI_SEG |
+ SFC_DP_TX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
.qstart = sfc_ef10_tx_qstart,
@@ -549,7 +587,7 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
.name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
.type = SFC_DP_TX,
},
- .features = 0,
+ .features = SFC_DP_TX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
.qstart = sfc_ef10_tx_qstart,
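
Editor's note: the refactoring above factors the event scan out of sfc_ef10_tx_reap() into sfc_ef10_tx_process_events(), which reports progress as (anew_done - curr_done) & ptr_mask. The expression relies on unsigned wrap-around plus a power-of-two ring mask, so it stays correct even when the completion index wraps past zero. A small standalone illustration follows; the names are illustrative, not the driver's.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE	8u		/* must be a power of two */
#define PTR_MASK	(RING_SIZE - 1u)

/* Number of entries completed between two free-running indices. */
static unsigned int completed_between(unsigned int old_done,
				      unsigned int new_done)
{
	/*
	 * Unsigned subtraction wraps modulo 2^N and the mask keeps the
	 * result inside the ring, so this works even when new_done has
	 * wrapped past zero while old_done has not.
	 */
	return (new_done - old_done) & PTR_MASK;
}

int main(void)
{
	/* No wrap: 5 -> 7 completes 2 entries. */
	assert(completed_between(5, 7) == 2);

	/* Wrap inside the ring: 7 -> 2 (i.e. 10 mod 8) completes 3 entries. */
	assert(completed_between(7, 2) == 3);

	/* Wrap of the free-running counter itself. */
	assert(completed_between(0xFFFFFFFFu, 1u) == 2);

	printf("all checks passed\n");
	return 0;
}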
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index bdb4c466..12bcd6fa 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -107,7 +107,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
sfc_log_init(sa, "entry");
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
/* Autonegotiation may be disabled */
@@ -361,8 +361,13 @@ sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
if (*toggle != enabled) {
*toggle = enabled;
- if ((sa->state == SFC_ADAPTER_STARTED) &&
- (sfc_set_rx_mode(sa) != 0)) {
+ if (port->isolated) {
+ sfc_warn(sa, "isolated mode is active on the port");
+ sfc_warn(sa, "the change is to be applied on the next "
+ "start provided that isolated mode is "
+ "disabled prior the next start");
+ } else if ((sa->state == SFC_ADAPTER_STARTED) &&
+ (sfc_set_rx_mode(sa) != 0)) {
*toggle = !(enabled);
sfc_warn(sa, "Failed to %s %s mode",
((enabled) ? "enable" : "disable"), desc);
@@ -661,6 +666,85 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
}
static int
+sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ unsigned int nb_supported = 0;
+ unsigned int nb_written = 0;
+ unsigned int i;
+ int ret;
+ int rc;
+
+ if (unlikely(values == NULL) ||
+ unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
+ return port->mac_stats_nb_supported;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ rc = sfc_port_update_mac_stats(sa);
+ if (rc != 0) {
+ SFC_ASSERT(rc > 0);
+ ret = -rc;
+ goto unlock;
+ }
+
+ mac_stats = port->mac_stats_buf;
+
+ for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
+ if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ continue;
+
+ if ((ids == NULL) || (ids[nb_written] == nb_supported))
+ values[nb_written++] = mac_stats[i];
+
+ ++nb_supported;
+ }
+
+ ret = nb_written;
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return ret;
+}
+
+static int
+sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int nb_supported = 0;
+ unsigned int nb_written = 0;
+ unsigned int i;
+
+ if (unlikely(xstats_names == NULL) ||
+ unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
+ return port->mac_stats_nb_supported;
+
+ for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
+ if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ continue;
+
+ if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
+ char *name = xstats_names[nb_written++].name;
+
+ strncpy(name, efx_mac_stat_name(sa->nic, i),
+ sizeof(xstats_names[0].name));
+ name[sizeof(xstats_names[0].name) - 1] = '\0';
+ }
+
+ ++nb_supported;
+ }
+
+ return nb_written;
+}
+
+static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
@@ -826,10 +910,17 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct sfc_adapter *sa = dev->data->dev_private;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_port *port = &sa->port;
int rc;
sfc_adapter_lock(sa);
+ if (port->isolated) {
+ sfc_err(sa, "isolated mode is active on the port");
+ sfc_err(sa, "will not set MAC address");
+ goto unlock;
+ }
+
if (sa->state != SFC_ADAPTER_STARTED) {
sfc_info(sa, "the port is not started");
sfc_info(sa, "the new MAC address will be set on port start");
@@ -884,6 +975,12 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
int rc;
unsigned int i;
+ if (port->isolated) {
+ sfc_err(sa, "isolated mode is active on the port");
+ sfc_err(sa, "will not set multicast address list");
+ return -ENOTSUP;
+ }
+
if (mc_addrs == NULL)
return -ENOBUFS;
@@ -913,6 +1010,10 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
return -rc;
}
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_eth_rxq_info *qinfo)
@@ -939,6 +1040,10 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
sfc_adapter_unlock(sa);
}
+/*
+ * The function is used by the secondary process as well. It must not
+ * use any process-local pointers from the adapter data.
+ */
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
struct rte_eth_txq_info *qinfo)
@@ -1083,8 +1188,9 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
return -ENOTSUP;
if (sa->rss_channels == 0)
@@ -1113,9 +1219,13 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
unsigned int efx_hash_types;
int rc = 0;
+ if (port->isolated)
+ return -ENOTSUP;
+
if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
@@ -1180,9 +1290,10 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
int entry;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
return -ENOTSUP;
if (sa->rss_channels == 0)
@@ -1212,11 +1323,15 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
unsigned int *rss_tbl_new;
uint16_t entry;
int rc;
+ if (port->isolated)
+ return -ENOTSUP;
+
if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
@@ -1370,8 +1485,33 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.rxq_info_get = sfc_rx_queue_info_get,
.txq_info_get = sfc_tx_queue_info_get,
.fw_version_get = sfc_fw_version_get,
+ .xstats_get_by_id = sfc_xstats_get_by_id,
+ .xstats_get_names_by_id = sfc_xstats_get_names_by_id,
};
+/**
+ * Duplicate a string in potentially shared memory required for
+ * multi-process support.
+ *
+ * strdup() allocates from process-local heap/memory.
+ */
+static char *
+sfc_strdup(const char *str)
+{
+ size_t size;
+ char *copy;
+
+ if (str == NULL)
+ return NULL;
+
+ size = strlen(str) + 1;
+ copy = rte_malloc(__func__, size, 0);
+ if (copy != NULL)
+ rte_memcpy(copy, str, size);
+
+ return copy;
+}
+
static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
@@ -1407,7 +1547,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
"Insufficient Hw/FW capabilities to use Rx datapath %s",
rx_name);
rc = EINVAL;
- goto fail_dp_rx;
+ goto fail_dp_rx_caps;
}
} else {
sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
@@ -1419,7 +1559,13 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
}
}
- sfc_info(sa, "use %s Rx datapath", sa->dp_rx->dp.name);
+ sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name);
+ if (sa->dp_rx_name == NULL) {
+ rc = ENOMEM;
+ goto fail_dp_rx_name;
+ }
+
+ sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name);
dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
@@ -1440,7 +1586,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
"Insufficient Hw/FW capabilities to use Tx datapath %s",
tx_name);
rc = EINVAL;
- goto fail_dp_tx;
+ goto fail_dp_tx_caps;
}
} else {
sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
@@ -1452,7 +1598,13 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
}
}
- sfc_info(sa, "use %s Tx datapath", sa->dp_tx->dp.name);
+ sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name);
+ if (sa->dp_tx_name == NULL) {
+ rc = ENOMEM;
+ goto fail_dp_tx_name;
+ }
+
+ sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name);
dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
@@ -1460,14 +1612,108 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
return 0;
+fail_dp_tx_name:
+fail_dp_tx_caps:
+ sa->dp_tx = NULL;
+
fail_dp_tx:
fail_kvarg_tx_datapath:
+ rte_free(sa->dp_rx_name);
+ sa->dp_rx_name = NULL;
+
+fail_dp_rx_name:
+fail_dp_rx_caps:
+ sa->dp_rx = NULL;
+
fail_dp_rx:
fail_kvarg_rx_datapath:
return rc;
}
static void
+sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(sa->dp_tx_name);
+ sa->dp_tx_name = NULL;
+ sa->dp_tx = NULL;
+
+ rte_free(sa->dp_rx_name);
+ sa->dp_rx_name = NULL;
+ sa->dp_rx = NULL;
+}
+
+static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
+ .rxq_info_get = sfc_rx_queue_info_get,
+ .txq_info_get = sfc_tx_queue_info_get,
+};
+
+static int
+sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
+{
+ /*
+ * Device private data contains many process-local pointers.
+ * The code below must be careful to use only data located
+ * in shared memory.
+ */
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+ int rc;
+
+ dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
+ if (dp_rx == NULL) {
+ sfc_err(sa, "cannot find %s Rx datapath", sa->dp_tx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
+ sfc_err(sa, "%s Rx datapath does not support multi-process",
+ sa->dp_rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx_multi_process;
+ }
+
+ dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
+ if (dp_tx == NULL) {
+ sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
+ sfc_err(sa, "%s Tx datapath does not support multi-process",
+ sa->dp_tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx_multi_process;
+ }
+
+ dev->rx_pkt_burst = dp_rx->pkt_burst;
+ dev->tx_pkt_burst = dp_tx->pkt_burst;
+ dev->dev_ops = &sfc_eth_dev_secondary_ops;
+
+ return 0;
+
+fail_dp_tx_multi_process:
+fail_dp_tx:
+fail_dp_rx_multi_process:
+fail_dp_rx:
+ return rc;
+}
+
+static void
+sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
+{
+ dev->dev_ops = NULL;
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
+}
+
+static void
sfc_register_dp(void)
{
/* Register once */
@@ -1486,19 +1732,27 @@ static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
struct sfc_adapter *sa = dev->data->dev_private;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
int rc;
const efx_nic_cfg_t *encp;
const struct ether_addr *from;
sfc_register_dp();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -sfc_eth_dev_secondary_set_ops(dev);
+
/* Required for logging */
+ sa->pci_addr = pci_dev->addr;
+ sa->port_id = dev->data->port_id;
+
sa->eth_dev = dev;
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
rc = sfc_kvargs_parse(sa);
if (rc != 0)
goto fail_kvargs_parse;
@@ -1549,6 +1803,8 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
return 0;
fail_attach:
+ sfc_eth_dev_clear_ops(dev);
+
fail_set_ops:
sfc_unprobe(sa);
@@ -1571,22 +1827,26 @@ fail_kvargs_parse:
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ sfc_eth_dev_secondary_clear_ops(dev);
+ return 0;
+ }
+
+ sa = dev->data->dev_private;
sfc_log_init(sa, "entry");
sfc_adapter_lock(sa);
+ sfc_eth_dev_clear_ops(dev);
+
sfc_detach(sa);
sfc_unprobe(sa);
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
sfc_kvargs_cleanup(sa);
sfc_adapter_unlock(sa);
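
Editor's note: the sfc_ethdev.c changes above split initialization by process type. The primary process selects the Rx/Tx datapaths and publishes only their names (duplicated with sfc_strdup() into rte_malloc memory) in the shared device private data; the secondary process re-resolves those names into its own burst function pointers and must never dereference process-local pointers. Below is a minimal standalone sketch of that pattern, with illustrative names and a plain enum in place of the EAL process-type API.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins: not the driver's real types. */
enum proc_type { PROC_PRIMARY, PROC_SECONDARY };

struct datapath {
	const char *name;
	void (*burst)(void);
};

static void ef10_burst(void) { puts("ef10 burst"); }

/* Registry every process can search by name. */
static const struct datapath dp_registry[] = {
	{ "ef10", ef10_burst },
};

/*
 * "Shared" private data: only plain values and names that both
 * processes can interpret; no process-local pointers.
 */
struct shared_priv {
	char dp_name[16];
};

static const struct datapath *find_dp(const char *name)
{
	for (size_t i = 0; i < sizeof(dp_registry) / sizeof(dp_registry[0]); i++)
		if (strcmp(dp_registry[i].name, name) == 0)
			return &dp_registry[i];
	return NULL;
}

static int dev_init(enum proc_type type, struct shared_priv *priv)
{
	const struct datapath *dp;

	if (type == PROC_SECONDARY) {
		/* Secondary: rebuild local pointers from shared names only. */
		dp = find_dp(priv->dp_name);
		if (dp == NULL)
			return -1;
		dp->burst();
		return 0;
	}

	/* Primary: choose a datapath and publish its name for secondaries. */
	dp = &dp_registry[0];
	snprintf(priv->dp_name, sizeof(priv->dp_name), "%s", dp->name);
	dp->burst();
	return 0;
}

int main(void)
{
	struct shared_priv priv = { .dp_name = "" };

	dev_init(PROC_PRIMARY, &priv);		/* primary publishes "ef10" */
	dev_init(PROC_SECONDARY, &priv);	/* secondary resolves it by name */
	return 0;
}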
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 160d39f9..a16dc27b 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -237,8 +237,7 @@ sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
}
static boolean_t
-sfc_ev_exception(void *arg, __rte_unused uint32_t code,
- __rte_unused uint32_t data)
+sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
struct sfc_evq *evq = arg;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index c3ea43a6..110dfb89 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1112,12 +1112,35 @@ sfc_flow_flush(struct rte_eth_dev *dev,
return -ret;
}
+static int
+sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ int ret = 0;
+
+ sfc_adapter_lock(sa);
+ if (sa->state != SFC_ADAPTER_INITIALIZED) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "please close the port first");
+ ret = -rte_errno;
+ } else {
+ port->isolated = (enable) ? B_TRUE : B_FALSE;
+ }
+ sfc_adapter_unlock(sa);
+
+ return ret;
+}
+
const struct rte_flow_ops sfc_flow_ops = {
.validate = sfc_flow_validate,
.create = sfc_flow_create,
.destroy = sfc_flow_destroy,
.flush = sfc_flow_flush,
.query = NULL,
+ .isolate = sfc_flow_isolate,
};
void
diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c
index 7eb4b86c..ee59cb1c 100644
--- a/drivers/net/sfc/sfc_intr.c
+++ b/drivers/net/sfc/sfc_intr.c
@@ -77,7 +77,7 @@ sfc_intr_line_handler(void *cb_arg)
boolean_t fatal;
uint32_t qmask;
unsigned int lsc_seq = sa->port.lsc_seq;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
@@ -111,7 +111,8 @@ exit:
sa->eth_dev->data->dev_link.link_status ?
"UP" : "DOWN");
_rte_eth_dev_callback_process(sa->eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
@@ -122,7 +123,7 @@ sfc_intr_message_handler(void *cb_arg)
efx_nic_t *enp = sa->nic;
boolean_t fatal;
unsigned int lsc_seq = sa->port.lsc_seq;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
@@ -152,7 +153,8 @@ exit:
if (lsc_seq != sa->port.lsc_seq) {
sfc_info(sa, "link status change event");
_rte_eth_dev_callback_process(sa->eth_dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
@@ -177,7 +179,7 @@ sfc_intr_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_intr_init;
- pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
intr_handle = &pci_dev->intr_handle;
if (intr->handler != NULL) {
@@ -232,7 +234,7 @@ void
sfc_intr_stop(struct sfc_adapter *sa)
{
struct sfc_intr *intr = &sa->intr;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
@@ -306,7 +308,7 @@ int
sfc_intr_attach(struct sfc_adapter *sa)
{
struct sfc_intr *intr = &sa->intr;
- struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
sfc_log_init(sa, "entry");
diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h
index 8a5e2302..b1a9df44 100644
--- a/drivers/net/sfc/sfc_log.h
+++ b/drivers/net/sfc/sfc_log.h
@@ -35,17 +35,16 @@
/* Log PMD message, automatically add prefix and \n */
#define SFC_LOG(sa, level, ...) \
do { \
- const struct rte_eth_dev *_dev = (sa)->eth_dev; \
- const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ const struct sfc_adapter *__sa = (sa); \
\
RTE_LOG(level, PMD, \
RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \
RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
- _pci_dev->addr.domain, \
- _pci_dev->addr.bus, \
- _pci_dev->addr.devid, \
- _pci_dev->addr.function, \
- _dev->data->port_id, \
+ __sa->pci_addr.domain, \
+ __sa->pci_addr.bus, \
+ __sa->pci_addr.devid, \
+ __sa->pci_addr.function, \
+ __sa->port_id, \
RTE_FMT_TAIL(__VA_ARGS__,))); \
} while (0)
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index ee96bcde..c1466a77 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -151,6 +151,7 @@ sfc_port_start(struct sfc_adapter *sa)
uint32_t phy_adv_cap;
const uint32_t phy_pause_caps =
((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
+ unsigned int i;
sfc_log_init(sa, "entry");
@@ -186,26 +187,29 @@ sfc_port_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_mac_pdu_set;
- sfc_log_init(sa, "set MAC address");
- rc = efx_mac_addr_set(sa->nic,
- sa->eth_dev->data->mac_addrs[0].addr_bytes);
- if (rc != 0)
- goto fail_mac_addr_set;
-
- sfc_log_init(sa, "set MAC filters");
- port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
- B_TRUE : B_FALSE;
- port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
- B_TRUE : B_FALSE;
- rc = sfc_set_rx_mode(sa);
- if (rc != 0)
- goto fail_mac_filter_set;
+ if (!port->isolated) {
+ struct ether_addr *mac_addrs = sa->eth_dev->data->mac_addrs;
- sfc_log_init(sa, "set multicast address list");
- rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
- port->nb_mcast_addrs);
- if (rc != 0)
- goto fail_mcast_address_list_set;
+ sfc_log_init(sa, "set MAC address");
+ rc = efx_mac_addr_set(sa->nic, mac_addrs[0].addr_bytes);
+ if (rc != 0)
+ goto fail_mac_addr_set;
+
+ sfc_log_init(sa, "set MAC filters");
+ port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
+ B_TRUE : B_FALSE;
+ port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
+ B_TRUE : B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ goto fail_mac_filter_set;
+
+ sfc_log_init(sa, "set multicast address list");
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ goto fail_mcast_address_list_set;
+ }
if (port->mac_stats_reset_pending) {
rc = sfc_port_reset_mac_stats(sa);
@@ -219,6 +223,10 @@ sfc_port_start(struct sfc_adapter *sa)
efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
sizeof(port->mac_stats_mask));
+ for (i = 0, port->mac_stats_nb_supported = 0; i < EFX_MAC_NSTATS; ++i)
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
+ port->mac_stats_nb_supported++;
+
port->mac_stats_update_generation = 0;
if (port->mac_stats_update_period_ms != 0) {
@@ -242,6 +250,18 @@ sfc_port_start(struct sfc_adapter *sa)
}
}
+ if ((port->mac_stats_update_period_ms != 0) &&
+ port->mac_stats_periodic_dma_supported) {
+ /*
+ * Request an explicit MAC stats upload immediately to
+ * avoid reading back bogus figures if the user reads
+ * stats before periodic DMA has actually started
+ */
+ rc = efx_mac_stats_upload(sa->nic, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_upload;
+ }
+
sfc_log_init(sa, "disable MAC drain");
rc = efx_mac_drain(sa->nic, B_FALSE);
if (rc != 0)
@@ -262,6 +282,7 @@ fail_mac_drain:
(void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
0, B_FALSE);
+fail_mac_stats_upload:
fail_mac_stats_periodic:
fail_mcast_address_list_set:
fail_mac_filter_set:
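
Editor's note: sfc_port_start() above precomputes port->mac_stats_nb_supported by walking the MAC statistics mask once; the new xstats_get_by_id()/xstats_get_names_by_id() callbacks in sfc_ethdev.c then return that count up front when the caller only wants the size or passes too small a buffer. A trivial standalone sketch of counting supported entries in such a bitmask follows; the real mask layout comes from the EFX_MAC_STAT_SUPPORTED macro in the efx headers, so the one below is only illustrative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative mask layout: one bit per statistic, 32 bits per word. */
#define NSTATS	80u

static int stat_supported(const uint32_t *mask, unsigned int id)
{
	return (mask[id / 32] >> (id % 32)) & 1u;
}

static unsigned int count_supported(const uint32_t *mask)
{
	unsigned int i, nb = 0;

	for (i = 0; i < NSTATS; i++)
		if (stat_supported(mask, i))
			nb++;
	return nb;
}

int main(void)
{
	/* 80 statistics -> three 32-bit mask words. */
	uint32_t mask[3] = { 0x0000000F, 0x00000001, 0x0 };

	printf("%u statistics supported\n", count_supported(mask));
	return 0;
}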
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 2ecd6f26..1bf86445 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -292,7 +292,7 @@ sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (desc_flags & EFX_PKT_CONT) {
/* The packet is scattered, more fragments to come */
scatter_pkt = m;
- /* Futher fragments have no prefix */
+ /* Further fragments have no prefix */
prefix_size = 0;
continue;
}
@@ -529,6 +529,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_rxq *rxq;
unsigned int retry_count;
unsigned int wait_count;
+ int rc;
rxq = sa->rxq_info[sw_index].rxq;
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
@@ -541,8 +542,10 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
(retry_count < SFC_RX_QFLUSH_ATTEMPTS);
++retry_count) {
- if (efx_rx_qflush(rxq->common) != 0) {
- rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rc = efx_rx_qflush(rxq->common);
+ if (rc != 0) {
+ rxq->state |= (rc == EALREADY) ?
+ SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
break;
}
rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
@@ -627,6 +630,7 @@ retry:
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
+ struct sfc_port *port = &sa->port;
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
struct sfc_evq *evq;
@@ -661,7 +665,7 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
rxq->state |= SFC_RXQ_STARTED;
- if (sw_index == 0) {
+ if ((sw_index == 0) && !port->isolated) {
rc = sfc_rx_default_rxq_set_filter(sa, rxq);
if (rc != 0)
goto fail_mac_filter_default_rxq_set;
@@ -942,7 +946,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.mem_bar = sa->mem_bar.esb_base;
rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
socket_id, &info, &rxq->dp);
if (rc != 0)
goto fail_dp_rx_qcreate;
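
Editor's note: both sfc_rx_qflush() above and sfc_tx_qstop() below now inspect the return code of the flush request and map EALREADY to the flushed state instead of treating it as a failure, since a queue that was already flushed is success from the driver's point of view. A compact standalone sketch of that error-code mapping, with illustrative names:

#include <errno.h>
#include <stdio.h>

enum flush_state { FLUSH_PENDING, FLUSH_DONE, FLUSH_FAILED };

/* Stand-in for the hardware flush request. */
static int hw_flush(int already_flushed)
{
	return already_flushed ? EALREADY : 0;
}

static enum flush_state request_flush(int already_flushed)
{
	int rc = hw_flush(already_flushed);

	if (rc != 0) {
		/*
		 * EALREADY means the queue was flushed before we asked,
		 * which counts as success; anything else is a real failure.
		 */
		return (rc == EALREADY) ? FLUSH_DONE : FLUSH_FAILED;
	}
	return FLUSH_PENDING;	/* wait for the flush-done event */
}

int main(void)
{
	printf("fresh flush  -> %d\n", request_flush(0));	/* FLUSH_PENDING */
	printf("already done -> %d\n", request_flush(1));	/* FLUSH_DONE */
	return 0;
}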
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 23230144..fc439cb6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -185,7 +185,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.mem_bar = sa->mem_bar.esb_base;
rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
- &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
socket_id, &info, &txq->dp);
if (rc != 0)
goto fail_dp_tx_qinit;
@@ -479,6 +479,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_txq *txq;
unsigned int retry_count;
unsigned int wait_count;
+ int rc;
sfc_log_init(sa, "TxQ = %u", sw_index);
@@ -502,8 +503,10 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
((txq->state & SFC_TXQ_FLUSHED) == 0) &&
(retry_count < SFC_TX_QFLUSH_ATTEMPTS);
++retry_count) {
- if (efx_tx_qflush(txq->common) != 0) {
- txq->state |= SFC_TXQ_FLUSH_FAILED;
+ rc = efx_tx_qflush(txq->common);
+ if (rc != 0) {
+ txq->state |= (rc == EALREADY) ?
+ SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
break;
}
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 836c3b2a..0e96b922 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -48,6 +48,7 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2_iobuf.c
#
# Export include files
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 54212b71..9c0d57cc 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -53,6 +53,7 @@
#include <rte_atomic.h>
#include "rte_eth_szedata2.h"
+#include "szedata2_iobuf.h"
#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
@@ -64,7 +65,6 @@
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
#define RTE_SZEDATA2_DRIVER_NAME net_szedata2
-#define RTE_SZEDATA2_PCI_DRIVER_NAME "rte_szedata2_pmd"
#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
@@ -1032,7 +1032,7 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->if_index = 0;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
@@ -1140,6 +1140,33 @@ eth_dev_close(struct rte_eth_dev *dev)
dev->data->nb_tx_queues = 0;
}
+/**
+ * Function takes the value from the first IBUF status register.
+ * Values in IBUF and OBUF should be the same.
+ *
+ * @param internals
+ * Pointer to device private structure.
+ * @return
+ * Link speed constant.
+ */
+static inline enum szedata2_link_speed
+get_link_speed(const struct pmd_internals *internals)
+{
+ const volatile struct szedata2_ibuf *ibuf =
+ ibuf_ptr_by_index(internals->pci_rsc, 0);
+ uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
+ switch (speed) {
+ case 0x03:
+ return SZEDATA2_LINK_SPEED_10G;
+ case 0x04:
+ return SZEDATA2_LINK_SPEED_40G;
+ case 0x05:
+ return SZEDATA2_LINK_SPEED_100G;
+ default:
+ return SZEDATA2_LINK_SPEED_DEFAULT;
+ }
+}
+
static int
eth_link_update(struct rte_eth_dev *dev,
int wait_to_complete __rte_unused)
@@ -1149,11 +1176,11 @@ eth_link_update(struct rte_eth_dev *dev,
struct rte_eth_link *dev_link = &dev->data->dev_link;
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
+ const volatile struct szedata2_ibuf *ibuf;
+ uint32_t i;
+ bool link_is_up = false;
- switch (cgmii_link_speed(ibuf)) {
+ switch (get_link_speed(internals)) {
case SZEDATA2_LINK_SPEED_10G:
link.link_speed = ETH_SPEED_NUM_10G;
break;
@@ -1171,8 +1198,19 @@ eth_link_update(struct rte_eth_dev *dev,
/* szedata2 uses only full duplex */
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_status = (cgmii_ibuf_is_enabled(ibuf) &&
- cgmii_ibuf_is_link_up(ibuf)) ? ETH_LINK_UP : ETH_LINK_DOWN;
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
+ /*
+ * Link is considered up if at least one ibuf is enabled
+ * and up.
+ */
+ if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
+ link_is_up = true;
+ break;
+ }
+ }
+
+ link.link_status = (link_is_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
link.link_autoneg = ETH_LINK_SPEED_FIXED;
@@ -1187,15 +1225,12 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
- volatile struct szedata2_cgmii_obuf *);
-
- cgmii_ibuf_enable(ibuf);
- cgmii_obuf_enable(obuf);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++)
+ ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
+ for (i = 0; i < szedata2_obuf_count; i++)
+ obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
return 0;
}
@@ -1204,15 +1239,12 @@ eth_dev_set_link_down(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
- volatile struct szedata2_cgmii_obuf *);
-
- cgmii_ibuf_disable(ibuf);
- cgmii_obuf_disable(obuf);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++)
+ ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
+ for (i = 0; i < szedata2_obuf_count; i++)
+ obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
return 0;
}
@@ -1292,10 +1324,12 @@ eth_promiscuous_enable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_PROMISC);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_PROMISC);
+ }
}
static void
@@ -1303,10 +1337,12 @@ eth_promiscuous_disable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ }
}
static void
@@ -1314,10 +1350,12 @@ eth_allmulticast_enable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
+ }
}
static void
@@ -1325,10 +1363,12 @@ eth_allmulticast_disable(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = (struct pmd_internals *)
dev->data->dev_private;
- volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
- volatile struct szedata2_cgmii_ibuf *);
- cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ uint32_t i;
+
+ for (i = 0; i < szedata2_ibuf_count; i++) {
+ ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
+ SZEDATA2_MAC_CHMODE_ONLY_VALID);
+ }
}
static const struct eth_dev_ops ops = {
@@ -1431,7 +1471,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
struct szedata *szedata_temp;
int ret;
uint32_t szedata2_index;
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_pci_addr *pci_addr = &pci_dev->addr;
struct rte_mem_resource *pci_rsc =
&pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
@@ -1554,7 +1594,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_pci_addr *pci_addr = &pci_dev->addr;
rte_free(dev->data->mac_addrs);
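
Editor's note: the szedata2 link and MAC-mode handlers above stop hard-coding a single CGMII IBUF/OBUF offset and instead iterate over per-firmware tables of base offsets (szedata2_ibuf_base_table/szedata2_obuf_base_table, introduced in szedata2_iobuf.c below), resolving each instance with ibuf_ptr_by_index()/obuf_ptr_by_index(). Below is a standalone sketch of the base-offset-table pattern, with an illustrative register layout rather than the real IBUF one.

#include <stdint.h>
#include <stdio.h>

/* Illustrative register block; the real IBUF layout lives in the driver. */
struct demo_ibuf {
	uint32_t enable;
	uint32_t status;
};

/* Per-instance offsets from the start of the mapped PCI resource. */
static const uint32_t ibuf_base_table[] = { 0x8000, 0x8200, 0x8400 };
#define IBUF_COUNT (sizeof(ibuf_base_table) / sizeof(ibuf_base_table[0]))

static volatile struct demo_ibuf *
ibuf_by_index(uint8_t *bar, unsigned int i)
{
	return (volatile struct demo_ibuf *)(bar + ibuf_base_table[i]);
}

int main(void)
{
	/* Stand-in for the mapped BAR; plain memory is enough for the demo. */
	static uint32_t bar_mem[0x9000 / 4];
	uint8_t *bar = (uint8_t *)bar_mem;
	unsigned int i;

	for (i = 0; i < IBUF_COUNT; i++)
		ibuf_by_index(bar, i)->enable = 1;	/* enable every instance */

	for (i = 0; i < IBUF_COUNT; i++)
		printf("ibuf %u enable=%u\n", i,
		       (unsigned int)ibuf_by_index(bar, i)->enable);
	return 0;
}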
diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h
index afe8a383..f25d4c59 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.h
+++ b/drivers/net/szedata2/rte_eth_szedata2.h
@@ -34,9 +34,11 @@
#ifndef RTE_PMD_SZEDATA2_H_
#define RTE_PMD_SZEDATA2_H_
-#include <stdbool.h>
+#include <stdint.h>
-#include <rte_byteorder.h>
+#include <libsze2.h>
+
+#include <rte_common.h>
/* PCI Vendor ID */
#define PCI_VENDOR_ID_NETCOPE 0x1b26
@@ -58,7 +60,7 @@
* Round 'what' to the nearest larger (or equal) multiple of '8'
* (szedata2 packet is aligned to 8 bytes)
*/
-#define RTE_SZE2_ALIGN8(what) (((what) + ((8) - 1)) & (~((8) - 1)))
+#define RTE_SZE2_ALIGN8(what) RTE_ALIGN(what, 8)
/*! main handle structure */
struct szedata {
@@ -113,338 +115,4 @@ struct szedata {
int numa_node;
};
-/*
- * @return Byte from PCI resource at offset "offset".
- */
-static inline uint8_t
-pci_resource_read8(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return *((uint8_t *)((uint8_t *)rsc->addr + offset));
-}
-
-/*
- * @return Two bytes from PCI resource starting at offset "offset".
- */
-static inline uint16_t
-pci_resource_read16(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *)rsc->addr +
- offset)));
-}
-
-/*
- * @return Four bytes from PCI resource starting at offset "offset".
- */
-static inline uint32_t
-pci_resource_read32(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *)rsc->addr +
- offset)));
-}
-
-/*
- * @return Eight bytes from PCI resource starting at offset "offset".
- */
-static inline uint64_t
-pci_resource_read64(struct rte_mem_resource *rsc, uint32_t offset)
-{
- return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *)rsc->addr +
- offset)));
-}
-
-/*
- * Write one byte to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write8(struct rte_mem_resource *rsc, uint32_t offset, uint8_t val)
-{
- *((uint8_t *)((uint8_t *)rsc->addr + offset)) = val;
-}
-
-/*
- * Write two bytes to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write16(struct rte_mem_resource *rsc, uint32_t offset,
- uint16_t val)
-{
- *((uint16_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_16(val);
-}
-
-/*
- * Write four bytes to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write32(struct rte_mem_resource *rsc, uint32_t offset,
- uint32_t val)
-{
- *((uint32_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_32(val);
-}
-
-/*
- * Write eight bytes to PCI resource address space at offset "offset".
- */
-static inline void
-pci_resource_write64(struct rte_mem_resource *rsc, uint32_t offset,
- uint64_t val)
-{
- *((uint64_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_64(val);
-}
-
-#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
- ((type)(((uint8_t *)(rsc)->addr) + (offset)))
-
-enum szedata2_link_speed {
- SZEDATA2_LINK_SPEED_DEFAULT = 0,
- SZEDATA2_LINK_SPEED_10G,
- SZEDATA2_LINK_SPEED_40G,
- SZEDATA2_LINK_SPEED_100G,
-};
-
-enum szedata2_mac_check_mode {
- SZEDATA2_MAC_CHMODE_PROMISC = 0x0,
- SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1,
- SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2,
- SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3,
-};
-
-/*
- * Structure describes CGMII IBUF address space
- */
-struct szedata2_cgmii_ibuf {
- /** Total Received Frames Counter low part */
- uint32_t trfcl;
- /** Correct Frames Counter low part */
- uint32_t cfcl;
- /** Discarded Frames Counter low part */
- uint32_t dfcl;
- /** Counter of frames discarded due to buffer overflow low part */
- uint32_t bodfcl;
- /** Total Received Frames Counter high part */
- uint32_t trfch;
- /** Correct Frames Counter high part */
- uint32_t cfch;
- /** Discarded Frames Counter high part */
- uint32_t dfch;
- /** Counter of frames discarded due to buffer overflow high part */
- uint32_t bodfch;
- /** IBUF enable register */
- uint32_t ibuf_en;
- /** Error mask register */
- uint32_t err_mask;
- /** IBUF status register */
- uint32_t ibuf_st;
- /** IBUF command register */
- uint32_t ibuf_cmd;
- /** Minimum frame length allowed */
- uint32_t mfla;
- /** Frame MTU */
- uint32_t mtu;
- /** MAC address check mode */
- uint32_t mac_chmode;
- /** Octets Received OK Counter low part */
- uint32_t orocl;
- /** Octets Received OK Counter high part */
- uint32_t oroch;
-} __rte_packed;
-
-/* Offset of CGMII IBUF memory for MAC addresses */
-#define SZEDATA2_CGMII_IBUF_MAC_MEM_OFF 0x80
-
-/*
- * @return
- * true if IBUF is enabled
- * false if IBUF is disabled
- */
-static inline bool
-cgmii_ibuf_is_enabled(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- return ((rte_le_to_cpu_32(ibuf->ibuf_en) & 0x1) != 0) ? true : false;
-}
-
-/*
- * Enables IBUF.
- */
-static inline void
-cgmii_ibuf_enable(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- ibuf->ibuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) | 0x1);
-}
-
-/*
- * Disables IBUF.
- */
-static inline void
-cgmii_ibuf_disable(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- ibuf->ibuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) & ~0x1);
-}
-
-/*
- * @return
- * true if ibuf link is up
- * false if ibuf link is down
- */
-static inline bool
-cgmii_ibuf_is_link_up(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- return ((rte_le_to_cpu_32(ibuf->ibuf_st) & 0x80) != 0) ? true : false;
-}
-
-/*
- * @return
- * MAC address check mode
- */
-static inline enum szedata2_mac_check_mode
-cgmii_ibuf_mac_mode_read(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- switch (rte_le_to_cpu_32(ibuf->mac_chmode) & 0x3) {
- case 0x0:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- case 0x1:
- return SZEDATA2_MAC_CHMODE_ONLY_VALID;
- case 0x2:
- return SZEDATA2_MAC_CHMODE_ALL_BROADCAST;
- case 0x3:
- return SZEDATA2_MAC_CHMODE_ALL_MULTICAST;
- default:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- }
-}
-
-/*
- * Writes "mode" in MAC address check mode register.
- */
-static inline void
-cgmii_ibuf_mac_mode_write(volatile struct szedata2_cgmii_ibuf *ibuf,
- enum szedata2_mac_check_mode mode)
-{
- ibuf->mac_chmode = rte_cpu_to_le_32(
- (rte_le_to_cpu_32(ibuf->mac_chmode) & ~0x3) | mode);
-}
-
-/*
- * Structure describes CGMII OBUF address space
- */
-struct szedata2_cgmii_obuf {
- /** Total Sent Frames Counter low part */
- uint32_t tsfcl;
- /** Octets Sent Counter low part */
- uint32_t oscl;
- /** Total Discarded Frames Counter low part */
- uint32_t tdfcl;
- /** reserved */
- uint32_t reserved1;
- /** Total Sent Frames Counter high part */
- uint32_t tsfch;
- /** Octets Sent Counter high part */
- uint32_t osch;
- /** Total Discarded Frames Counter high part */
- uint32_t tdfch;
- /** reserved */
- uint32_t reserved2;
- /** OBUF enable register */
- uint32_t obuf_en;
- /** reserved */
- uint64_t reserved3;
- /** OBUF control register */
- uint32_t ctrl;
- /** OBUF status register */
- uint32_t obuf_st;
-} __rte_packed;
-
-/*
- * @return
- * true if OBUF is enabled
- * false if OBUF is disabled
- */
-static inline bool
-cgmii_obuf_is_enabled(volatile struct szedata2_cgmii_obuf *obuf)
-{
- return ((rte_le_to_cpu_32(obuf->obuf_en) & 0x1) != 0) ? true : false;
-}
-
-/*
- * Enables OBUF.
- */
-static inline void
-cgmii_obuf_enable(volatile struct szedata2_cgmii_obuf *obuf)
-{
- obuf->obuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) | 0x1);
-}
-
-/*
- * Disables OBUF.
- */
-static inline void
-cgmii_obuf_disable(volatile struct szedata2_cgmii_obuf *obuf)
-{
- obuf->obuf_en =
- rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) & ~0x1);
-}
-
-/*
- * Function takes value from IBUF status register. Values in IBUF and OBUF
- * should be same.
- *
- * @return Link speed constant.
- */
-static inline enum szedata2_link_speed
-cgmii_link_speed(volatile struct szedata2_cgmii_ibuf *ibuf)
-{
- uint32_t speed = (rte_le_to_cpu_32(ibuf->ibuf_st) & 0x70) >> 4;
- switch (speed) {
- case 0x03:
- return SZEDATA2_LINK_SPEED_10G;
- case 0x04:
- return SZEDATA2_LINK_SPEED_40G;
- case 0x05:
- return SZEDATA2_LINK_SPEED_100G;
- default:
- return SZEDATA2_LINK_SPEED_DEFAULT;
- }
-}
-
-/*
- * IBUFs and OBUFs can generally be located at different offsets in different
- * firmwares.
- * This part defines base offsets of IBUFs and OBUFs through various firmwares.
- * Currently one firmware type is supported.
- * Type of firmware is set through configuration option
- * CONFIG_RTE_LIBRTE_PMD_SZEDATA_AS.
- * Possible values are:
- * 0 - for firmwares:
- * NIC_100G1_LR4
- * HANIC_100G1_LR4
- * HANIC_100G1_SR10
- */
-#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS)
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined"
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0
-
-/*
- * CGMII IBUF offset from the beginning of PCI resource address space.
- */
-#define SZEDATA2_CGMII_IBUF_BASE_OFF 0x8000
-/*
- * Size of CGMII IBUF.
- */
-#define SZEDATA2_CGMII_IBUF_SIZE 0x200
-
-/*
- * GCMII OBUF offset from the beginning of PCI resource address space.
- */
-#define SZEDATA2_CGMII_OBUF_BASE_OFF 0x9000
-/*
- * Size of CGMII OBUF.
- */
-#define SZEDATA2_CGMII_OBUF_SIZE 0x100
-
-#else
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file"
-#endif
-
-#endif
+#endif /* RTE_PMD_SZEDATA2_H_ */
diff --git a/drivers/net/szedata2/szedata2_iobuf.c b/drivers/net/szedata2/szedata2_iobuf.c
new file mode 100644
index 00000000..3b9a71fe
--- /dev/null
+++ b/drivers/net/szedata2/szedata2_iobuf.c
@@ -0,0 +1,203 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 CESNET
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of CESNET nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+#include "szedata2_iobuf.h"
+
+/*
+ * IBUFs and OBUFs can generally be located at different offsets in different
+ * firmwares (modes).
+ * This part defines base offsets of IBUFs and OBUFs for various cards
+ * and firmwares (modes).
+ * Type of firmware (mode) is set through configuration option
+ * CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS.
+ * Possible values are:
+ * 0 - for cards (modes):
+ * NFB-100G1 (100G1)
+ *
+ * 1 - for cards (modes):
+ * NFB-100G2Q (100G1)
+ *
+ * 2 - for cards (modes):
+ * NFB-40G2 (40G2)
+ * NFB-100G2C (100G2)
+ * NFB-100G2Q (40G2)
+ *
+ * 3 - for cards (modes):
+ * NFB-40G2 (10G8)
+ * NFB-100G2Q (10G8)
+ *
+ * 4 - for cards (modes):
+ * NFB-100G1 (10G10)
+ *
+ * 5 - for experimental firmwares and future use
+ */
+#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS)
+#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined"
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0
+
+/*
+ * Cards (modes):
+ * NFB-100G1 (100G1)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 1
+
+/*
+ * Cards (modes):
+ * NFB-100G2Q (100G1)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8800
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9800
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 2
+
+/*
+ * Cards (modes):
+ * NFB-40G2 (40G2)
+ * NFB-100G2C (100G2)
+ * NFB-100G2Q (40G2)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8800
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000,
+ 0x9800
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 3
+
+/*
+ * Cards (modes):
+ * NFB-40G2 (10G8)
+ * NFB-100G2Q (10G8)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8200,
+ 0x8400,
+ 0x8600,
+ 0x8800,
+ 0x8A00,
+ 0x8C00,
+ 0x8E00
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000,
+ 0x9200,
+ 0x9400,
+ 0x9600,
+ 0x9800,
+ 0x9A00,
+ 0x9C00,
+ 0x9E00
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 4
+
+/*
+ * Cards (modes):
+ * NFB-100G1 (10G10)
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8200,
+ 0x8400,
+ 0x8600,
+ 0x8800,
+ 0x8A00,
+ 0x8C00,
+ 0x8E00,
+ 0x9000,
+ 0x9200
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0xA000,
+ 0xA200,
+ 0xA400,
+ 0xA600,
+ 0xA800,
+ 0xAA00,
+ 0xAC00,
+ 0xAE00,
+ 0xB000,
+ 0xB200
+};
+
+#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 5
+
+/*
+ * Future use and experimental firmwares.
+ */
+
+const uint32_t szedata2_ibuf_base_table[] = {
+ 0x8000,
+ 0x8200,
+ 0x8400,
+ 0x8600,
+ 0x8800
+};
+const uint32_t szedata2_obuf_base_table[] = {
+ 0x9000,
+ 0x9200,
+ 0x9400,
+ 0x9600,
+ 0x9800
+};
+
+#else
+#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file"
+#endif
+
+const uint32_t szedata2_ibuf_count = RTE_DIM(szedata2_ibuf_base_table);
+const uint32_t szedata2_obuf_count = RTE_DIM(szedata2_obuf_base_table);
diff --git a/drivers/net/szedata2/szedata2_iobuf.h b/drivers/net/szedata2/szedata2_iobuf.h
new file mode 100644
index 00000000..f1ccb3b2
--- /dev/null
+++ b/drivers/net/szedata2/szedata2_iobuf.h
@@ -0,0 +1,356 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 CESNET
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of CESNET nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SZEDATA2_IOBUF_H_
+#define _SZEDATA2_IOBUF_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_dev.h>
+
+/* IBUF offsets from the beginning of the PCI resource address space. */
+extern const uint32_t szedata2_ibuf_base_table[];
+extern const uint32_t szedata2_ibuf_count;
+
+/* OBUF offsets from the beginning of the PCI resource address space. */
+extern const uint32_t szedata2_obuf_base_table[];
+extern const uint32_t szedata2_obuf_count;
+
+enum szedata2_link_speed {
+ SZEDATA2_LINK_SPEED_DEFAULT = 0,
+ SZEDATA2_LINK_SPEED_10G,
+ SZEDATA2_LINK_SPEED_40G,
+ SZEDATA2_LINK_SPEED_100G,
+};
+
+enum szedata2_mac_check_mode {
+ SZEDATA2_MAC_CHMODE_PROMISC = 0x0,
+ SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1,
+ SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2,
+ SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3,
+};
+
+/**
+ * Macro that takes a pointer to the PCI resource structure (rsc)
+ * and returns a pointer to the mapped resource memory at the
+ * specified offset (offset), cast to the given type (type).
+ */
+#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
+ ((type)(((uint8_t *)(rsc)->addr) + (offset)))
+
+/**
+ * Maximum possible number of MAC addresses (limited by IBUF status
+ * register value MAC_COUNT which has 5 bits).
+ */
+#define SZEDATA2_IBUF_MAX_MAC_COUNT 32
+
+/**
+ * Structure describes IBUF address space.
+ */
+struct szedata2_ibuf {
+ /** Total Received Frames Counter low part */
+ uint32_t trfcl; /**< 0x00 */
+ /** Correct Frames Counter low part */
+ uint32_t cfcl; /**< 0x04 */
+ /** Discarded Frames Counter low part */
+ uint32_t dfcl; /**< 0x08 */
+ /** Counter of frames discarded due to buffer overflow low part */
+ uint32_t bodfcl; /**< 0x0C */
+ /** Total Received Frames Counter high part */
+ uint32_t trfch; /**< 0x10 */
+ /** Correct Frames Counter high part */
+ uint32_t cfch; /**< 0x14 */
+ /** Discarded Frames Counter high part */
+ uint32_t dfch; /**< 0x18 */
+ /** Counter of frames discarded due to buffer overflow high part */
+ uint32_t bodfch; /**< 0x1C */
+ /** IBUF enable register */
+ uint32_t ibuf_en; /**< 0x20 */
+ /** Error mask register */
+ uint32_t err_mask; /**< 0x24 */
+ /** IBUF status register */
+ uint32_t ibuf_st; /**< 0x28 */
+ /** IBUF command register */
+ uint32_t ibuf_cmd; /**< 0x2C */
+ /** Minimum frame length allowed */
+ uint32_t mfla; /**< 0x30 */
+ /** Frame MTU */
+ uint32_t mtu; /**< 0x34 */
+ /** MAC address check mode */
+ uint32_t mac_chmode; /**< 0x38 */
+ /** Octets Received OK Counter low part */
+ uint32_t orocl; /**< 0x3C */
+ /** Octets Received OK Counter high part */
+ uint32_t oroch; /**< 0x40 */
+ /** reserved */
+ uint8_t reserved[60]; /**< 0x44 */
+ /** IBUF memory for MAC addresses */
+ uint32_t mac_mem[2 * SZEDATA2_IBUF_MAX_MAC_COUNT]; /**< 0x80 */
+} __rte_packed;
+
+/**
+ * Structure describes OBUF address space.
+ */
+struct szedata2_obuf {
+ /** Total Sent Frames Counter low part */
+ uint32_t tsfcl; /**< 0x00 */
+ /** Octets Sent Counter low part */
+ uint32_t oscl; /**< 0x04 */
+ /** Total Discarded Frames Counter low part */
+ uint32_t tdfcl; /**< 0x08 */
+ /** reserved */
+ uint32_t reserved1; /**< 0x0C */
+ /** Total Sent Frames Counter high part */
+ uint32_t tsfch; /**< 0x10 */
+ /** Octets Sent Counter high part */
+ uint32_t osch; /**< 0x14 */
+ /** Total Discarded Frames Counter high part */
+ uint32_t tdfch; /**< 0x18 */
+ /** reserved */
+ uint32_t reserved2; /**< 0x1C */
+ /** OBUF enable register */
+ uint32_t obuf_en; /**< 0x20 */
+ /** reserved */
+ uint64_t reserved3; /**< 0x24 */
+ /** OBUF control register */
+ uint32_t ctrl; /**< 0x2C */
+ /** OBUF status register */
+ uint32_t obuf_st; /**< 0x30 */
+} __rte_packed;
+
+/**
+ * Wrapper for reading 4 bytes from device memory in correct endianness.
+ *
+ * @param addr
+ * Address for reading.
+ * @return
+ * 4 B value.
+ */
+static inline uint32_t
+szedata2_read32(const volatile void *addr)
+{
+ return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+/**
+ * Wrapper for writing 4 bytes to device memory in correct endianness.
+ *
+ * @param value
+ * Value to write.
+ * @param addr
+ * Address for writing.
+ */
+static inline void
+szedata2_write32(uint32_t value, volatile void *addr)
+{
+ rte_write32(rte_cpu_to_le_32(value), addr);
+}
+
+/**
+ * Get pointer to IBUF structure according to specified index.
+ *
+ * @param rsc
+ * Pointer to base address of memory resource.
+ * @param index
+ * Index of IBUF.
+ * @return
+ * Pointer to IBUF structure.
+ */
+static inline struct szedata2_ibuf *
+ibuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
+{
+ if (index >= szedata2_ibuf_count)
+ index = szedata2_ibuf_count - 1;
+ return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_ibuf_base_table[index],
+ struct szedata2_ibuf *);
+}
+
+/**
+ * Get pointer to OBUF structure according to specified index.
+ *
+ * @param rsc
+ * Pointer to base address of memory resource.
+ * @param index
+ * Index of OBUF.
+ * @return
+ * Pointer to OBUF structure.
+ */
+static inline struct szedata2_obuf *
+obuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
+{
+ if (index >= szedata2_obuf_count)
+ index = szedata2_obuf_count - 1;
+ return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_obuf_base_table[index],
+ struct szedata2_obuf *);
+}
+
+/**
+ * Checks if IBUF is enabled.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @return
+ * true if IBUF is enabled.
+ * false if IBUF is disabled.
+ */
+static inline bool
+ibuf_is_enabled(const volatile struct szedata2_ibuf *ibuf)
+{
+ return ((szedata2_read32(&ibuf->ibuf_en) & 0x1) != 0) ? true : false;
+}
+
+/**
+ * Enables IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ */
+static inline void
+ibuf_enable(volatile struct szedata2_ibuf *ibuf)
+{
+ szedata2_write32(szedata2_read32(&ibuf->ibuf_en) | 0x1, &ibuf->ibuf_en);
+}
+
+/**
+ * Disables IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ */
+static inline void
+ibuf_disable(volatile struct szedata2_ibuf *ibuf)
+{
+ szedata2_write32(szedata2_read32(&ibuf->ibuf_en) & ~0x1,
+ &ibuf->ibuf_en);
+}
+
+/**
+ * Checks if link is up.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @return
+ * true if ibuf link is up.
+ * false if ibuf link is down.
+ */
+static inline bool
+ibuf_is_link_up(const volatile struct szedata2_ibuf *ibuf)
+{
+ return ((szedata2_read32(&ibuf->ibuf_st) & 0x80) != 0) ? true : false;
+}
+
+/**
+ * Get current MAC address check mode from IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @return
+ * MAC address check mode constant.
+ */
+static inline enum szedata2_mac_check_mode
+ibuf_mac_mode_read(const volatile struct szedata2_ibuf *ibuf)
+{
+ switch (szedata2_read32(&ibuf->mac_chmode) & 0x3) {
+ case 0x0:
+ return SZEDATA2_MAC_CHMODE_PROMISC;
+ case 0x1:
+ return SZEDATA2_MAC_CHMODE_ONLY_VALID;
+ case 0x2:
+ return SZEDATA2_MAC_CHMODE_ALL_BROADCAST;
+ case 0x3:
+ return SZEDATA2_MAC_CHMODE_ALL_MULTICAST;
+ default:
+ return SZEDATA2_MAC_CHMODE_PROMISC;
+ }
+}
+
+/**
+ * Writes mode in MAC address check mode register in IBUF.
+ *
+ * @param ibuf
+ * Pointer to IBUF structure.
+ * @param mode
+ * MAC address check mode to set.
+ */
+static inline void
+ibuf_mac_mode_write(volatile struct szedata2_ibuf *ibuf,
+ enum szedata2_mac_check_mode mode)
+{
+ szedata2_write32((szedata2_read32(&ibuf->mac_chmode) & ~0x3) | mode,
+ &ibuf->mac_chmode);
+}
+
+/**
+ * Checks if OBUF is enabled.
+ *
+ * @param obuf
+ * Pointer to OBUF structure.
+ * @return
+ * true if OBUF is enabled.
+ * false if OBUF is disabled.
+ */
+static inline bool
+obuf_is_enabled(const volatile struct szedata2_obuf *obuf)
+{
+ return ((szedata2_read32(&obuf->obuf_en) & 0x1) != 0) ? true : false;
+}
+
+/**
+ * Enables OBUF.
+ *
+ * @param obuf
+ * Pointer to OBUF structure.
+ */
+static inline void
+obuf_enable(volatile struct szedata2_obuf *obuf)
+{
+ szedata2_write32(szedata2_read32(&obuf->obuf_en) | 0x1, &obuf->obuf_en);
+}
+
+/**
+ * Disables OBUF.
+ *
+ * @param obuf
+ * Pointer to OBUF structure.
+ */
+static inline void
+obuf_disable(volatile struct szedata2_obuf *obuf)
+{
+ szedata2_write32(szedata2_read32(&obuf->obuf_en) & ~0x1,
+ &obuf->obuf_en);
+}
+
+#endif /* _SZEDATA2_IOBUF_H_ */
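As an editorial sketch (not part of the patch; the function name, the promiscuous-mode choice and the already-mapped PCI resource are assumptions for illustration), the inline accessors above combine as follows:

#include <rte_log.h>
#include "szedata2_iobuf.h"

/* Editorial sketch: enable every IBUF/OBUF of a mapped szedata2 device.
 * 'rsc' is assumed to be the device's already-mapped PCI memory resource. */
static void
example_enable_iobufs(struct rte_mem_resource *rsc)
{
	uint32_t i;

	for (i = 0; i < szedata2_ibuf_count; i++) {
		volatile struct szedata2_ibuf *ibuf =
			ibuf_ptr_by_index(rsc, i);

		/* Accept all frames and start the input buffer. */
		ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_PROMISC);
		if (!ibuf_is_enabled(ibuf))
			ibuf_enable(ibuf);
		if (!ibuf_is_link_up(ibuf))
			RTE_LOG(INFO, PMD, "IBUF %u: link is down\n", i);
	}
	for (i = 0; i < szedata2_obuf_count; i++) {
		volatile struct szedata2_obuf *obuf =
			obuf_ptr_by_index(rsc, i);

		if (!obuf_is_enabled(obuf))
			obuf_enable(obuf);
	}
}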
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index e44de027..9acea839 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,6 +33,7 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
@@ -41,6 +42,8 @@
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
+#include <rte_debug.h>
+#include <rte_ip.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -72,6 +75,8 @@
#define ETH_TAP_IFACE_ARG "iface"
#define ETH_TAP_SPEED_ARG "speed"
#define ETH_TAP_REMOTE_ARG "remote"
+#define ETH_TAP_MAC_ARG "mac"
+#define ETH_TAP_MAC_FIXED "fixed"
#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
@@ -82,6 +87,7 @@ static const char *valid_arguments[] = {
ETH_TAP_IFACE_ARG,
ETH_TAP_SPEED_ARG,
ETH_TAP_REMOTE_ARG,
+ ETH_TAP_MAC_ARG,
NULL
};
@@ -110,10 +116,6 @@ enum ioctl_mode {
REMOTE_ONLY,
};
-static int
-tap_ioctl(struct pmd_internals *pmd, unsigned long request,
- struct ifreq *ifr, int set, enum ioctl_mode mode);
-
static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/* Tun/Tap allocation routine
@@ -122,7 +124,7 @@ static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
* supplied name.
*/
static int
-tun_alloc(struct pmd_internals *pmd, uint16_t qid)
+tun_alloc(struct pmd_internals *pmd)
{
struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
@@ -143,7 +145,7 @@ tun_alloc(struct pmd_internals *pmd, uint16_t qid)
fd = open(TUN_TAP_DEV_PATH, O_RDWR);
if (fd < 0) {
- RTE_LOG(ERR, PMD, "Unable to create TAP interface");
+ RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
goto error;
}
@@ -221,75 +223,6 @@ tun_alloc(struct pmd_internals *pmd, uint16_t qid)
strerror(errno));
}
- if (qid == 0) {
- struct ifreq ifr;
-
- /*
- * pmd->eth_addr contains the desired MAC, either from remote
- * or from a random assignment. Sync it with the tap netdevice.
- */
- ifr.ifr_hwaddr.sa_family = AF_LOCAL;
- rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
- ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
- goto error;
-
- pmd->if_index = if_nametoindex(pmd->name);
- if (!pmd->if_index) {
- RTE_LOG(ERR, PMD,
- "Could not find ifindex for %s: rte_flow won't be usable.\n",
- pmd->name);
- return fd;
- }
- if (!pmd->flower_support)
- return fd;
- if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD,
- "Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
- pmd->name);
- return fd;
- }
- if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD,
- "Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
- pmd->name);
- return fd;
- }
- if (pmd->remote_if_index) {
- /*
- * Flush usually returns negative value because it tries
- * to delete every QDISC (and on a running device, one
- * QDISC at least is needed). Ignore negative return
- * value.
- */
- qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
- if (qdisc_create_ingress(pmd->nlsk_fd,
- pmd->remote_if_index) < 0)
- goto remote_fail;
- LIST_INIT(&pmd->implicit_flows);
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_LOCAL_MAC) < 0)
- goto remote_fail;
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_BROADCAST) < 0)
- goto remote_fail;
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_BROADCASTV6) < 0)
- goto remote_fail;
- if (tap_flow_implicit_create(
- pmd, TAP_REMOTE_TX) < 0)
- goto remote_fail;
- }
- }
-
- return fd;
-
-remote_fail:
- RTE_LOG(ERR, PMD,
- "Could not set up remote flow rules for %s: remote disabled.\n",
- pmd->name);
- pmd->remote_if_index = 0;
- tap_flow_implicit_flush(pmd, NULL);
return fd;
error:
@@ -298,6 +231,60 @@ error:
return -1;
}
+static void
+tap_verify_csum(struct rte_mbuf *mbuf)
+{
+ uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
+ uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
+ uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+ unsigned int l2_len = sizeof(struct ether_hdr);
+ unsigned int l3_len;
+ uint16_t cksum = 0;
+ void *l3_hdr;
+ void *l4_hdr;
+
+ if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
+ l2_len += 4;
+ else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
+ l2_len += 8;
+ /* Don't verify checksum for packets with discontinuous L2 header */
+ if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
+ rte_pktmbuf_data_len(mbuf)))
+ return;
+ l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
+ if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
+ struct ipv4_hdr *iph = l3_hdr;
+
+ /* ihl contains the number of 4-byte words in the header */
+ l3_len = 4 * (iph->version_ihl & 0xf);
+ if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
+ return;
+
+ cksum = ~rte_raw_cksum(iph, l3_len);
+ mbuf->ol_flags |= cksum ?
+ PKT_RX_IP_CKSUM_BAD :
+ PKT_RX_IP_CKSUM_GOOD;
+ } else if (l3 == RTE_PTYPE_L3_IPV6) {
+ l3_len = sizeof(struct ipv6_hdr);
+ } else {
+ /* IPv6 extensions are not supported */
+ return;
+ }
+ if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
+ l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
+ /* Don't verify checksum for multi-segment packets. */
+ if (mbuf->nb_segs > 1)
+ return;
+ if (l3 == RTE_PTYPE_L3_IPV4)
+ cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
+ else if (l3 == RTE_PTYPE_L3_IPV6)
+ cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
+ mbuf->ol_flags |= cksum ?
+ PKT_RX_L4_CKSUM_BAD :
+ PKT_RX_L4_CKSUM_GOOD;
+ }
+}
+
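/*
 * Editorial sketch (not part of this patch): the IPv4 branch above relies on
 * the one's-complement property of the header checksum -- summing a correct
 * header, checksum field included, yields 0xffff, so the complement of
 * rte_raw_cksum() is zero exactly when the header is valid. The helper name
 * below is illustrative.
 */
static inline int
example_ipv4_hdr_csum_ok(const struct ipv4_hdr *iph, unsigned int l3_len)
{
	/* 0 => header checksum correct, non-zero => corrupted header */
	return (uint16_t)(~rte_raw_cksum(iph, l3_len)) == 0;
}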
/* Callback to handle the rx burst of packets to the correct interface and
* file descriptor(s) in a multi-queue setup.
*/
@@ -378,6 +365,8 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
seg->next = NULL;
mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
RTE_PTYPE_ALL_MASK);
+ if (rxq->rxmode->hw_ip_checksum)
+ tap_verify_csum(mbuf);
/* account for the receive frame */
bufs[num_rx++] = mbuf;
@@ -390,6 +379,56 @@ end:
return num_rx;
}
+static void
+tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
+ unsigned int l3_len)
+{
+ void *l3_hdr = packet + l2_len;
+
+ if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+ struct ipv4_hdr *iph = l3_hdr;
+ uint16_t cksum;
+
+ iph->hdr_checksum = 0;
+ cksum = rte_raw_cksum(iph, l3_len);
+ iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
+ }
+ if (ol_flags & PKT_TX_L4_MASK) {
+ uint16_t l4_len;
+ uint32_t cksum;
+ uint16_t *l4_cksum;
+ void *l4_hdr;
+
+ l4_hdr = packet + l2_len + l3_len;
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
+ else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
+ l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
+ else
+ return;
+ *l4_cksum = 0;
+ if (ol_flags & PKT_TX_IPV4) {
+ struct ipv4_hdr *iph = l3_hdr;
+
+ l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
+ cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
+ } else {
+ struct ipv6_hdr *ip6h = l3_hdr;
+
+ /* payload_len does not include ext headers */
+ l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
+ l3_len + sizeof(struct ipv6_hdr);
+ cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
+ }
+ cksum += rte_raw_cksum(l4_hdr, l4_len);
+ cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+ cksum = (~cksum) & 0xffff;
+ if (cksum == 0)
+ cksum = 0xffff;
+ *l4_cksum = cksum;
+ }
+}
+
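/*
 * Editorial sketch (not part of this patch): tap_tx_offload() above sums the
 * pseudo-header checksum and the raw checksum of the L4 segment in a 32-bit
 * accumulator, then finishes it as below; the function name is illustrative.
 */
static inline uint16_t
example_l4_cksum_finish(uint32_t sum)
{
	sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff); /* fold the carries */
	sum = (~sum) & 0xffff;                             /* one's complement */
	return sum ? (uint16_t)sum : 0xffff; /* 0 means "no checksum" for UDP */
}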
/* Callback to handle sending packets from the tap interface
*/
static uint16_t
@@ -410,6 +449,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct iovec iovecs[mbuf->nb_segs + 1];
struct tun_pi pi = { .flags = 0 };
struct rte_mbuf *seg = mbuf;
+ char m_copy[mbuf->data_len];
int n;
int j;
@@ -425,6 +465,19 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
rte_pktmbuf_mtod(seg, void *);
seg = seg->next;
}
+ if (mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
+ /* Support only packets with all data in the same seg */
+ if (mbuf->nb_segs > 1)
+ break;
+ /* To change checksums, work on a copy of data. */
+ rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
+ rte_pktmbuf_data_len(mbuf));
+ tap_tx_offload(m_copy, mbuf->ol_flags,
+ mbuf->l2_len, mbuf->l3_len);
+ iovecs[1].iov_base = m_copy;
+ }
/* copy the tx frame data */
n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
if (n <= 0)
@@ -442,6 +495,24 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return num_tx;
}
+static const char *
+tap_ioctl_req2str(unsigned long request)
+{
+ switch (request) {
+ case SIOCSIFFLAGS:
+ return "SIOCSIFFLAGS";
+ case SIOCGIFFLAGS:
+ return "SIOCGIFFLAGS";
+ case SIOCGIFHWADDR:
+ return "SIOCGIFHWADDR";
+ case SIOCSIFHWADDR:
+ return "SIOCSIFHWADDR";
+ case SIOCSIFMTU:
+ return "SIOCSIFMTU";
+ }
+ return "UNKNOWN";
+}
+
static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
struct ifreq *ifr, int set, enum ioctl_mode mode)
@@ -477,9 +548,7 @@ apply:
case SIOCSIFMTU:
break;
default:
- RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
- pmd->name);
- return -EINVAL;
+ RTE_ASSERT(!"unsupported request type: must not happen");
}
if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
goto error;
@@ -488,8 +557,8 @@ apply:
return 0;
error:
- RTE_LOG(ERR, PMD, "%s: ioctl(%lu) failed with error: %s\n",
- ifr->ifr_name, request, strerror(errno));
+ RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
+ __func__, tap_ioctl_req2str(request), strerror(errno), errno);
return -errno;
}
@@ -500,7 +569,7 @@ tap_link_set_down(struct rte_eth_dev *dev)
struct ifreq ifr = { .ifr_flags = IFF_UP };
dev->data->dev_link.link_status = ETH_LINK_DOWN;
- return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}
static int
@@ -586,6 +655,13 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = NULL;
dev_info->speed_capa = tap_dev_speed_capa();
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ dev_info->tx_offload_capa =
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
}
static void
@@ -644,7 +720,7 @@ tap_stats_reset(struct rte_eth_dev *dev)
}
static void
-tap_dev_close(struct rte_eth_dev *dev __rte_unused)
+tap_dev_close(struct rte_eth_dev *dev)
{
int i;
struct pmd_internals *internals = dev->data->dev_private;
@@ -659,6 +735,12 @@ tap_dev_close(struct rte_eth_dev *dev __rte_unused)
internals->rxq[i].fd = -1;
internals->txq[i].fd = -1;
}
+
+ if (internals->remote_if_index) {
+ /* Restore initial remote state */
+ ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
+ &internals->remote_initial_flags);
+ }
}
static void
@@ -718,7 +800,7 @@ tap_promisc_enable(struct rte_eth_dev *dev)
dev->data->promiscuous = 1;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}
@@ -730,7 +812,7 @@ tap_promisc_disable(struct rte_eth_dev *dev)
dev->data->promiscuous = 0;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}
@@ -742,7 +824,7 @@ tap_allmulti_enable(struct rte_eth_dev *dev)
dev->data->all_multicast = 1;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}
@@ -754,50 +836,51 @@ tap_allmulti_disable(struct rte_eth_dev *dev)
dev->data->all_multicast = 0;
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
- if (pmd->remote_if_index)
+ if (pmd->remote_if_index && !pmd->flow_isolate)
tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
-
static void
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ enum ioctl_mode mode = LOCAL_ONLY;
struct ifreq ifr;
if (is_zero_ether_addr(mac_addr)) {
RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
- dev->data->name);
+ dev->device->name);
return;
}
/* Check the actual current MAC address on the tap netdevice */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) != 0) {
- RTE_LOG(ERR, PMD,
- "%s: couldn't check current tap MAC address\n",
- dev->data->name);
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
return;
- }
if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
mac_addr))
return;
-
+ /* Check the current MAC address on the remote */
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
+ return;
+ if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ mode = LOCAL_AND_REMOTE;
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, LOCAL_AND_REMOTE) < 0)
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
return;
rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
- if (pmd->remote_if_index) {
+ if (pmd->remote_if_index && !pmd->flow_isolate) {
/* Replace MAC redirection rule after a MAC change */
if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
RTE_LOG(ERR, PMD,
"%s: Couldn't delete MAC redirection rule\n",
- dev->data->name);
+ dev->device->name);
return;
}
if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
RTE_LOG(ERR, PMD,
"%s: Couldn't add MAC redirection rule\n",
- dev->data->name);
+ dev->device->name);
}
}
@@ -809,30 +892,16 @@ tap_setup_queue(struct rte_eth_dev *dev,
struct pmd_internals *pmd = dev->data->dev_private;
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
- int fd;
+ int fd = rx->fd == -1 ? tx->fd : rx->fd;
- fd = rx->fd;
- if (fd < 0) {
- fd = tx->fd;
+ if (fd == -1) {
+ RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
+ pmd->name, qid);
+ fd = tun_alloc(pmd);
if (fd < 0) {
- RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
- pmd->name, qid);
- fd = tun_alloc(pmd, qid);
- if (fd < 0) {
- RTE_LOG(ERR, PMD, "tun_alloc(%s, %d) failed\n",
- pmd->name, qid);
- return -1;
- }
- if (qid == 0) {
- struct ifreq ifr;
-
- ifr.ifr_mtu = dev->data->mtu;
- if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1,
- LOCAL_AND_REMOTE) < 0) {
- close(fd);
- return -1;
- }
- }
+ RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
+ pmd->name);
+ return -1;
}
}
@@ -845,26 +914,6 @@ tap_setup_queue(struct rte_eth_dev *dev,
}
static int
-rx_setup_queue(struct rte_eth_dev *dev,
- struct pmd_internals *internals,
- uint16_t qid)
-{
- dev->data->rx_queues[qid] = &internals->rxq[qid];
-
- return tap_setup_queue(dev, internals, qid);
-}
-
-static int
-tx_setup_queue(struct rte_eth_dev *dev,
- struct pmd_internals *internals,
- uint16_t qid)
-{
- dev->data->tx_queues[qid] = &internals->txq[qid];
-
- return tap_setup_queue(dev, internals, qid);
-}
-
-static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
uint16_t nb_rx_desc,
@@ -894,17 +943,18 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
rxq->trigger_seen = 1; /* force initial burst */
rxq->in_port = dev->data->port_id;
rxq->nb_rx_desc = nb_desc;
- iovecs = rte_zmalloc_socket(dev->data->name, sizeof(*iovecs), 0,
+ iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
socket_id);
if (!iovecs) {
RTE_LOG(WARNING, PMD,
"%s: Couldn't allocate %d RX descriptors\n",
- dev->data->name, nb_desc);
+ dev->device->name, nb_desc);
return -ENOMEM;
}
rxq->iovecs = iovecs;
- fd = rx_setup_queue(dev, internals, rx_queue_id);
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ fd = tap_setup_queue(dev, internals, rx_queue_id);
if (fd == -1) {
ret = fd;
goto error;
@@ -918,7 +968,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
if (!*tmp) {
RTE_LOG(WARNING, PMD,
"%s: couldn't allocate memory for queue %d\n",
- dev->data->name, rx_queue_id);
+ dev->device->name, rx_queue_id);
ret = -ENOMEM;
goto error;
}
@@ -955,7 +1005,8 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
if (tx_queue_id >= internals->nb_queues)
return -1;
- ret = tx_setup_queue(dev, internals, tx_queue_id);
+ dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
+ ret = tap_setup_queue(dev, internals, tx_queue_id);
if (ret == -1)
return -1;
@@ -1115,32 +1166,16 @@ static const struct eth_dev_ops ops = {
.filter_ctrl = tap_dev_filter_ctrl,
};
-static int
-tap_kernel_support(struct pmd_internals *pmd)
-{
- struct utsname utsname;
- int ver[3];
-
- if (uname(&utsname) == -1 ||
- sscanf(utsname.release, "%d.%d.%d",
- &ver[0], &ver[1], &ver[2]) != 3)
- return 0;
- if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= FLOWER_KERNEL_VERSION)
- pmd->flower_support = 1;
- if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
- FLOWER_VLAN_KERNEL_VERSION)
- pmd->flower_vlan_support = 1;
- return 1;
-}
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
- char *remote_iface)
+ char *remote_iface, int fixed_mac_type)
{
int numa_node = rte_socket_id();
struct rte_eth_dev *dev;
struct pmd_internals *pmd;
struct rte_eth_dev_data *data;
+ struct ifreq ifr;
int i;
RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id());
@@ -1174,7 +1209,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
data->dev_private = pmd;
data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
data->numa_node = numa_node;
- data->drv_name = pmd_tap_drv.driver.name;
data->dev_link = pmd_link;
data->mac_addrs = &pmd->eth_addr;
@@ -1195,41 +1229,132 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
pmd->txq[i].fd = -1;
}
- tap_kernel_support(pmd);
- if (!pmd->flower_support)
- return 0;
- LIST_INIT(&pmd->flows);
+ if (fixed_mac_type) {
+ /* fixed mac = 00:64:74:61:70:<iface_idx> */
+ static int iface_idx;
+ char mac[ETHER_ADDR_LEN] = "\0dtap";
+
+ mac[ETHER_ADDR_LEN - 1] = iface_idx++;
+ rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN);
+ } else {
+ eth_random_addr((uint8_t *)&pmd->eth_addr);
+ }
+
+ /* Immediately create the netdevice (this will create the 1st queue). */
+ if (tap_setup_queue(dev, pmd, 0) == -1)
+ goto error_exit;
+
+ ifr.ifr_mtu = dev->data->mtu;
+ if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
+ goto error_exit;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
+ goto error_exit;
+
/*
- * If no netlink socket can be created, then it will fail when
- * creating/destroying flow rules.
+ * Set up everything related to rte_flow:
+ * - netlink socket
+ * - tap / remote if_index
+ * - mandatory QDISCs
+ * - rte_flow actual/implicit lists
+ * - implicit rules
*/
pmd->nlsk_fd = nl_init(0);
- if (strlen(remote_iface)) {
- struct ifreq ifr;
+ if (pmd->nlsk_fd == -1) {
+ RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ pmd->if_index = if_nametoindex(pmd->name);
+ if (!pmd->if_index) {
+ RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
+ goto disable_rte_flow;
+ }
+ if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ pmd->name);
+ goto disable_rte_flow;
+ }
+ LIST_INIT(&pmd->flows);
+ if (strlen(remote_iface)) {
pmd->remote_if_index = if_nametoindex(remote_iface);
- snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
- "%s", remote_iface);
if (!pmd->remote_if_index) {
- RTE_LOG(ERR, PMD, "Could not find %s ifindex: "
- "remote interface will remain unconfigured\n",
- remote_iface);
- return 0;
+ RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
+ pmd->name, remote_iface);
+ goto error_remote;
}
+ snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
+ "%s", remote_iface);
+
+ /* Save state of remote device */
+ tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
+
+ /* Replicate remote MAC address */
if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
- RTE_LOG(ERR, PMD, "Could not get remote MAC address\n");
- goto error_exit;
+ RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
+ pmd->name, pmd->remote_iface);
+ goto error_remote;
}
rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
ETHER_ADDR_LEN);
- } else {
- eth_random_addr((uint8_t *)&pmd->eth_addr);
+ /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to set MAC address from %s.\n",
+ pmd->name, remote_iface);
+ goto error_remote;
+ }
+
+ /*
+ * Flush usually returns negative value because it tries to
+ * delete every QDISC (and on a running device, one QDISC at
+ * least is needed). Ignore negative return value.
+ */
+ qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
+ if (qdisc_create_ingress(pmd->nlsk_fd,
+ pmd->remote_if_index) < 0) {
+ RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ pmd->remote_iface);
+ goto error_remote;
+ }
+ LIST_INIT(&pmd->implicit_flows);
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
+ tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
+ RTE_LOG(ERR, PMD,
+ "%s: failed to create implicit rules.\n",
+ pmd->name);
+ goto error_remote;
+ }
}
return 0;
+disable_rte_flow:
+ RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
+ strerror(errno), errno);
+ if (strlen(remote_iface)) {
+ RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
+ goto error_exit;
+ }
+ return 0;
+
+error_remote:
+ RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
+ strerror(errno), errno);
+ tap_flow_implicit_flush(pmd, NULL);
+
error_exit:
- RTE_LOG(DEBUG, PMD, "TAP Unable to initialize %s\n",
+ RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
rte_vdev_device_name(vdev));
rte_free(data);
@@ -1275,6 +1400,17 @@ set_remote_iface(const char *key __rte_unused,
return 0;
}
+static int
+set_mac_type(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ if (value &&
+ !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED)))
+ *(int *)extra_args = 1;
+ return 0;
+}
+
/* Open a TAP interface device.
*/
static int
@@ -1286,6 +1422,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
int speed;
char tap_name[RTE_ETH_NAME_MAX_LEN];
char remote_iface[RTE_ETH_NAME_MAX_LEN];
+ int fixed_mac_type = 0;
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
@@ -1296,7 +1433,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
if (params && (params[0] != '\0')) {
- RTE_LOG(DEBUG, PMD, "paramaters (%s)\n", params);
+ RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);
kvlist = rte_kvargs_parse(params, valid_arguments);
if (kvlist) {
@@ -1326,6 +1463,15 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
if (ret == -1)
goto leave;
}
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_MAC_ARG,
+ &set_mac_type,
+ &fixed_mac_type);
+ if (ret == -1)
+ goto leave;
+ }
}
}
pmd_link.link_speed = speed;
@@ -1333,7 +1479,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
name, tap_name);
- ret = eth_dev_tap_create(dev, tap_name, remote_iface);
+ ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);
leave:
if (ret == -1) {
@@ -1364,7 +1510,7 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
return 0;
internals = eth_dev->data->dev_private;
- if (internals->flower_support && internals->nlsk_fd) {
+ if (internals->nlsk_fd) {
tap_flow_flush(eth_dev, NULL);
tap_flow_implicit_flush(internals, NULL);
nl_final(internals->nlsk_fd);
@@ -1391,4 +1537,5 @@ RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
ETH_TAP_IFACE_ARG "=<string> "
ETH_TAP_SPEED_ARG "=<int> "
+ ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
ETH_TAP_REMOTE_ARG "=<string>");
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index ad497b3d..928a0454 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -37,6 +37,7 @@
#include <sys/queue.h>
#include <sys/uio.h>
#include <inttypes.h>
+#include <net/if.h>
#include <linux/if_tun.h>
@@ -83,12 +84,12 @@ struct pmd_internals {
char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
uint16_t nb_queues; /* Number of queues supported */
struct ether_addr eth_addr; /* Mac address of the device port */
+ struct ifreq remote_initial_flags; /* Remote netdevice flags on init */
int remote_if_index; /* remote netdevice IF_INDEX */
int if_index; /* IF_INDEX for the port */
int ioctl_sock; /* socket for ioctl calls */
int nlsk_fd; /* Netlink socket fd */
- int flower_support; /* 1 if kernel supports, else 0 */
- int flower_vlan_support; /* 1 if kernel supports, else 0 */
+ int flow_isolate; /* 1 if flow isolation is enabled */
LIST_HEAD(tap_flows, rte_flow) flows; /* rte_flow rules */
/* implicit rte_flow rules set when a remote device is active */
LIST_HEAD(tap_implicit_flows, rte_flow) implicit_flows;
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index a0dd5048..41f73452 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -82,6 +82,8 @@ enum {
};
#endif
+#define ISOLATE_HANDLE 1
+
struct rte_flow {
LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
struct rte_flow *remote_flow; /* associated remote flow */
@@ -98,6 +100,7 @@ struct convert_data {
struct remote_rule {
struct rte_flow_attr attr;
struct rte_flow_item items[2];
+ struct rte_flow_action actions[2];
int mirred;
};
@@ -126,11 +129,17 @@ tap_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
+static int
+tap_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error);
+
static const struct rte_flow_ops tap_flow_ops = {
.validate = tap_flow_validate,
.create = tap_flow_create,
.destroy = tap_flow_destroy,
.flush = tap_flow_flush,
+ .isolate = tap_flow_isolate,
};
/* Static initializer for items. */
@@ -258,6 +267,47 @@ static const struct tap_flow_items tap_flow_items[] = {
},
};
+/*
+ * TC rules, by growing priority
+ *
+ * Remote netdevice Tap netdevice
+ * +-------------+-------------+ +-------------+-------------+
+ * | Ingress | Egress | | Ingress | Egress |
+ * |-------------|-------------| |-------------|-------------|
+ * | | \ / | | | REMOTE TX | prio 1
+ * | | \ / | | | \ / | prio 2
+ * | EXPLICIT | \ / | | EXPLICIT | \ / | .
+ * | | \ / | | | \ / | .
+ * | RULES | X | | RULES | X | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ * | . | / \ | | . | / \ | .
+ *
+ * .... .... .... ....
+ *
+ * | . | \ / | | . | \ / | .
+ * | . | \ / | | . | \ / | .
+ * | | \ / | | | \ / |
+ * | LOCAL_MAC | \ / | | \ / | \ / | last prio - 5
+ * | PROMISC | X | | \ / | X | last prio - 4
+ * | ALLMULTI | / \ | | X | / \ | last prio - 3
+ * | BROADCAST | / \ | | / \ | / \ | last prio - 2
+ * | BROADCASTV6 | / \ | | / \ | / \ | last prio - 1
+ * | xx | / \ | | ISOLATE | / \ | last prio
+ * +-------------+-------------+ +-------------+-------------+
+ *
+ * The implicit flow rules are stored in a list, with the last two mandatorily
+ * being the ISOLATE and REMOTE_TX rules, e.g.:
+ *
+ * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
+ *
+ * That enables tap_flow_isolate() to remove implicit rules by popping the list
+ * head and removing it as long as it applies to the remote netdevice. The
+ * implicit rule for TX redirection is not removed, as isolation concerns only
+ * incoming traffic.
+ */
+
static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
[TAP_REMOTE_LOCAL_MAC] = {
.attr = {
@@ -364,6 +414,19 @@ static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
},
.mirred = TCA_EGRESS_MIRROR,
},
+ [TAP_ISOLATE] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_ISOLATE,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
};
/**
@@ -971,16 +1034,13 @@ priv_flow_process(struct pmd_internals *pmd,
if (err)
goto exit_item_not_supported;
if (flow && cur_item->convert) {
- if (!pmd->flower_vlan_support &&
- cur_item->convert == tap_flow_create_vlan)
- goto exit_item_not_supported;
err = cur_item->convert(items, &data);
if (err)
goto exit_item_not_supported;
}
}
if (flow) {
- if (pmd->flower_vlan_support && data.vlan) {
+ if (data.vlan) {
nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
htons(ETH_P_8021Q));
nlattr_add16(&flow->msg.nh,
@@ -1168,7 +1228,8 @@ tap_flow_create(struct rte_eth_dev *dev,
"Kernel refused TC filter rule creation (%d): %s\n",
errno, strerror(errno));
rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "overlapping rules");
+ NULL,
+ "overlapping rules or Kernel too old for flower support");
goto fail;
}
LIST_INSERT_HEAD(&pmd->flows, flow, next);
@@ -1213,7 +1274,8 @@ tap_flow_create(struct rte_eth_dev *dev,
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "overlapping rules");
+ NULL,
+ "overlapping rules or Kernel too old for flower support");
goto fail;
}
flow->remote_flow = remote_flow;
@@ -1318,6 +1380,78 @@ tap_flow_destroy(struct rte_eth_dev *dev,
}
/**
+ * Enable/disable flow isolation.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_isolate(struct rte_eth_dev *dev,
+ int set,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ if (set)
+ pmd->flow_isolate = 1;
+ else
+ pmd->flow_isolate = 0;
+ /*
+ * If netdevice is there, setup appropriate flow rules immediately.
+ * Otherwise it will be set when bringing up the netdevice (tun_alloc).
+ */
+ if (!pmd->rxq[0].fd)
+ return 0;
+ if (set) {
+ struct rte_flow *flow;
+
+ while (1) {
+ flow = LIST_FIRST(&pmd->implicit_flows);
+ if (!flow)
+ break;
+ /*
+ * Remove all implicit rules on the remote.
+ * Keep the local rule to redirect packets on TX.
+ * Keep also the last implicit local rule: ISOLATE.
+ */
+ if (flow->msg.t.tcm_ifindex == pmd->if_index)
+ break;
+ if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
+ goto error;
+ }
+ /* Switch the TC rule according to pmd->flow_isolate */
+ if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
+ goto error;
+ } else {
+ /* Switch the TC rule according to pmd->flow_isolate */
+ if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
+ goto error;
+ if (!pmd->remote_if_index)
+ return 0;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
+ goto error;
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
+ goto error;
+ if (dev->data->promiscuous &&
+ tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
+ goto error;
+ if (dev->data->all_multicast &&
+ tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
+ goto error;
+ }
+ return 0;
+error:
+ pmd->flow_isolate = 0;
+ return -rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "TC rule creation failed");
+}
+
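/*
 * Editorial sketch (not part of this patch): applications reach the handler
 * above through the generic flow API, assuming the rte_flow_isolate()
 * prototype from rte_flow.h in this release; the port number is an example.
 */
static int
example_isolate_port(uint8_t port_id)
{
	struct rte_flow_error flow_err;

	/* set = 1: only traffic matching explicit flow rules reaches DPDK */
	return rte_flow_isolate(port_id, 1, &flow_err);
}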
+/**
* Destroy all flows.
*
* @see rte_flow_flush()
@@ -1351,6 +1485,13 @@ tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
int tap_flow_implicit_create(struct pmd_internals *pmd,
enum implicit_rule_index idx)
{
+ uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
+ struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
+ struct rte_flow_action isolate_actions[2] = {
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
struct rte_flow_item *items = implicit_rte_flows[idx].items;
struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
struct rte_flow_item_eth eth_local = { .type = 0 };
@@ -1371,12 +1512,20 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
- RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow");
+ RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
goto fail;
}
msg = &remote_flow->msg;
if (idx == TAP_REMOTE_TX) {
if_index = pmd->if_index;
+ } else if (idx == TAP_ISOLATE) {
+ if_index = pmd->if_index;
+ /* Don't be exclusive for this rule, it can be changed later. */
+ flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
+ isolate_actions[0].type = pmd->flow_isolate ?
+ RTE_FLOW_ACTION_TYPE_DROP :
+ RTE_FLOW_ACTION_TYPE_PASSTHRU;
+ actions = isolate_actions;
} else if (idx == TAP_REMOTE_LOCAL_MAC) {
/*
* eth addr couldn't be set in implicit_rte_flows[] as it is not
@@ -1385,18 +1534,25 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
items = items_local;
}
- tc_init_msg(msg, if_index, RTM_NEWTFILTER,
- NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
- tap_flow_set_handle(remote_flow);
- if (priv_flow_process(pmd, attr, items, NULL, NULL,
+ /*
+ * The ISOLATE rule is always present and must have a static handle, as
+ * the action is changed depending on whether the feature is enabled (DROP) or
+ * disabled (PASSTHRU).
+ */
+ if (idx == TAP_ISOLATE)
+ remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
+ else
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, actions, NULL,
remote_flow, implicit_rte_flows[idx].mirred)) {
RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
goto fail;
}
err = nl_send(pmd->nlsk_fd, &msg->nh);
if (err < 0) {
- RTE_LOG(ERR, PMD, "Failure sending nl request");
+ RTE_LOG(ERR, PMD, "Failure sending nl request\n");
goto fail;
}
err = nl_recv_ack(pmd->nlsk_fd);
@@ -1481,10 +1637,6 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- struct pmd_internals *pmd = dev->data->dev_private;
-
- if (!pmd->flower_support)
- return -ENOTSUP;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
@@ -1492,7 +1644,7 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev,
*(const void **)arg = &tap_flow_ops;
return 0;
default:
- RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported",
+ RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
(void *)dev, filter_type);
}
return -EINVAL;
diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h
index 94414f18..9e332b03 100644
--- a/drivers/net/tap/tap_flow.h
+++ b/drivers/net/tap/tap_flow.h
@@ -58,6 +58,7 @@
*/
enum implicit_rule_index {
TAP_REMOTE_TX,
+ TAP_ISOLATE,
TAP_REMOTE_BROADCASTV6,
TAP_REMOTE_BROADCAST,
TAP_REMOTE_ALLMULTI,
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index 706250b8..915ae945 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# Copyright(c) 2016 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Cavium Networks nor the names of its
+# * Neither the name of Cavium, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
index 49a2646d..a4fc04cb 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.c
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
index fb9b2484..2d9448aa 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.h
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index 04b3b69c..2634285e 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -448,7 +448,8 @@ nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
int val;
- val = ((uint32_t)log2(len) - len_shift);
+ val = nicvf_log2_u32(len) - len_shift;
+
assert(val >= NICVF_QSIZE_MIN_VAL);
assert(val <= NICVF_QSIZE_MAX_VAL);
return val;
@@ -585,6 +586,7 @@ nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
/* Enable send queue & set queue size */
+ sq_cfg.cq_limit = 0;
sq_cfg.ena = 1;
sq_cfg.reset = 0;
sq_cfg.ldwb = 0;
@@ -801,7 +803,7 @@ nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
return NICVF_ERR_RSS_GET_SZ;
assert(rss->rss_size > 0);
- rss->hash_bits = (uint8_t)log2(rss->rss_size);
+ rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
rss->ind_tbl[idx] = tbl[idx];
@@ -822,7 +824,8 @@ nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
return NICVF_ERR_RSS_GET_SZ;
assert(rss->rss_size > 0);
- rss->hash_bits = (uint8_t)log2(rss->rss_size);
+ rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
+
for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
tbl[idx] = rss->ind_tbl[idx];
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 14fb2feb..b7d0a3dc 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index 79f83c8d..0fe673e6 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -1084,7 +1084,8 @@ struct cq_cfg { union { struct {
struct sq_cfg { union { struct {
#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
- uint64_t reserved_20_63:44;
+ uint64_t reserved_32_63:32;
+ uint64_t cq_limit:8;
uint64_t ena:1;
uint64_t reserved_18_18:1;
uint64_t reset:1;
@@ -1102,7 +1103,8 @@ struct sq_cfg { union { struct {
uint64_t reset:1;
uint64_t reserved_18_18:1;
uint64_t ena:1;
- uint64_t reserved_20_63:44;
+ uint64_t cq_limit:8;
+ uint64_t reserved_32_63:32;
#endif
};
uint64_t value;
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
index a072f19d..e26319bb 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.c
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
index 8675fe8f..6724ae1c 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.h
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
index 36da1200..f821c56f 100644
--- a/drivers/net/thunderx/base/nicvf_plat.h
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -59,6 +59,7 @@
/* utils */
#include <rte_common.h>
#define nicvf_min(x, y) RTE_MIN(x, y)
+#define nicvf_log2_u32(x) rte_log2_u32(x)
/* byte order */
#include <rte_byteorder.h>
@@ -80,7 +81,7 @@
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
#define nicvf_prefetch_store_keep(_ptr) ({\
- asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
+ asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (_ptr)); })
#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 2152029b..edc17f1d 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -111,7 +111,8 @@ nicvf_interrupt(void *arg)
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
if (dev->data->dev_conf.intr_conf.lsc)
nicvf_set_eth_link_status(nic, &dev->data->dev_link);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -1239,6 +1240,13 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
struct rte_mbuf mb_def;
RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) != 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) != 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) != 6);
mb_def.nb_segs = 1;
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
mb_def.port = rxq->port_id;
@@ -1366,11 +1374,11 @@ static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
@@ -2017,7 +2025,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
}
}
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
nic->device_id = pci_dev->id.device_id;
@@ -2079,7 +2087,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
goto fail;
}
- /* Detach port by returning postive error number */
+ /* Detach port by returning positive error number */
return ENOTSUP;
}
@@ -2164,7 +2172,8 @@ static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_nicvf_pmd = {
.id_table = pci_id_nicvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
+ RTE_PCI_DRV_INTR_LSC,
.probe = nicvf_eth_pci_probe,
.remove = nicvf_eth_pci_remove,
};
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index a74219fa..3734430f 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_logs.h b/drivers/net/thunderx/nicvf_logs.h
index 0667d468..a76d1987 100644
--- a/drivers/net/thunderx/nicvf_logs.h
+++ b/drivers/net/thunderx/nicvf_logs.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 6cae8341..e27776e6 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 3631ff22..cd1b754b 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 34c41b79..4ee6c3bb 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_svf.c b/drivers/net/thunderx/nicvf_svf.c
index f746e946..a7cc28a7 100644
--- a/drivers/net/thunderx/nicvf_svf.c
+++ b/drivers/net/thunderx/nicvf_svf.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/thunderx/nicvf_svf.h b/drivers/net/thunderx/nicvf_svf.h
index 6471aa57..e58ab9d1 100644
--- a/drivers/net/thunderx/nicvf_svf.h
+++ b/drivers/net/thunderx/nicvf_svf.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 257bf6d6..0dac5e60 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -607,7 +607,8 @@ new_device(int vid)
RTE_LOG(INFO, PMD, "New connection established\n");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
return 0;
}
@@ -661,7 +662,8 @@ destroy_device(int vid)
RTE_LOG(INFO, PMD, "Connection closed\n");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
static int
@@ -690,7 +692,8 @@ vring_state_changed(int vid, uint16_t vring, int enable)
RTE_LOG(INFO, PMD, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE,
+ NULL, NULL);
return 0;
}
@@ -973,6 +976,18 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+static uint32_t
+eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct vhost_queue *vq;
+
+ vq = dev->data->rx_queues[rx_queue_id];
+ if (vq == NULL)
+ return 0;
+
+ return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
+}
+
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
@@ -984,6 +999,7 @@ static const struct eth_dev_ops ops = {
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
.tx_done_cleanup = eth_tx_done_cleanup,
+ .rx_queue_count = eth_rx_queue_count,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
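With .rx_queue_count wired up, an application can poll vhost queue occupancy through the generic ethdev API; a minimal usage sketch (port and queue ids are placeholders, not from the patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: query how many descriptors are pending on a vhost Rx queue. */
static void report_backlog(uint8_t port_id, uint16_t queue_id)
{
	int backlog = rte_eth_rx_queue_count(port_id, queue_id);

	if (backlog > 0)
		printf("port %u rxq %u: %d descriptors pending\n",
		       port_id, queue_id, backlog);
}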
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 88118f1b..e320811e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -71,7 +71,7 @@ static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
@@ -89,16 +89,16 @@ static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
- uint32_t index, uint32_t vmdq __rte_unused);
+ uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int virtio_dev_queue_stats_mapping_set(
- __rte_unused struct rte_eth_dev *eth_dev,
- __rte_unused uint16_t queue_id,
- __rte_unused uint8_t stat_idx,
- __rte_unused uint8_t is_rx);
+ struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
/*
* The set of PCI devices this driver supports
@@ -1229,7 +1229,8 @@ virtio_interrupt_handler(void *param)
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
@@ -1355,7 +1356,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
return -1;
if (!hw->virtio_user_dev) {
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
}
@@ -1537,8 +1538,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
- ret = virtio_remap_pci(RTE_DEV_TO_PCI(eth_dev->device),
- hw);
+ ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
return ret;
}
@@ -1567,7 +1567,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
* virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
*/
if (!hw->virtio_user_dev) {
- ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw);
+ ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
return ret;
}
@@ -1609,7 +1609,7 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
virtio_interrupt_handler,
eth_dev);
if (eth_dev->device)
- rte_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
+ rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
@@ -1659,37 +1659,26 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
- uint64_t req_features;
int ret;
PMD_INIT_LOG(DEBUG, "configure");
- req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
- if (rxmode->hw_ip_checksum)
- req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rxmode->enable_lro)
- req_features |=
- (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO6);
-
- /* if request features changed, reinit the device */
- if (req_features != hw->req_guest_features) {
- ret = virtio_init_device(dev, req_features);
+
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ ret = virtio_init_device(dev, hw->req_guest_features);
if (ret < 0)
return ret;
}
- if (rxmode->hw_ip_checksum &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ /* Virtio does L4 checksum but not L3! */
+ if (rxmode->hw_ip_checksum) {
PMD_DRV_LOG(NOTICE,
- "rx ip checksum not available on this host");
+ "virtio does not support IP checksum");
return -ENOTSUP;
}
- if (rxmode->enable_lro &&
- (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
+ if (rxmode->enable_lro) {
PMD_DRV_LOG(NOTICE,
- "lro not available on this host");
+ "virtio does not support Large Receive Offload");
return -ENOTSUP;
}
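Since configure() now rejects IP-checksum and LRO requests outright, an application targeting the virtio PMD has to leave those rxmode flags clear; a minimal sketch of a virtio-safe configuration (placeholder port setup, not from the patch):

#include <rte_ethdev.h>

/* Sketch: setting hw_ip_checksum or enable_lro here would now make
 * rte_eth_dev_configure() return -ENOTSUP for virtio ports. */
static int configure_virtio_port(uint8_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.hw_ip_checksum = 0,
			.enable_lro = 0,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}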
@@ -1894,7 +1883,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
- dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
+ dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -1915,8 +1904,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
- if ((host_features & tso_mask) == tso_mask)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index b7b3d615..e6da6802 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -38,6 +38,7 @@
#endif
#include <rte_io.h>
+#include <rte_bus.h>
#include "virtio_pci.h"
#include "virtio_logs.h"
@@ -579,6 +580,8 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
return base + offset;
}
+#define PCI_MSIX_ENABLE 0x8000
+
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
@@ -605,8 +608,17 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
break;
}
- if (cap.cap_vndr == PCI_CAP_ID_MSIX)
- hw->use_msix = 1;
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices also expose this capability, so we
+ * additionally check whether MSI-X is actually enabled.
+ * The 1st byte is the cap ID, the 2nd byte the position of
+ * the next cap, and the next two bytes the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = 1;
+ }
if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
PMD_INIT_LOG(DEBUG,
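For background (standard PCI MSI-X capability layout, not something introduced by this patch): the Message Control word sits two bytes into the capability, and bit 15 of that word is the MSI-X Enable bit, which is what the 0x8000 mask above tests.

/* Layout of the first four bytes of the PCI MSI-X capability: */
#include <stdint.h>

struct pci_msix_cap_hdr {
	uint8_t  cap_id;	/* 0x11 (PCI_CAP_ID_MSIX) */
	uint8_t  next_cap;	/* offset of the next capability */
	uint16_t msg_ctrl;	/* bit 15 = MSI-X Enable, i.e. PCI_MSIX_ENABLE */
};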
@@ -684,8 +696,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
if (dev->kdrv == RTE_KDRV_UNKNOWN &&
(!dev->device.devargs ||
- dev->device.devargs->type !=
- RTE_DEVTYPE_WHITELISTED_PCI)) {
+ dev->device.devargs->bus !=
+ rte_bus_find_by_name("pci"))) {
PMD_INIT_LOG(INFO,
"skip kernel managed virtio device.");
return 1;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index fbc96dfb..e30377c5 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -744,8 +744,9 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
virtio_rmb();
- num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
- num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
+ num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+ if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+ num = VIRTIO_MBUF_BURST_SZ;
if (likely(num > DESC_PER_CACHELINE))
num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
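The rewritten clamp is just two successive minimum operations ahead of the pre-existing cache-line trim; an equivalent standalone helper, for illustration only (names are mine):

#include <stdint.h>

/* Sketch: cap the burst at both the caller's request and the PMD limit. */
static inline uint16_t clamp_burst(uint16_t nb_used, uint16_t nb_pkts,
				   uint16_t burst_max)
{
	uint16_t num = nb_used < nb_pkts ? nb_used : nb_pkts;

	return num < burst_max ? num : burst_max;
}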
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index ecc62ada..6b40c7f7 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016
+ * Copyright (C) Cavium, Inc. 2016
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 450404ba..79412714 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -41,7 +41,6 @@
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>
-#include <unistd.h>
#include "vhost.h"
#include "virtio_user_dev.h"
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 280406c0..c9614443 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -35,7 +35,6 @@
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
-#include <sys/types.h>
#include <sys/socket.h>
#include <rte_malloc.h>
@@ -388,7 +387,7 @@ virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
}
/* Dev initialization routine. Invoked once for each virtio vdev at
- * EAL init time, see rte_eal_dev_init().
+ * EAL init time, see rte_bus_probe().
* Returns 0 on success.
*/
static int
@@ -556,7 +555,6 @@ virtio_user_pmd_remove(struct rte_vdev_device *vdev)
virtio_user_dev_uninit(dev);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 2b8092d9..39109919 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -57,7 +57,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
-#include <rte_atomic.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -83,10 +82,18 @@ static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
+static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats,
+ unsigned int n);
+static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned int n);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *
@@ -96,10 +103,8 @@ static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
+static void vmxnet3_interrupt_handler(void *param);
-#if PROCESS_SYS_EVENTS == 1
-static void vmxnet3_process_events(struct vmxnet3_hw *);
-#endif
/*
* The set of PCI devices this driver supports
*/
@@ -121,6 +126,8 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
.link_update = vmxnet3_dev_link_update,
.stats_get = vmxnet3_dev_stats_get,
+ .xstats_get_names = vmxnet3_dev_xstats_get_names,
+ .xstats_get = vmxnet3_dev_xstats_get,
.mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
@@ -132,6 +139,27 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.tx_queue_release = vmxnet3_dev_tx_queue_release,
};
+struct vmxnet3_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+/* tx_qX_ is prepended to the name string here */
+static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
+ {"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
+ {"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
+ {"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
+ {"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
+};
+
+/* rx_qX_ is prepended to the name string here */
+static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
+ {"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
+ {"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
+ {"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
+ {"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
+};
+
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
const char *post_string, int socket_id,
@@ -141,7 +169,7 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->data->drv_name, dev->data->port_id, post_string);
+ dev->device->driver->name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
if (!reuse) {
@@ -221,10 +249,22 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
PMD_INIT_FUNC_TRACE();
hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
- for (i = 0; i < VMXNET3_MAX_INTRS; i++)
+ for (i = 0; i < hw->num_intrs; i++)
VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
+static void
+vmxnet3_enable_intr(struct vmxnet3_hw *hw)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->shared->devRead.intrConf.intrCtrl &= ~VMXNET3_IC_DISABLE_ALL;
+ for (i = 0; i < hw->num_intrs; i++)
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 0);
+}
+
/*
* Gets tx data ring descriptor size.
*/
@@ -259,7 +299,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/*
* for secondary processes, we don't initialize any further as primary
@@ -351,6 +391,10 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
hw->rxdata_desc_size);
+ /* clear shadow stats */
+ memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
+ memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
+
return 0;
}
@@ -392,7 +436,7 @@ static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_vmxnet3_pmd = {
.id_table = pci_id_vmxnet3_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = eth_vmxnet3_pci_probe,
.remove = eth_vmxnet3_pci_remove,
};
@@ -615,11 +659,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
/*
* Set number of interrupts to 1
- * PMD disables all the interrupts but this is MUST to activate device
- * It needs at least one interrupt for link events to handle
- * So we'll disable it later after device activation if needed
+ * The PMD disables all interrupts by default, but at least one
+ * interrupt must be configured to activate the device and to
+ * handle link events
*/
- devRead->intrConf.numIntrs = 1;
+ hw->num_intrs = devRead->intrConf.numIntrs = 1;
devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
for (i = 0; i < hw->num_tx_queues; i++) {
@@ -689,7 +733,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
vmxnet3_dev_vlan_offload_set(dev,
ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
- vmxnet3_write_mac(hw, hw->perm_addr);
+ vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
return VMXNET3_SUCCESS;
}
@@ -707,10 +751,27 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ /* Save stats before it is reset by CMD_ACTIVATE */
+ vmxnet3_hw_stats_save(hw);
+
ret = vmxnet3_setup_driver_shared(dev);
if (ret != VMXNET3_SUCCESS)
return ret;
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ /* Setup interrupt callback */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ vmxnet3_interrupt_handler, dev);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) < 0) {
+ PMD_INIT_LOG(ERR, "interrupt enable failed");
+ return -EIO;
+ }
+ }
+
/* Exchange shared data with device */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
VMXNET3_GET_ADDR_LO(hw->sharedPA));
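Because the PMD now registers an interrupt handler when intr_conf.lsc is set, an application opts in through its port configuration; a sketch with a placeholder port id (not from the patch):

#include <rte_ethdev.h>

/* Sketch: request link-state-change interrupts so the callback setup
 * added above runs at dev_start time. */
static int configure_with_lsc(uint8_t port_id)
{
	struct rte_eth_conf conf = {
		.intr_conf = { .lsc = 1 },
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}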
@@ -758,14 +819,19 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
/* Setting proper Rx Mode and issue Rx Mode Update command */
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
- /*
- * Don't need to handle events for now
- */
-#if PROCESS_SYS_EVENTS == 1
- events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
- PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
- vmxnet3_process_events(hw);
-#endif
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ vmxnet3_enable_intr(hw);
+
+ /*
+ * Update link state from device since this won't be
+ * done upon starting with lsc in use. This is done
+ * only after enabling interrupts to avoid any race
+ * where the link state could change without an
+ * interrupt being fired.
+ */
+ __vmxnet3_dev_link_update(dev, 0);
+ }
+
return VMXNET3_SUCCESS;
}
@@ -788,6 +854,15 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
/* disable interrupts */
vmxnet3_disable_intr(hw);
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ rte_intr_disable(&pci_dev->intr_handle);
+
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ vmxnet3_interrupt_handler, dev);
+ }
+
/* quiesce the device first */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
@@ -820,47 +895,190 @@ vmxnet3_dev_close(struct rte_eth_dev *dev)
}
static void
+vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_TxStats *res)
+{
+#define VMXNET3_UPDATE_TX_STAT(h, i, f, r) \
+ ((r)->f = (h)->tqd_start[(i)].stats.f + \
+ (h)->saved_tx_stats[(i)].f)
+
+ VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
+ VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);
+
+#undef VMXNET3_UPDATE_TX_STAT
+}
+
+static void
+vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_RxStats *res)
+{
+#define VMXNET3_UPDATE_RX_STAT(h, i, f, r) \
+ ((r)->f = (h)->rqd_start[(i)].stats.f + \
+ (h)->saved_rx_stats[(i)].f)
+
+ VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
+ VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
+
+#undef VMXNET3_UPDATE_RX_STAT
+}
+
+static void
+vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
+{
+ unsigned int i;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
+
+ for (i = 0; i < hw->num_tx_queues; i++)
+ vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
+ for (i = 0; i < hw->num_rx_queues; i++)
+ vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
+}
+
+static int
+vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+ unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
+ dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
+
+ if (!xstats_names || n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ vmxnet3_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ vmxnet3_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+ unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
+ dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq == NULL)
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
+ xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
+ vmxnet3_rxq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq == NULL)
+ continue;
+
+ for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
+ xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
+ vmxnet3_txq_stat_strings[t].offset);
+ xstats[count].id = count;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static void
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned int i;
struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct UPT1_TxStats txStats;
+ struct UPT1_RxStats rxStats;
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
for (i = 0; i < hw->num_tx_queues; i++) {
- struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
+ vmxnet3_hw_tx_stats_get(hw, i, &txStats);
- stats->q_opackets[i] = txStats->ucastPktsTxOK +
- txStats->mcastPktsTxOK +
- txStats->bcastPktsTxOK;
- stats->q_obytes[i] = txStats->ucastBytesTxOK +
- txStats->mcastBytesTxOK +
- txStats->bcastBytesTxOK;
+ stats->q_opackets[i] = txStats.ucastPktsTxOK +
+ txStats.mcastPktsTxOK +
+ txStats.bcastPktsTxOK;
+
+ stats->q_obytes[i] = txStats.ucastBytesTxOK +
+ txStats.mcastBytesTxOK +
+ txStats.bcastBytesTxOK;
stats->opackets += stats->q_opackets[i];
stats->obytes += stats->q_obytes[i];
- stats->oerrors += txStats->pktsTxError + txStats->pktsTxDiscard;
+ stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
}
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
for (i = 0; i < hw->num_rx_queues; i++) {
- struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
+ vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
- stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
- rxStats->mcastPktsRxOK +
- rxStats->bcastPktsRxOK;
+ stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
+ rxStats.mcastPktsRxOK +
+ rxStats.bcastPktsRxOK;
- stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
- rxStats->mcastBytesRxOK +
- rxStats->bcastBytesRxOK;
+ stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
+ rxStats.mcastBytesRxOK +
+ rxStats.bcastBytesRxOK;
stats->ipackets += stats->q_ipackets[i];
stats->ibytes += stats->q_ibytes[i];
- stats->q_errors[i] = rxStats->pktsRxError;
- stats->ierrors += rxStats->pktsRxError;
- stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
+ stats->q_errors[i] = rxStats.pktsRxError;
+ stats->ierrors += rxStats.pktsRxError;
+ stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
}
}
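The new xstats ops expose the per-queue counters under rx_qN_*/tx_qN_* names; the usual two-pass retrieval pattern applies. A usage sketch with a placeholder port id and only minimal error handling:

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch: dump the vmxnet3 extended stats via the generic xstats API. */
static void dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *vals;
	int n, i;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	if (names != NULL && vals != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, vals, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n", names[i].name,
			       vals[i].value);
	}
	free(names);
	free(vals);
}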
@@ -868,7 +1086,7 @@ static void
vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
- dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
@@ -931,17 +1149,13 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
/* return 0 means link status changed, -1 means not changed */
static int
-vmxnet3_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete)
+__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
struct rte_eth_link old = { 0 }, link;
uint32_t ret;
- /* Link status doesn't change for stopped dev */
- if (dev->data->dev_started == 0)
- return -1;
-
memset(&link, 0, sizeof(link));
vmxnet3_dev_atomic_read_link_status(dev, &old);
@@ -960,6 +1174,16 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
return (old.link_status == link.link_status) ? -1 : 0;
}
+static int
+vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ /* Link status doesn't change for stopped dev */
+ if (dev->data->dev_started == 0)
+ return -1;
+
+ return __vmxnet3_dev_link_update(dev, wait_to_complete);
+}
+
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
@@ -995,7 +1219,10 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
- memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ else
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
@@ -1076,16 +1303,14 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
}
-#if PROCESS_SYS_EVENTS == 1
static void
-vmxnet3_process_events(struct vmxnet3_hw *hw)
+vmxnet3_process_events(struct rte_eth_dev *dev)
{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t events = hw->shared->ecr;
- if (!events) {
- PMD_INIT_LOG(ERR, "No events to process");
+ if (!events)
return;
- }
/*
* ECR bits when written with 1b are cleared. Hence write
@@ -1094,10 +1319,13 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
/* Check if link state has changed */
- if (events & VMXNET3_ECR_LINK)
- PMD_INIT_LOG(ERR,
- "Process events in %s(): VMXNET3_ECR_LINK event",
- __func__);
+ if (events & VMXNET3_ECR_LINK) {
+ PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
+ if (vmxnet3_dev_link_update(dev, 0) == 0)
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ }
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
@@ -1105,11 +1333,11 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
VMXNET3_CMD_GET_QUEUE_STATUS);
if (hw->tqd_start->status.stopped)
- PMD_INIT_LOG(ERR, "tq error 0x%x",
- hw->tqd_start->status.error);
+ PMD_DRV_LOG(ERR, "tq error 0x%x",
+ hw->tqd_start->status.error);
if (hw->rqd_start->status.stopped)
- PMD_INIT_LOG(ERR, "rq error 0x%x",
+ PMD_DRV_LOG(ERR, "rq error 0x%x",
hw->rqd_start->status.error);
/* Reset the device */
@@ -1117,12 +1345,23 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
}
if (events & VMXNET3_ECR_DIC)
- PMD_INIT_LOG(ERR, "Device implementation change event.");
+ PMD_DRV_LOG(DEBUG, "Device implementation change event.");
if (events & VMXNET3_ECR_DEBUG)
- PMD_INIT_LOG(ERR, "Debug event generated by device.");
+ PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
+}
+
+static void
+vmxnet3_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ vmxnet3_process_events(dev);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) < 0)
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
}
-#endif
RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 7a032629..b48058af 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -106,6 +106,8 @@ struct vmxnet3_hw {
uint16_t txdata_desc_size; /* tx data ring buffer size */
uint16_t rxdata_desc_size; /* rx data ring buffer size */
+ uint8_t num_intrs;
+
Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
@@ -122,6 +124,8 @@ struct vmxnet3_hw {
Vmxnet3_MemRegs *memRegs;
uint64_t memRegsPA;
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
+ UPT1_TxStats saved_tx_stats[VMXNET3_MAX_TX_QUEUES];
+ UPT1_RxStats saved_rx_stats[VMXNET3_MAX_RX_QUEUES];
};
#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index e865c675..d9cf4373 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -593,24 +593,40 @@ static inline void
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
struct rte_mbuf *mbuf)
{
- uint32_t val = 0;
+ uint32_t val;
struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
struct Vmxnet3_RxDesc *rxd =
(struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
- if (ring_id == 0)
+ if (ring_id == 0) {
+ /* Usually: One HEAD type buf per packet
+ * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+ * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+ */
+
+ /* We use single packet buffer so all heads here */
val = VMXNET3_RXD_BTYPE_HEAD;
- else
+ } else {
+ /* All BODY type buffers for 2nd ring */
val = VMXNET3_RXD_BTYPE_BODY;
+ }
+ /*
+ * Load mbuf pointer into buf_info[ring_size]
+ * buf_info structure is equivalent to cookie for virtio-virtqueue
+ */
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+ /* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
+
+ /* After this point rxd->addr MUST not be NULL */
rxd->btype = val;
rxd->len = buf_info->len;
+ /* Flip gen bit at the end to change ownership */
rxd->gen = ring->gen;
vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -629,28 +645,11 @@ static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
int err = 0;
- uint32_t i = 0, val = 0;
+ uint32_t i = 0;
struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
- if (ring_id == 0) {
- /* Usually: One HEAD type buf per packet
- * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
- * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
- */
-
- /* We use single packet buffer so all heads here */
- val = VMXNET3_RXD_BTYPE_HEAD;
- } else {
- /* All BODY type buffers for 2nd ring */
- val = VMXNET3_RXD_BTYPE_BODY;
- }
-
while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
- struct Vmxnet3_RxDesc *rxd;
struct rte_mbuf *mbuf;
- vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
-
- rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
/* Allocate blank mbuf for the current Rx Descriptor */
mbuf = rte_mbuf_raw_alloc(rxq->mp);
@@ -661,25 +660,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
break;
}
- /*
- * Load mbuf pointer into buf_info[ring_size]
- * buf_info structure is equivalent to cookie for virtio-virtqueue
- */
- buf_info->m = mbuf;
- buf_info->len = (uint16_t)(mbuf->buf_len -
- RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
-
- /* Load Rx Descriptor with the buffer's GPA */
- rxd->addr = buf_info->bufPA;
-
- /* After this point rxd->addr MUST not be NULL */
- rxd->btype = val;
- rxd->len = buf_info->len;
- /* Flip gen bit at the end to change ownership */
- rxd->gen = ring->gen;
-
- vmxnet3_cmd_ring_adv_next2fill(ring);
+ vmxnet3_renew_desc(rxq, ring_id, mbuf);
i++;
}
@@ -800,6 +781,12 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
(int)(rcd - (struct Vmxnet3_RxCompDesc *)
rxq->comp_ring.base), rcd->rxdIdx);
rte_pktmbuf_free_seg(rxm);
+ if (rxq->start_seg) {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ rxq->start_seg = NULL;
+ rte_pktmbuf_free(start);
+ }
goto rcd_done;
}
@@ -901,7 +888,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c
index 7bd29fae..e404b775 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.c
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.c
@@ -672,7 +672,6 @@ eth_dev_xenvirt_create(const char *name, const char *params,
eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->drv_name = pmd_xenvirt_drv.driver.name;
eth_dev->data->numa_node = numa_node;
eth_dev->rx_pkt_burst = eth_xenvirt_rx;
diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h
index 350eae3e..1bb6877c 100644
--- a/drivers/net/xenvirt/virtqueue.h
+++ b/drivers/net/xenvirt/virtqueue.h
@@ -123,7 +123,7 @@ void virtqueue_dump(struct virtqueue *vq);
*/
struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_full(const struct virtqueue *vq)
{
return vq->vq_free_cnt == 0;
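For reference (my reading of rte_common.h in 17.08, not part of this diff), the attribute-based declarations are being switched to the portability macro, which expands to roughly:

/* Approximate definition the new declarations rely on: */
#define __rte_always_inline inline __attribute__((always_inline))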
@@ -131,7 +131,7 @@ virtqueue_full(const struct virtqueue *vq)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
uint16_t avail_idx;
@@ -148,7 +148,7 @@ vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
vq->vq_ring.avail->idx++;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp;
@@ -171,7 +171,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
vq->vq_desc_head_idx = desc_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
{
const uint16_t needed = 1;
@@ -201,7 +201,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
@@ -242,7 +242,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
return 0;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
{
struct vring_used_elem *uep;