Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/baseband/meson.build7
-rw-r--r--drivers/baseband/null/meson.build7
-rw-r--r--drivers/bus/dpaa/Makefile4
-rw-r--r--drivers/bus/dpaa/base/fman/netcfg_layer.c2
-rw-r--r--drivers/bus/dpaa/base/qbman/bman_driver.c17
-rw-r--r--drivers/bus/dpaa/base/qbman/qman.c72
-rw-r--r--drivers/bus/dpaa/base/qbman/qman_driver.c7
-rw-r--r--drivers/bus/dpaa/dpaa_bus.c19
-rw-r--r--drivers/bus/dpaa/include/compat.h20
-rw-r--r--drivers/bus/dpaa/include/fsl_fman_crc64.h8
-rw-r--r--drivers/bus/dpaa/include/fsl_qman.h20
-rw-r--r--drivers/bus/dpaa/include/fsl_usd.h6
-rw-r--r--drivers/bus/dpaa/meson.build5
-rw-r--r--drivers/bus/dpaa/rte_bus_dpaa_version.map16
-rw-r--r--drivers/bus/dpaa/rte_dpaa_bus.h6
-rw-r--r--drivers/bus/fslmc/Makefile3
-rw-r--r--drivers/bus/fslmc/fslmc_bus.c35
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.c13
-rw-r--r--drivers/bus/fslmc/mc/dpbp.c10
-rw-r--r--drivers/bus/fslmc/mc/dpci.c197
-rw-r--r--drivers/bus/fslmc/mc/dpcon.c30
-rw-r--r--drivers/bus/fslmc/mc/dpdmai.c14
-rw-r--r--drivers/bus/fslmc/mc/dpio.c9
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp.h1
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp_cmd.h16
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpci.h47
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpci_cmd.h62
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpcon.h19
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpdmai.h5
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h20
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpmng.h2
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpopr.h85
-rw-r--r--drivers/bus/fslmc/meson.build5
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c7
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.c197
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.h4
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_pvt.h53
-rw-r--r--drivers/bus/fslmc/qbman/include/compat.h3
-rw-r--r--drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h33
-rw-r--r--drivers/bus/fslmc/qbman/qbman_portal.c764
-rw-r--r--drivers/bus/fslmc/qbman/qbman_portal.h30
-rw-r--r--drivers/bus/fslmc/qbman/qbman_sys.h100
-rw-r--r--drivers/bus/fslmc/qbman/qbman_sys_decl.h4
-rw-r--r--drivers/bus/fslmc/rte_bus_fslmc_version.map13
-rw-r--r--drivers/bus/ifpga/Makefile2
-rw-r--r--drivers/bus/ifpga/ifpga_bus.c33
-rw-r--r--drivers/bus/ifpga/meson.build2
-rw-r--r--drivers/bus/ifpga/rte_bus_ifpga.h3
-rw-r--r--drivers/bus/pci/Makefile5
-rw-r--r--drivers/bus/pci/bsd/pci.c6
-rw-r--r--drivers/bus/pci/linux/Makefile2
-rw-r--r--drivers/bus/pci/linux/pci.c46
-rw-r--r--drivers/bus/pci/linux/pci_vfio.c268
-rw-r--r--drivers/bus/pci/meson.build9
-rw-r--r--drivers/bus/pci/pci_common.c143
-rw-r--r--drivers/bus/pci/pci_common_uio.c33
-rw-r--r--drivers/bus/pci/pci_params.c78
-rw-r--r--drivers/bus/pci/private.h39
-rw-r--r--drivers/bus/pci/rte_bus_pci.h10
-rw-r--r--drivers/bus/vdev/Makefile5
-rw-r--r--drivers/bus/vdev/meson.build7
-rw-r--r--drivers/bus/vdev/vdev.c50
-rw-r--r--drivers/bus/vdev/vdev_params.c66
-rw-r--r--drivers/bus/vdev/vdev_private.h26
-rw-r--r--drivers/bus/vmbus/Makefile2
-rw-r--r--drivers/bus/vmbus/linux/vmbus_bus.c3
-rw-r--r--drivers/bus/vmbus/meson.build2
-rw-r--r--drivers/bus/vmbus/private.h6
-rw-r--r--drivers/bus/vmbus/rte_bus_vmbus.h15
-rw-r--r--drivers/bus/vmbus/rte_bus_vmbus_version.map7
-rw-r--r--drivers/bus/vmbus/vmbus_channel.c26
-rw-r--r--drivers/bus/vmbus/vmbus_common.c27
-rw-r--r--drivers/common/Makefile15
-rw-r--r--drivers/common/cpt/Makefile25
-rw-r--r--drivers/common/cpt/cpt_common.h91
-rw-r--r--drivers/common/cpt/cpt_hw_types.h522
-rw-r--r--drivers/common/cpt/cpt_mcode_defines.h386
-rw-r--r--drivers/common/cpt/cpt_pmd_logs.h50
-rw-r--r--drivers/common/cpt/cpt_pmd_ops_helper.c41
-rw-r--r--drivers/common/cpt/cpt_pmd_ops_helper.h34
-rw-r--r--drivers/common/cpt/cpt_request_mgr.h185
-rw-r--r--drivers/common/cpt/cpt_ucode.h3648
-rw-r--r--drivers/common/cpt/meson.build8
-rw-r--r--drivers/common/cpt/rte_common_cpt_version.map6
-rw-r--r--drivers/common/dpaax/Makefile31
-rw-r--r--drivers/common/dpaax/dpaax_iova_table.c465
-rw-r--r--drivers/common/dpaax/dpaax_iova_table.h105
-rw-r--r--drivers/common/dpaax/dpaax_logs.h39
-rw-r--r--drivers/common/dpaax/meson.build12
-rw-r--r--drivers/common/dpaax/rte_common_dpaax_version.map11
-rw-r--r--drivers/common/meson.build2
-rw-r--r--drivers/common/mvep/Makefile38
-rw-r--r--drivers/common/mvep/meson.build19
-rw-r--r--drivers/common/mvep/mvep_common.c45
-rw-r--r--drivers/common/mvep/rte_common_mvep_version.map6
-rw-r--r--drivers/common/mvep/rte_mvep_common.h21
-rw-r--r--drivers/common/qat/qat_common.h3
-rw-r--r--drivers/common/qat/qat_device.c60
-rw-r--r--drivers/common/qat/qat_device.h12
-rw-r--r--drivers/common/qat/qat_qp.c48
-rw-r--r--drivers/common/qat/qat_qp.h1
-rw-r--r--drivers/compress/octeontx/include/zip_regs.h4
-rw-r--r--drivers/compress/octeontx/otx_zip.h6
-rw-r--r--drivers/compress/octeontx/otx_zip_pmd.c2
-rw-r--r--drivers/compress/qat/qat_comp.c45
-rw-r--r--drivers/compress/qat/qat_comp.h13
-rw-r--r--drivers/compress/qat/qat_comp_pmd.c144
-rw-r--r--drivers/compress/qat/qat_comp_pmd.h3
-rw-r--r--drivers/crypto/Makefile8
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd.c88
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c8
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h6
-rw-r--r--drivers/crypto/aesni_gcm/meson.build12
-rw-r--r--drivers/crypto/aesni_mb/aesni_mb_ops.h89
-rw-r--r--drivers/crypto/aesni_mb/meson.build12
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c346
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c90
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h27
-rw-r--r--drivers/crypto/caam_jr/Makefile44
-rw-r--r--drivers/crypto/caam_jr/caam_jr.c2508
-rw-r--r--drivers/crypto/caam_jr/caam_jr_capabilities.c266
-rw-r--r--drivers/crypto/caam_jr/caam_jr_capabilities.h18
-rw-r--r--drivers/crypto/caam_jr/caam_jr_config.h207
-rw-r--r--drivers/crypto/caam_jr/caam_jr_desc.h285
-rw-r--r--drivers/crypto/caam_jr/caam_jr_hw.c367
-rw-r--r--drivers/crypto/caam_jr/caam_jr_hw_specific.h503
-rw-r--r--drivers/crypto/caam_jr/caam_jr_log.h42
-rw-r--r--drivers/crypto/caam_jr/caam_jr_pvt.h291
-rw-r--r--drivers/crypto/caam_jr/caam_jr_uio.c501
-rw-r--r--drivers/crypto/caam_jr/meson.build17
-rw-r--r--drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map4
-rw-r--r--drivers/crypto/dpaa2_sec/Makefile11
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c788
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_event.h18
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h210
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc.h816
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/algo.h58
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/ipsec.h195
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/pdcp.h2796
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h346
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h22
-rw-r--r--drivers/crypto/dpaa2_sec/mc/dpseci.c128
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h25
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h73
-rw-r--r--drivers/crypto/dpaa2_sec/meson.build2
-rw-r--r--drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map8
-rw-r--r--drivers/crypto/dpaa_sec/Makefile2
-rw-r--r--drivers/crypto/dpaa_sec/dpaa_sec.c314
-rw-r--r--drivers/crypto/dpaa_sec/dpaa_sec.h3
-rw-r--r--drivers/crypto/kasumi/meson.build12
-rw-r--r--drivers/crypto/meson.build5
-rw-r--r--drivers/crypto/mvsam/Makefile5
-rw-r--r--drivers/crypto/mvsam/meson.build2
-rw-r--r--drivers/crypto/mvsam/rte_mrvl_pmd.c213
-rw-r--r--drivers/crypto/mvsam/rte_mrvl_pmd_ops.c160
-rw-r--r--drivers/crypto/mvsam/rte_mrvl_pmd_private.h34
-rw-r--r--drivers/crypto/null/null_crypto_pmd_ops.c2
-rw-r--r--drivers/crypto/octeontx/Makefile46
-rw-r--r--drivers/crypto/octeontx/meson.build18
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev.c133
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev.h20
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_capabilities.c604
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_capabilities.h17
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_hw_access.c598
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_hw_access.h320
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_mbox.c178
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_mbox.h92
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_ops.c531
-rw-r--r--drivers/crypto/octeontx/otx_cryptodev_ops.h18
-rw-r--r--drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map4
-rw-r--r--drivers/crypto/openssl/compat.h265
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd.c29
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd_ops.c40
-rw-r--r--drivers/crypto/qat/qat_sym_capabilities.h20
-rw-r--r--drivers/crypto/qat/qat_sym_pmd.c1
-rw-r--r--drivers/crypto/qat/qat_sym_pmd.h2
-rw-r--r--drivers/crypto/qat/qat_sym_session.c190
-rw-r--r--drivers/crypto/qat/qat_sym_session.h3
-rw-r--r--drivers/crypto/scheduler/meson.build19
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.c26
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.h8
-rw-r--r--drivers/crypto/scheduler/scheduler_failover.c6
-rw-r--r--drivers/crypto/scheduler/scheduler_multicore.c6
-rw-r--r--drivers/crypto/scheduler/scheduler_pkt_size_distr.c6
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd.c8
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_ops.c2
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_private.h2
-rw-r--r--drivers/crypto/scheduler/scheduler_roundrobin.c6
-rw-r--r--drivers/crypto/zuc/meson.build12
-rw-r--r--drivers/event/Makefile1
-rw-r--r--drivers/event/dpaa/Makefile1
-rw-r--r--drivers/event/dpaa/dpaa_eventdev.c341
-rw-r--r--drivers/event/dpaa/dpaa_eventdev.h17
-rw-r--r--drivers/event/dpaa2/Makefile8
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.c462
-rw-r--r--drivers/event/dpaa2/dpaa2_eventdev.h24
-rw-r--r--drivers/event/dpaa2/meson.build5
-rw-r--r--drivers/event/dsw/Makefile29
-rw-r--r--drivers/event/dsw/dsw_evdev.c435
-rw-r--r--drivers/event/dsw/dsw_evdev.h279
-rw-r--r--drivers/event/dsw/dsw_event.c1253
-rw-r--r--drivers/event/dsw/dsw_sort.h48
-rw-r--r--drivers/event/dsw/dsw_xstats.c288
-rw-r--r--drivers/event/dsw/meson.build6
-rw-r--r--drivers/event/dsw/rte_pmd_dsw_event_version.map3
-rw-r--r--drivers/event/meson.build2
-rw-r--r--drivers/event/octeontx/Makefile2
-rw-r--r--drivers/event/octeontx/ssovf_evdev.c83
-rw-r--r--drivers/event/octeontx/ssovf_evdev.h5
-rw-r--r--drivers/event/octeontx/ssovf_worker.c44
-rw-r--r--drivers/event/octeontx/ssovf_worker.h1
-rw-r--r--drivers/event/opdl/Makefile2
-rw-r--r--drivers/event/opdl/meson.build11
-rw-r--r--drivers/event/opdl/rte_pmd_opdl_event_version.map (renamed from drivers/event/opdl/rte_pmd_evdev_opdl_version.map)0
-rw-r--r--drivers/event/sw/sw_evdev.c13
-rw-r--r--drivers/event/sw/sw_evdev.h8
-rw-r--r--drivers/event/sw/sw_evdev_scheduler.c20
-rw-r--r--drivers/event/sw/sw_evdev_selftest.c77
-rw-r--r--drivers/mempool/dpaa/Makefile2
-rw-r--r--drivers/mempool/dpaa/dpaa_mempool.c8
-rw-r--r--drivers/mempool/dpaa/dpaa_mempool.h4
-rw-r--r--drivers/mempool/dpaa2/Makefile3
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.c38
-rw-r--r--drivers/mempool/dpaa2/meson.build2
-rw-r--r--drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map1
-rw-r--r--drivers/mempool/octeontx/octeontx_fpavf.h2
-rw-r--r--drivers/meson.build19
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/af_packet/rte_eth_af_packet.c12
-rw-r--r--drivers/net/ark/ark_ddm.c12
-rw-r--r--drivers/net/ark/ark_ddm.h7
-rw-r--r--drivers/net/ark/ark_ethdev.c4
-rw-r--r--drivers/net/ark/ark_ethdev_rx.c55
-rw-r--r--drivers/net/ark/ark_ethdev_tx.c2
-rw-r--r--drivers/net/atlantic/Makefile35
-rw-r--r--drivers/net/atlantic/atl_common.h96
-rw-r--r--drivers/net/atlantic/atl_ethdev.c1539
-rw-r--r--drivers/net/atlantic/atl_ethdev.h107
-rw-r--r--drivers/net/atlantic/atl_hw_regs.c52
-rw-r--r--drivers/net/atlantic/atl_hw_regs.h53
-rw-r--r--drivers/net/atlantic/atl_logs.h31
-rw-r--r--drivers/net/atlantic/atl_rxtx.c1357
-rw-r--r--drivers/net/atlantic/atl_types.h186
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_b0.c510
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_b0.h40
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h145
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_llh.c1490
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_llh.h714
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h2407
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_utils.c942
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_utils.h510
-rw-r--r--drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c618
-rw-r--r--drivers/net/atlantic/meson.build12
-rw-r--r--drivers/net/atlantic/rte_pmd_atlantic_version.map4
-rw-r--r--drivers/net/avf/Makefile2
-rw-r--r--drivers/net/avf/avf_ethdev.c22
-rw-r--r--drivers/net/avf/avf_rxtx.c29
-rw-r--r--drivers/net/avf/avf_rxtx.h12
-rw-r--r--drivers/net/avf/avf_rxtx_vec_sse.c2
-rw-r--r--drivers/net/avf/avf_vchnl.c2
-rw-r--r--drivers/net/avf/base/avf_osdep.h4
-rw-r--r--drivers/net/avf/base/meson.build23
-rw-r--r--drivers/net/avf/meson.build20
-rw-r--r--drivers/net/avp/avp_ethdev.c6
-rw-r--r--drivers/net/avp/meson.build3
-rw-r--r--drivers/net/axgbe/axgbe_ethdev.c4
-rw-r--r--drivers/net/axgbe/axgbe_rxtx.c2
-rw-r--r--drivers/net/bnx2x/bnx2x.c813
-rw-r--r--drivers/net/bnx2x/bnx2x.h42
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c92
-rw-r--r--drivers/net/bnx2x/bnx2x_logs.h25
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.c30
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c35
-rw-r--r--drivers/net/bnx2x/bnx2x_vfpf.c50
-rw-r--r--drivers/net/bnx2x/ecore_hsi.h20
-rw-r--r--drivers/net/bnx2x/ecore_init.h6
-rw-r--r--drivers/net/bnx2x/ecore_init_ops.h8
-rw-r--r--drivers/net/bnx2x/ecore_reg.h1
-rw-r--r--drivers/net/bnx2x/ecore_sp.c258
-rw-r--r--drivers/net/bnx2x/ecore_sp.h4
-rw-r--r--drivers/net/bnx2x/elink.c7908
-rw-r--r--drivers/net/bnx2x/elink.h236
-rw-r--r--drivers/net/bnxt/bnxt.h28
-rw-r--r--drivers/net/bnxt/bnxt_cpr.c1
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c343
-rw-r--r--drivers/net/bnxt/bnxt_filter.c28
-rw-r--r--drivers/net/bnxt/bnxt_flow.c12
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.c418
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.h5
-rw-r--r--drivers/net/bnxt/bnxt_rxq.c25
-rw-r--r--drivers/net/bnxt/bnxt_stats.c221
-rw-r--r--drivers/net/bnxt/bnxt_txr.c5
-rw-r--r--drivers/net/bnxt/bnxt_vnic.c43
-rw-r--r--drivers/net/bnxt/hsi_struct_def_dpdk.h5489
-rw-r--r--drivers/net/bonding/Makefile1
-rw-r--r--drivers/net/bonding/meson.build1
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c48
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad_private.h2
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c215
-rw-r--r--drivers/net/bonding/rte_eth_bond_flow.c31
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c275
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h10
-rw-r--r--drivers/net/cxgbe/Makefile2
-rw-r--r--drivers/net/cxgbe/base/adapter.h4
-rw-r--r--drivers/net/cxgbe/base/common.h8
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c108
-rw-r--r--drivers/net/cxgbe/base/t4_msg.h44
-rw-r--r--drivers/net/cxgbe/base/t4_regs.h8
-rw-r--r--drivers/net/cxgbe/base/t4_tcb.h5
-rw-r--r--drivers/net/cxgbe/base/t4fw_interface.h52
-rw-r--r--drivers/net/cxgbe/cxgbe.h15
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c35
-rw-r--r--drivers/net/cxgbe/cxgbe_filter.c117
-rw-r--r--drivers/net/cxgbe/cxgbe_filter.h35
-rw-r--r--drivers/net/cxgbe/cxgbe_flow.c287
-rw-r--r--drivers/net/cxgbe/cxgbe_flow.h1
-rw-r--r--drivers/net/cxgbe/cxgbe_main.c69
-rw-r--r--drivers/net/cxgbe/cxgbevf_main.c9
-rw-r--r--drivers/net/cxgbe/l2t.c227
-rw-r--r--drivers/net/cxgbe/l2t.h57
-rw-r--r--drivers/net/cxgbe/meson.build2
-rw-r--r--drivers/net/cxgbe/mps_tcam.c243
-rw-r--r--drivers/net/cxgbe/mps_tcam.h52
-rw-r--r--drivers/net/cxgbe/sge.c24
-rw-r--r--drivers/net/dpaa/Makefile1
-rw-r--r--drivers/net/dpaa/dpaa_ethdev.c152
-rw-r--r--drivers/net/dpaa/dpaa_ethdev.h5
-rw-r--r--drivers/net/dpaa/dpaa_rxtx.c100
-rw-r--r--drivers/net/dpaa/dpaa_rxtx.h5
-rw-r--r--drivers/net/dpaa2/Makefile3
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h40
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.c192
-rw-r--r--drivers/net/dpaa2/dpaa2_rxtx.c95
-rw-r--r--drivers/net/dpaa2/mc/dpni.c134
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpkg.h71
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni.h378
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni_cmd.h87
-rw-r--r--drivers/net/dpaa2/mc/fsl_net.h2
-rw-r--r--drivers/net/dpaa2/meson.build2
-rw-r--r--drivers/net/e1000/Makefile1
-rw-r--r--drivers/net/e1000/base/e1000_82571.c5
-rw-r--r--drivers/net/e1000/base/e1000_osdep.h2
-rw-r--r--drivers/net/e1000/base/meson.build3
-rw-r--r--drivers/net/e1000/e1000_ethdev.h6
-rw-r--r--drivers/net/e1000/em_ethdev.c8
-rw-r--r--drivers/net/e1000/em_rxtx.c15
-rw-r--r--drivers/net/e1000/igb_ethdev.c43
-rw-r--r--drivers/net/e1000/igb_flow.c10
-rw-r--r--drivers/net/e1000/igb_rxtx.c31
-rw-r--r--drivers/net/e1000/meson.build2
-rw-r--r--drivers/net/ena/ena_ethdev.c202
-rw-r--r--drivers/net/enetc/Makefile23
-rw-r--r--drivers/net/enetc/base/enetc_hw.h226
-rw-r--r--drivers/net/enetc/enetc.h113
-rw-r--r--drivers/net/enetc/enetc_ethdev.c629
-rw-r--r--drivers/net/enetc/enetc_logs.h40
-rw-r--r--drivers/net/enetc/enetc_rxtx.c239
-rw-r--r--drivers/net/enetc/meson.build11
-rw-r--r--drivers/net/enetc/rte_pmd_enetc_version.map4
-rw-r--r--drivers/net/enic/Makefile28
-rw-r--r--drivers/net/enic/base/vnic_dev.c105
-rw-r--r--drivers/net/enic/base/vnic_dev.h8
-rw-r--r--drivers/net/enic/base/vnic_devcmd.h72
-rw-r--r--drivers/net/enic/enic.h12
-rw-r--r--drivers/net/enic/enic_ethdev.c61
-rw-r--r--drivers/net/enic/enic_flow.c180
-rw-r--r--drivers/net/enic/enic_main.c81
-rw-r--r--drivers/net/enic/enic_res.c11
-rw-r--r--drivers/net/enic/enic_rxtx.c286
-rw-r--r--drivers/net/enic/enic_rxtx_common.h271
-rw-r--r--drivers/net/enic/enic_rxtx_vec_avx2.c831
-rw-r--r--drivers/net/enic/meson.build16
-rw-r--r--drivers/net/failsafe/failsafe.c11
-rw-r--r--drivers/net/failsafe/failsafe_args.c10
-rw-r--r--drivers/net/failsafe/failsafe_eal.c3
-rw-r--r--drivers/net/failsafe/failsafe_ether.c116
-rw-r--r--drivers/net/failsafe/failsafe_flow.c31
-rw-r--r--drivers/net/failsafe/failsafe_intr.c2
-rw-r--r--drivers/net/failsafe/failsafe_ops.c252
-rw-r--r--drivers/net/failsafe/failsafe_private.h15
-rw-r--r--drivers/net/failsafe/failsafe_rxtx.c2
-rw-r--r--drivers/net/fm10k/base/meson.build3
-rw-r--r--drivers/net/fm10k/fm10k_ethdev.c45
-rw-r--r--drivers/net/i40e/base/README2
-rw-r--r--drivers/net/i40e/base/i40e_adminq.c50
-rw-r--r--drivers/net/i40e/base/i40e_adminq.h35
-rw-r--r--drivers/net/i40e/base/i40e_adminq_cmd.h234
-rw-r--r--drivers/net/i40e/base/i40e_alloc.h35
-rw-r--r--drivers/net/i40e/base/i40e_common.c250
-rw-r--r--drivers/net/i40e/base/i40e_dcb.c46
-rw-r--r--drivers/net/i40e/base/i40e_dcb.h35
-rw-r--r--drivers/net/i40e/base/i40e_devids.h38
-rw-r--r--drivers/net/i40e/base/i40e_diag.c37
-rw-r--r--drivers/net/i40e/base/i40e_diag.h37
-rw-r--r--drivers/net/i40e/base/i40e_hmc.c35
-rw-r--r--drivers/net/i40e/base/i40e_hmc.h35
-rw-r--r--drivers/net/i40e/base/i40e_lan_hmc.c50
-rw-r--r--drivers/net/i40e/base/i40e_lan_hmc.h35
-rw-r--r--drivers/net/i40e/base/i40e_nvm.c35
-rw-r--r--drivers/net/i40e/base/i40e_osdep.h40
-rw-r--r--drivers/net/i40e/base/i40e_prototype.h76
-rw-r--r--drivers/net/i40e/base/i40e_register.h35
-rw-r--r--drivers/net/i40e/base/i40e_status.h35
-rw-r--r--drivers/net/i40e/base/i40e_type.h58
-rw-r--r--drivers/net/i40e/base/meson.build7
-rw-r--r--drivers/net/i40e/base/virtchnl.h35
-rw-r--r--drivers/net/i40e/i40e_ethdev.c241
-rw-r--r--drivers/net/i40e/i40e_ethdev.h7
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c26
-rw-r--r--drivers/net/i40e/i40e_flow.c49
-rw-r--r--drivers/net/i40e/i40e_rxtx.c172
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_common.h4
-rw-r--r--drivers/net/i40e/i40e_vf_representor.c12
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.c2
-rw-r--r--drivers/net/ifc/base/ifcvf.c33
-rw-r--r--drivers/net/ifc/base/ifcvf.h7
-rw-r--r--drivers/net/ifc/base/ifcvf_osdep.h2
-rw-r--r--drivers/net/ifc/ifcvf_vdpa.c102
-rw-r--r--drivers/net/ixgbe/base/README4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82599.c45
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82599.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.c50
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.h36
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.c43
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_dcb.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_dcb.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_dcb_82598.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_dcb_82598.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_dcb_82599.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_dcb_82599.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_hv_vf.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_hv_vf.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_mbx.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_mbx.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_osdep.h38
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.c36
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_type.h53
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.c35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.h35
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.c59
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.h36
-rw-r--r--drivers/net/ixgbe/base/meson.build3
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.c42
-rw-r--r--drivers/net/ixgbe/ixgbe_flow.c29
-rw-r--r--drivers/net/ixgbe/ixgbe_ipsec.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c32
-rw-r--r--drivers/net/ixgbe/ixgbe_vf_representor.c10
-rw-r--r--drivers/net/ixgbe/meson.build3
-rw-r--r--drivers/net/kni/rte_eth_kni.c12
-rw-r--r--drivers/net/liquidio/lio_ethdev.c5
-rw-r--r--drivers/net/meson.build14
-rw-r--r--drivers/net/mlx4/meson.build102
-rw-r--r--drivers/net/mlx4/mlx4.c8
-rw-r--r--drivers/net/mlx4/mlx4_mr.c149
-rw-r--r--drivers/net/mlx4/mlx4_rxq.c3
-rw-r--r--drivers/net/mlx4/mlx4_rxtx.h35
-rw-r--r--drivers/net/mlx5/Makefile43
-rw-r--r--drivers/net/mlx5/meson.build244
-rw-r--r--drivers/net/mlx5/mlx5.c154
-rw-r--r--drivers/net/mlx5/mlx5.h51
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c72
-rw-r--r--drivers/net/mlx5/mlx5_flow.c3351
-rw-r--r--drivers/net/mlx5/mlx5_flow.h375
-rw-r--r--drivers/net/mlx5/mlx5_flow_dv.c1492
-rw-r--r--drivers/net/mlx5/mlx5_flow_tcf.c2913
-rw-r--r--drivers/net/mlx5/mlx5_flow_verbs.c1825
-rw-r--r--drivers/net/mlx5/mlx5_glue.c113
-rw-r--r--drivers/net/mlx5/mlx5_glue.h34
-rw-r--r--drivers/net/mlx5/mlx5_mac.c2
-rw-r--r--drivers/net/mlx5/mlx5_mr.c155
-rw-r--r--drivers/net/mlx5/mlx5_nl_flow.c1248
-rw-r--r--drivers/net/mlx5/mlx5_prm.h222
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c3
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c48
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h41
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec.c46
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec.h1
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_neon.h11
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_sse.h10
-rw-r--r--drivers/net/mlx5/mlx5_socket.c2
-rw-r--r--drivers/net/mlx5/mlx5_stats.c87
-rw-r--r--drivers/net/mlx5/mlx5_txq.c5
-rw-r--r--drivers/net/mvneta/Makefile42
-rw-r--r--drivers/net/mvneta/meson.build28
-rw-r--r--drivers/net/mvneta/mvneta_ethdev.c987
-rw-r--r--drivers/net/mvneta/mvneta_ethdev.h79
-rw-r--r--drivers/net/mvneta/mvneta_rxtx.c1030
-rw-r--r--drivers/net/mvneta/mvneta_rxtx.h38
-rw-r--r--drivers/net/mvneta/rte_pmd_mvneta_version.map3
-rw-r--r--drivers/net/mvpp2/Makefile5
-rw-r--r--drivers/net/mvpp2/meson.build6
-rw-r--r--drivers/net/mvpp2/mrvl_ethdev.c510
-rw-r--r--drivers/net/mvpp2/mrvl_ethdev.h123
-rw-r--r--drivers/net/mvpp2/mrvl_flow.c161
-rw-r--r--drivers/net/mvpp2/mrvl_flow.h15
-rw-r--r--drivers/net/mvpp2/mrvl_mtr.c512
-rw-r--r--drivers/net/mvpp2/mrvl_mtr.h15
-rw-r--r--drivers/net/mvpp2/mrvl_qos.c246
-rw-r--r--drivers/net/mvpp2/mrvl_qos.h2
-rw-r--r--drivers/net/mvpp2/mrvl_tm.c1009
-rw-r--r--drivers/net/mvpp2/mrvl_tm.h15
-rw-r--r--drivers/net/netvsc/Makefile1
-rw-r--r--drivers/net/netvsc/hn_ethdev.c221
-rw-r--r--drivers/net/netvsc/hn_nvs.c24
-rw-r--r--drivers/net/netvsc/hn_nvs.h9
-rw-r--r--drivers/net/netvsc/hn_rndis.c47
-rw-r--r--drivers/net/netvsc/hn_rndis.h3
-rw-r--r--drivers/net/netvsc/hn_rxtx.c212
-rw-r--r--drivers/net/netvsc/hn_var.h66
-rw-r--r--drivers/net/netvsc/hn_vf.c549
-rw-r--r--drivers/net/netvsc/meson.build2
-rw-r--r--drivers/net/nfp/Makefile1
-rw-r--r--drivers/net/nfp/meson.build5
-rw-r--r--drivers/net/nfp/nfp_net.c77
-rw-r--r--drivers/net/nfp/nfp_net_pmd.h2
-rw-r--r--drivers/net/null/rte_eth_null.c8
-rw-r--r--drivers/net/octeontx/base/meson.build6
-rw-r--r--drivers/net/octeontx/base/octeontx_io.h2
-rw-r--r--drivers/net/octeontx/octeontx_ethdev.c45
-rw-r--r--drivers/net/octeontx/octeontx_ethdev.h3
-rw-r--r--drivers/net/octeontx/octeontx_rxtx.c35
-rw-r--r--drivers/net/octeontx/octeontx_rxtx.h33
-rw-r--r--drivers/net/pcap/rte_eth_pcap.c186
-rw-r--r--drivers/net/qede/Makefile2
-rw-r--r--drivers/net/qede/base/bcm_osal.c2
-rw-r--r--drivers/net/qede/base/bcm_osal.h3
-rw-r--r--drivers/net/qede/base/common_hsi.h15
-rw-r--r--drivers/net/qede/base/ecore.h62
-rw-r--r--drivers/net/qede/base/ecore_cxt.c15
-rw-r--r--drivers/net/qede/base/ecore_dcbx.c99
-rw-r--r--drivers/net/qede/base/ecore_dcbx_api.h10
-rw-r--r--drivers/net/qede/base/ecore_dev.c1892
-rw-r--r--drivers/net/qede/base/ecore_dev_api.h173
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h57
-rw-r--r--drivers/net/qede/base/ecore_hsi_debug_tools.h15
-rw-r--r--drivers/net/qede/base/ecore_hsi_eth.h57
-rw-r--r--drivers/net/qede/base/ecore_hw.c127
-rw-r--r--drivers/net/qede/base/ecore_hw.h40
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.c93
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.h42
-rw-r--r--drivers/net/qede/base/ecore_init_ops.c26
-rw-r--r--drivers/net/qede/base/ecore_int.c99
-rw-r--r--drivers/net/qede/base/ecore_int.h1
-rw-r--r--drivers/net/qede/base/ecore_int_api.h14
-rw-r--r--drivers/net/qede/base/ecore_iov_api.h17
-rw-r--r--drivers/net/qede/base/ecore_iro.h164
-rw-r--r--drivers/net/qede/base/ecore_iro_values.h42
-rw-r--r--drivers/net/qede/base/ecore_l2.c108
-rw-r--r--drivers/net/qede/base/ecore_l2_api.h41
-rw-r--r--drivers/net/qede/base/ecore_mcp.c280
-rw-r--r--drivers/net/qede/base/ecore_mcp.h21
-rw-r--r--drivers/net/qede/base/ecore_mcp_api.h40
-rw-r--r--drivers/net/qede/base/ecore_rt_defs.h265
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.c8
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.h3
-rw-r--r--drivers/net/qede/base/ecore_spq.c56
-rw-r--r--drivers/net/qede/base/ecore_sriov.c94
-rw-r--r--drivers/net/qede/base/ecore_vf.c25
-rw-r--r--drivers/net/qede/base/ecore_vfpf_if.h2
-rw-r--r--drivers/net/qede/base/eth_common.h5
-rw-r--r--drivers/net/qede/base/mcp_public.h44
-rw-r--r--drivers/net/qede/base/meson.build60
-rw-r--r--drivers/net/qede/base/reg_addr.h76
-rw-r--r--drivers/net/qede/meson.build12
-rw-r--r--drivers/net/qede/qede_ethdev.c743
-rw-r--r--drivers/net/qede/qede_ethdev.h69
-rw-r--r--drivers/net/qede/qede_fdir.c470
-rw-r--r--drivers/net/qede/qede_filter.c1546
-rw-r--r--drivers/net/qede/qede_main.c15
-rw-r--r--drivers/net/qede/qede_rxtx.c140
-rw-r--r--drivers/net/qede/qede_rxtx.h17
-rw-r--r--drivers/net/ring/rte_eth_ring.c7
-rw-r--r--drivers/net/sfc/base/ef10_ev.c38
-rw-r--r--drivers/net/sfc/base/ef10_filter.c64
-rw-r--r--drivers/net/sfc/base/ef10_image.c5
-rw-r--r--drivers/net/sfc/base/ef10_impl.h68
-rw-r--r--drivers/net/sfc/base/ef10_intr.c5
-rw-r--r--drivers/net/sfc/base/ef10_mac.c30
-rw-r--r--drivers/net/sfc/base/ef10_nic.c277
-rw-r--r--drivers/net/sfc/base/ef10_nvram.c161
-rw-r--r--drivers/net/sfc/base/ef10_phy.c105
-rw-r--r--drivers/net/sfc/base/ef10_rx.c89
-rw-r--r--drivers/net/sfc/base/ef10_signed_image_layout.h8
-rw-r--r--drivers/net/sfc/base/ef10_tx.c38
-rw-r--r--drivers/net/sfc/base/efx.h353
-rw-r--r--drivers/net/sfc/base/efx_annote.h103
-rw-r--r--drivers/net/sfc/base/efx_bootcfg.c641
-rw-r--r--drivers/net/sfc/base/efx_filter.c90
-rw-r--r--drivers/net/sfc/base/efx_impl.h10
-rw-r--r--drivers/net/sfc/base/efx_lic.c91
-rw-r--r--drivers/net/sfc/base/efx_mcdi.c168
-rw-r--r--drivers/net/sfc/base/efx_mcdi.h19
-rw-r--r--drivers/net/sfc/base/efx_mon.c745
-rw-r--r--drivers/net/sfc/base/efx_nic.c54
-rw-r--r--drivers/net/sfc/base/efx_nvram.c71
-rw-r--r--drivers/net/sfc/base/efx_phy.c60
-rw-r--r--drivers/net/sfc/base/efx_port.c2
-rw-r--r--drivers/net/sfc/base/efx_rx.c225
-rw-r--r--drivers/net/sfc/base/efx_tunnel.c6
-rw-r--r--drivers/net/sfc/base/efx_tx.c19
-rw-r--r--drivers/net/sfc/base/hunt_nic.c19
-rw-r--r--drivers/net/sfc/base/mc_driver_pcol_strs.h102
-rw-r--r--drivers/net/sfc/base/mcdi_mon.c381
-rw-r--r--drivers/net/sfc/base/mcdi_mon.h5
-rw-r--r--drivers/net/sfc/base/medford2_nic.c15
-rw-r--r--drivers/net/sfc/base/medford_nic.c15
-rw-r--r--drivers/net/sfc/base/meson.build3
-rw-r--r--drivers/net/sfc/base/siena_mac.c9
-rw-r--r--drivers/net/sfc/base/siena_nic.c7
-rw-r--r--drivers/net/sfc/base/siena_nvram.c5
-rw-r--r--drivers/net/sfc/base/siena_phy.c28
-rw-r--r--drivers/net/sfc/efsys.h38
-rw-r--r--drivers/net/sfc/sfc_dp_tx.h5
-rw-r--r--drivers/net/sfc/sfc_ef10_essb_rx.c16
-rw-r--r--drivers/net/sfc/sfc_ef10_rx.c227
-rw-r--r--drivers/net/sfc/sfc_ef10_tx.c364
-rw-r--r--drivers/net/sfc/sfc_ethdev.c25
-rw-r--r--drivers/net/sfc/sfc_rx.c33
-rw-r--r--drivers/net/sfc/sfc_tso.c25
-rw-r--r--drivers/net/sfc/sfc_tso.h23
-rw-r--r--drivers/net/sfc/sfc_tx.c12
-rw-r--r--drivers/net/softnic/Makefile4
-rw-r--r--drivers/net/softnic/conn.c1
-rw-r--r--drivers/net/softnic/hash_func.h359
-rw-r--r--drivers/net/softnic/hash_func_arm64.h261
-rw-r--r--drivers/net/softnic/meson.build8
-rw-r--r--drivers/net/softnic/rte_eth_softnic.c42
-rw-r--r--drivers/net/softnic/rte_eth_softnic_action.c67
-rw-r--r--drivers/net/softnic/rte_eth_softnic_cli.c883
-rw-r--r--drivers/net/softnic/rte_eth_softnic_cryptodev.c125
-rw-r--r--drivers/net/softnic/rte_eth_softnic_flow.c2287
-rw-r--r--drivers/net/softnic/rte_eth_softnic_internals.h219
-rw-r--r--drivers/net/softnic/rte_eth_softnic_meter.c728
-rw-r--r--drivers/net/softnic/rte_eth_softnic_pipeline.c161
-rw-r--r--drivers/net/softnic/rte_eth_softnic_thread.c118
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.c10
-rw-r--r--drivers/net/tap/Makefile1
-rw-r--r--drivers/net/tap/meson.build45
-rw-r--r--drivers/net/tap/rte_eth_tap.c269
-rw-r--r--drivers/net/tap/rte_eth_tap.h10
-rw-r--r--drivers/net/tap/tap_bpf_insns.h4
-rw-r--r--drivers/net/tap/tap_flow.c5
-rw-r--r--drivers/net/tap/tap_intr.c5
-rw-r--r--drivers/net/thunderx/base/meson.build6
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.c9
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.h1
-rw-r--r--drivers/net/vdev_netvsc/meson.build22
-rw-r--r--drivers/net/vdev_netvsc/vdev_netvsc.c2
-rw-r--r--drivers/net/vhost/rte_eth_vhost.c31
-rw-r--r--drivers/net/virtio/virtio_ethdev.c8
-rw-r--r--drivers/net/virtio/virtio_ethdev.h5
-rw-r--r--drivers/net/virtio/virtio_pci.c65
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple.c2
-rw-r--r--drivers/net/virtio/virtio_user/vhost.h4
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel.c65
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.c56
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.h2
-rw-r--r--drivers/net/virtio/virtio_user/vhost_user.c179
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c35
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c4
-rw-r--r--drivers/net/vmxnet3/meson.build18
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c88
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.h3
-rw-r--r--drivers/raw/dpaa2_cmdif/Makefile3
-rw-r--r--drivers/raw/dpaa2_cmdif/meson.build2
-rw-r--r--drivers/raw/dpaa2_qdma/Makefile3
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma.c18
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma.h6
-rw-r--r--drivers/raw/dpaa2_qdma/meson.build2
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c8
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h8
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_port.c6
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_port_error.c2
-rw-r--r--drivers/raw/ifpga_rawdev/base/meson.build4
-rw-r--r--drivers/raw/ifpga_rawdev/ifpga_rawdev.c5
-rw-r--r--drivers/raw/skeleton_rawdev/skeleton_rawdev.c2
-rw-r--r--drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c3
684 files changed, 81726 insertions, 21408 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 75660765..7d5da5d9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -5,6 +5,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-y += common
DIRS-y += bus
+DEPDIRS-bus := common
DIRS-y += mempool
DEPDIRS-mempool := common bus
DIRS-y += net
diff --git a/drivers/baseband/meson.build b/drivers/baseband/meson.build
new file mode 100644
index 00000000..52489df3
--- /dev/null
+++ b/drivers/baseband/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+drivers = ['null']
+
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/baseband/null/meson.build b/drivers/baseband/null/meson.build
new file mode 100644
index 00000000..64c29d86
--- /dev/null
+++ b/drivers/baseband/null/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+deps += ['bbdev', 'bus_vdev', 'ring']
+name = 'bbdev_null'
+allow_experimental_apis = true
+sources = files('bbdev_null.c')
diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index bffaa9d9..800e5cd2 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -14,7 +14,6 @@ CFLAGS := -I$(SRCDIR) $(CFLAGS)
CFLAGS += -O3 $(WERROR_FLAGS)
CFLAGS += -Wno-pointer-arith
CFLAGS += -Wno-cast-qual
-CFLAGS += -D _GNU_SOURCE
CFLAGS += -I$(RTE_BUS_DPAA)/
CFLAGS += -I$(RTE_BUS_DPAA)/include
CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman
@@ -24,7 +23,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
# versioning export map
EXPORT_MAP := rte_bus_dpaa_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
#
@@ -48,5 +47,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
LDLIBS += -lpthread
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
index 031c6f1a..6b522420 100644
--- a/drivers/bus/dpaa/base/fman/netcfg_layer.c
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -21,7 +21,7 @@
/* This data structure contaings all configurations information
* related to usages of DPA devices.
*/
-struct netcfg_info *netcfg;
+static struct netcfg_info *netcfg;
/* fd to open a socket for making ioctl request to disable/enable shared
* interfaces.
*/
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index b14b5905..750b756b 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -23,7 +23,7 @@ static void *bman_ccsr_map;
/* Portal driver */
/*****************/
-static __thread int fd = -1;
+static __thread int bmfd = -1;
static __thread struct bm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
.type = dpaa_portal_bman
@@ -70,14 +70,14 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
pcfg.index = map.index;
bman_depletion_fill(&pcfg.mask);
- fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
- if (fd == -1) {
+ bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (bmfd == -1) {
pr_err("BMan irq init failed");
process_portal_unmap(&map.addr);
return -EBUSY;
}
/* Use the IRQ FD as a unique IRQ number */
- pcfg.irq = fd;
+ pcfg.irq = bmfd;
portal = bman_create_affine_portal(&pcfg);
if (!portal) {
@@ -90,7 +90,7 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
/* Set the IRQ number */
irq_map.type = dpaa_portal_bman;
irq_map.portal_cinh = map.addr.cinh;
- process_portal_irq_map(fd, &irq_map);
+ process_portal_irq_map(bmfd, &irq_map);
return 0;
}
@@ -99,7 +99,7 @@ static int fsl_bman_portal_finish(void)
__maybe_unused const struct bm_portal_config *cfg;
int ret;
- process_portal_irq_unmap(fd);
+ process_portal_irq_unmap(bmfd);
cfg = bman_destroy_affine_portal();
DPAA_BUG_ON(cfg != &pcfg);
@@ -109,6 +109,11 @@ static int fsl_bman_portal_finish(void)
return ret;
}
+int bman_thread_fd(void)
+{
+ return bmfd;
+}
+
int bman_thread_init(void)
{
/* Convert from contiguous/virtual cpu numbering to real cpu when
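Editor's note: the new bman_thread_fd() accessor exposes the calling thread's BMan portal UIO descriptor so an application can sleep on it instead of busy-polling. A minimal sketch of that pattern follows; it is not part of the patch, it assumes the descriptor behaves like a regular pollable UIO fd, and error handling is omitted.

```c
/* Sketch only: block on the per-thread BMan portal fd, then let the
 * driver post-process and re-enable the interrupt line. */
#include <sys/epoll.h>
#include <unistd.h>
#include <fsl_usd.h>

static void bman_wait_for_irq(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int epfd = epoll_create1(0);

	ev.data.fd = bman_thread_fd();
	epoll_ctl(epfd, EPOLL_CTL_ADD, bman_thread_fd(), &ev);

	/* Sleeps until the kernel UIO handler reports a portal interrupt. */
	epoll_wait(epfd, &ev, 1, -1);

	/* Re-enable the IRQ line once processing is done, per fsl_usd.h. */
	bman_thread_irq();

	close(epfd);
}
```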
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 7c17027f..dc64d089 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -852,11 +852,9 @@ mr_loop:
case QM_MR_VERB_FQPN:
/* Parked */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq = get_fq_table_entry(
- be32_to_cpu(msg->fq.contextB));
+ fq = get_fq_table_entry(msg->fq.contextB);
#else
- fq = (void *)(uintptr_t)
- be32_to_cpu(msg->fq.contextB);
+ fq = (void *)(uintptr_t)msg->fq.contextB;
#endif
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
@@ -967,7 +965,6 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
*shadow = *dq;
dq = shadow;
shadow->fqid = be32_to_cpu(shadow->fqid);
- shadow->contextB = be32_to_cpu(shadow->contextB);
shadow->seqnum = be16_to_cpu(shadow->seqnum);
hw_fd_to_cpu(&shadow->fd);
#endif
@@ -1040,6 +1037,50 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
return limit;
}
+int qman_irqsource_add(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ bits = bits & QM_PIRQ_VISIBLE;
+
+ /* Clear any previously remaining interrupt conditions in
+ * QCSP_ISR. This prevents raising a false interrupt when
+ * interrupt conditions are enabled in QCSP_IER.
+ */
+ qm_isr_status_clear(&p->p, bits);
+ dpaa_set_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+
+
+ return 0;
+}
+
+int qman_irqsource_remove(u32 bits)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 ier;
+
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+
+ bits &= QM_PIRQ_VISIBLE;
+ dpaa_clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_isr_status_clear(&p->p, ~ier);
+ return 0;
+}
+
u16 qman_affine_channel(int cpu)
{
if (cpu < 0) {
@@ -1092,9 +1133,9 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)];
+ fq = qman_fq_lookup_table[dq[rx_number]->contextB];
#else
- fq = (void *)be32_to_cpu(dq[rx_number]->contextB);
+ fq = (void *)dq[rx_number]->contextB;
#endif
if (fq->cb.dqrr_prepare)
fq->cb.dqrr_prepare(shadow[rx_number],
@@ -1114,6 +1155,14 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
return rx_number;
}
+void qman_clear_irq(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ qm_isr_status_clear(&p->p, clear);
+}
+
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs)
{
@@ -1143,7 +1192,6 @@ u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
*shadow = *dq;
dq = shadow;
shadow->fqid = be32_to_cpu(shadow->fqid);
- shadow->contextB = be32_to_cpu(shadow->contextB);
shadow->seqnum = be16_to_cpu(shadow->seqnum);
hw_fd_to_cpu(&shadow->fd);
#endif
@@ -1208,7 +1256,6 @@ struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
*shadow = *dq;
dq = shadow;
shadow->fqid = be32_to_cpu(shadow->fqid);
- shadow->contextB = be32_to_cpu(shadow->contextB);
shadow->seqnum = be16_to_cpu(shadow->seqnum);
hw_fd_to_cpu(&shadow->fd);
#endif
@@ -1504,7 +1551,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- mcc->initfq.fqd.context_b = fq->key;
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
#else
mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
#endif
@@ -2186,11 +2233,6 @@ int qman_enqueue_multi(struct qman_fq *fq,
/* try to send as many frames as possible */
while (eqcr->available && frames_to_send--) {
eq->fqid = fq->fqid_le;
-#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- eq->tag = cpu_to_be32(fq->key);
-#else
- eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
-#endif
eq->fd.opaque_addr = fd->opaque_addr;
eq->fd.addr = cpu_to_be40(fd->addr);
eq->fd.status = cpu_to_be32(fd->status);
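Editor's note: together with qman_thread_fd() (added in qman_driver.c below), the new qman_irqsource_add()/qman_clear_irq() calls let a dequeue thread sleep until work arrives. The following is a rough sketch of that flow, not code from the patch; QM_PIRQ_DQRI is assumed to be the dequeue-ready interrupt source and error handling is omitted.

```c
/* Sketch only: arm dequeue interrupts, sleep on the portal fd, then
 * acknowledge and drain the portal with the existing dequeue API. */
#include <sys/epoll.h>
#include <unistd.h>
#include <rte_common.h>
#include <rte_eventdev.h>
#include <fsl_qman.h>
#include <fsl_usd.h>

static void qman_sleep_then_dequeue(void **bufs)
{
	struct rte_event ev_burst[16];
	struct epoll_event ev = { .events = EPOLLIN };
	int epfd = epoll_create1(0);

	qman_irqsource_add(QM_PIRQ_DQRI);          /* enable DQRR interrupts */
	ev.data.fd = qman_thread_fd();
	epoll_ctl(epfd, EPOLL_CTL_ADD, qman_thread_fd(), &ev);

	epoll_wait(epfd, &ev, 1, -1);              /* sleep until frames land */

	qman_clear_irq();                          /* clear portal status bits */
	qman_thread_irq();                         /* re-enable the IRQ line */
	qman_portal_dequeue(ev_burst, RTE_DIM(ev_burst), bufs); /* drain */

	close(epfd);
}
```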
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
index f6ecd6b2..ba153396 100644
--- a/drivers/bus/dpaa/base/qbman/qman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -113,6 +113,11 @@ static int fsl_qman_portal_finish(void)
return ret;
}
+int qman_thread_fd(void)
+{
+ return qmfd;
+}
+
int qman_thread_init(void)
{
/* Convert from contiguous/virtual cpu numbering to real cpu when
@@ -135,7 +140,7 @@ void qman_thread_irq(void)
* rather than breaking that encapsulation I am simply hard-coding the
* offset to the inhibit register here.
*/
- out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+ out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
}
struct qman_portal *fsl_qman_portal_create(void)
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 16fabd1b..203f60dc 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -34,6 +34,7 @@
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
+#include <dpaax_iova_table.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -46,7 +47,7 @@ int dpaa_logtype_mempool;
int dpaa_logtype_pmd;
int dpaa_logtype_eventdev;
-struct rte_dpaa_bus rte_dpaa_bus;
+static struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;
/* define a variable to hold the portal_key, once created.*/
@@ -165,6 +166,8 @@ dpaa_create_device_list(void)
goto cleanup;
}
+ dev->device.bus = &rte_dpaa_bus.bus;
+
cfg = &dpaa_netcfg->port_cfg[i];
fman_intf = cfg->fman_if;
@@ -546,6 +549,9 @@ rte_dpaa_bus_probe(void)
fclose(svr_file);
}
+ /* And initialize the PA->VA translation table */
+ dpaax_iova_table_populate();
+
/* For each registered driver, and device, call the driver->probe */
TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
@@ -553,6 +559,9 @@ rte_dpaa_bus_probe(void)
if (ret)
continue;
+ if (rte_dev_is_probed(&dev->device))
+ continue;
+
if (!drv->probe ||
(dev->device.devargs &&
dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
@@ -563,8 +572,12 @@ rte_dpaa_bus_probe(void)
dev->device.devargs->policy ==
RTE_DEV_WHITELISTED)) {
ret = drv->probe(drv, dev);
- if (ret)
+ if (ret) {
DPAA_BUS_ERR("Unable to probe.\n");
+ } else {
+ dev->driver = drv;
+ dev->device.driver = &drv->driver;
+ }
}
break;
}
@@ -611,7 +624,7 @@ rte_dpaa_get_iommu_class(void)
return RTE_IOVA_PA;
}
-struct rte_dpaa_bus rte_dpaa_bus = {
+static struct rte_dpaa_bus rte_dpaa_bus = {
.bus = {
.scan = rte_dpaa_bus_scan,
.probe = rte_dpaa_bus_probe,
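Editor's note: earlier in this file's diff, the probe loop gains an rte_dev_is_probed() guard and records the driver on both the bus device and the generic device once probing succeeds. A condensed sketch of that contract (assuming the generic device API introduced alongside this release) is shown below; it is illustrative, not a copy of the patch.

```c
/* Condensed sketch of the new binding rules; not a copy of the patch. */
#include <rte_dev.h>
#include <rte_dpaa_bus.h>

static int probe_one(struct rte_dpaa_driver *drv, struct rte_dpaa_device *dev)
{
	if (rte_dev_is_probed(&dev->device))
		return 0;                      /* already bound: skip */

	if (drv->probe(drv, dev) != 0)
		return -1;                     /* leave the device unbound */

	dev->driver = drv;                     /* bus-level binding */
	dev->device.driver = &drv->driver;     /* generic-device binding */
	return 0;
}
```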
diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h
index 92241d23..41226577 100644
--- a/drivers/bus/dpaa/include/compat.h
+++ b/drivers/bus/dpaa/include/compat.h
@@ -57,8 +57,9 @@
#ifndef __packed
#define __packed __rte_packed
#endif
+#ifndef noinline
#define noinline __attribute__((noinline))
-
+#endif
#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
#define __stringify_1(x) #x
@@ -75,20 +76,25 @@
printf(fmt, ##args); \
fflush(stdout); \
} while (0)
-
+#ifndef pr_crit
#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#endif
+#ifndef pr_err
#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#endif
+#ifndef pr_warn
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#endif
+#ifndef pr_info
#define pr_info(fmt, args...) prflush(fmt, ##args)
-
-#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
-#ifdef pr_debug
-#undef pr_debug
#endif
+#ifndef pr_debug
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define pr_debug(fmt, args...) printf(fmt, ##args)
#else
#define pr_debug(fmt, args...) {}
#endif
+#endif
#define DPAA_BUG_ON(x) RTE_ASSERT(x)
@@ -256,7 +262,9 @@ __bswap_24(uint32_t x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)
#define cpu_to_be64(x) rte_cpu_to_be_64(x)
+#if !defined(cpu_to_be32)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
+#endif
#define cpu_to_be16(x) rte_cpu_to_be_16(x)
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
diff --git a/drivers/bus/dpaa/include/fsl_fman_crc64.h b/drivers/bus/dpaa/include/fsl_fman_crc64.h
index bf162f3a..08ad6304 100644
--- a/drivers/bus/dpaa/include/fsl_fman_crc64.h
+++ b/drivers/bus/dpaa/include/fsl_fman_crc64.h
@@ -42,9 +42,9 @@ struct fman_crc64_t {
uint64_t initial;
uint64_t table[1 << 8];
};
-extern struct fman_crc64_t FMAN_CRC64_ECMA_182;
+extern struct fman_crc64_t fman_crc64_ecma_182;
#define DECLARE_FMAN_CRC64_TABLE() \
-struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
+struct fman_crc64_t fman_crc64_ecma_182 = { \
0xFFFFFFFFFFFFFFFFULL, \
{ \
0x0000000000000000ULL, 0xb32e4cbe03a75f6fULL, \
@@ -183,7 +183,7 @@ struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
*/
static inline uint64_t fman_crc64_init(void)
{
- return FMAN_CRC64_ECMA_182.initial;
+ return fman_crc64_ecma_182.initial;
}
/* Updates the CRC with arbitrary data */
@@ -192,7 +192,7 @@ static inline uint64_t fman_crc64_update(uint64_t crc,
{
uint8_t *p = data;
while (len--)
- crc = FMAN_CRC64_ECMA_182.table[(crc ^ *(p++)) & 0xff] ^
+ crc = fman_crc64_ecma_182.table[(crc ^ *(p++)) & 0xff] ^
(crc >> 8);
return crc;
}
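Editor's note: the global table symbol is renamed to lower case and the inline helpers are updated to match; callers keep working unchanged. A small usage sketch follows, with the fman_crc64_update() parameter order assumed from the context lines shown above.

```c
/* Sketch: running CRC-64 (ECMA-182) over a buffer with the helpers above.
 * DECLARE_FMAN_CRC64_TABLE() is instantiated once in the program, exactly
 * as before the rename. */
#include <stddef.h>
#include <stdint.h>
#include <fsl_fman_crc64.h>

DECLARE_FMAN_CRC64_TABLE();

static uint64_t buf_crc64(void *buf, size_t len)
{
	uint64_t crc = fman_crc64_init();

	/* Assumed parameter order: fman_crc64_update(crc, data, len). */
	return fman_crc64_update(crc, buf, len);
}
```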
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index b18cf037..e4384149 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1316,6 +1316,26 @@ u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs);
/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_remove(u32 bits);
+
+/**
* qman_affine_channel - return the channel ID of an portal
* @cpu: the cpu whose affine portal is the subject of the query
*
diff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h
index e1836175..ec1ab7ce 100644
--- a/drivers/bus/dpaa/include/fsl_usd.h
+++ b/drivers/bus/dpaa/include/fsl_usd.h
@@ -55,6 +55,10 @@ int qman_free_raw_portal(struct dpaa_raw_portal *portal);
int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
int bman_free_raw_portal(struct dpaa_raw_portal *portal);
+/* Obtain thread-local UIO file-descriptors */
+int qman_thread_fd(void);
+int bman_thread_fd(void);
+
/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
* line before notifying us, and this post-processing re-enables it once
* processing is complete. As such, it is essential to call this before going
@@ -63,6 +67,8 @@ int bman_free_raw_portal(struct dpaa_raw_portal *portal);
void qman_thread_irq(void);
void bman_thread_irq(void);
+void qman_clear_irq(void);
+
/* Global setup */
int qman_global_init(void);
int bman_global_init(void);
diff --git a/drivers/bus/dpaa/meson.build b/drivers/bus/dpaa/meson.build
index d10b62c0..1fcb4e91 100644
--- a/drivers/bus/dpaa/meson.build
+++ b/drivers/bus/dpaa/meson.build
@@ -1,11 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
-deps += ['eventdev']
+deps += ['common_dpaax', 'eventdev']
sources = files('base/fman/fman.c',
'base/fman/fman_hw.c',
'base/fman/netcfg_layer.c',
@@ -26,4 +28,3 @@ if cc.has_argument('-Wno-cast-qual')
endif
includes += include_directories('include', 'base/qbman')
-cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index 7d6d6243..70076c7a 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -95,10 +95,24 @@ DPDK_18.02 {
DPDK_18.08 {
global:
-
fman_if_get_sg_enable;
fman_if_set_sg;
of_get_mac_address;
local: *;
} DPDK_18.02;
+
+DPDK_18.11 {
+ global:
+ bman_thread_irq;
+ fman_if_get_sg_enable;
+ fman_if_set_sg;
+ qman_clear_irq;
+
+ qman_irqsource_add;
+ qman_irqsource_remove;
+ qman_thread_fd;
+ qman_thread_irq;
+
+ local: *;
+} DPDK_18.08;
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 15dc6a4a..1d580a00 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -8,6 +8,7 @@
#include <rte_bus.h>
#include <rte_mempool.h>
+#include <dpaax_iova_table.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -110,6 +111,11 @@ extern struct dpaa_memseg_list rte_dpaa_memsegs;
static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
struct dpaa_memseg *ms;
+ void *va;
+
+ va = dpaax_iova_table_get_va(paddr);
+ if (likely(va != NULL))
+ return va;
/* Check if the address is already part of the memseg list internally
* maintained by the dpaa driver.
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index 515d0f53..218d9bd2 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -19,12 +19,13 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_common_dpaax
# versioning export map
EXPORT_MAP := rte_bus_fslmc_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
qbman/qbman_portal.c \
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index d2900edc..89af9385 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -20,6 +20,8 @@
#include <fslmc_vfio.h>
#include "fslmc_logs.h"
+#include <dpaax_iova_table.h>
+
int dpaa2_logtype_bus;
#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
@@ -161,6 +163,8 @@ scan_one_fslmc_device(char *dev_name)
return -ENOMEM;
}
+ dev->device.bus = &rte_fslmc_bus.bus;
+
/* Parse the device name and ID */
t_ptr = strtok(dup_dev_name, ".");
if (!t_ptr) {
@@ -375,6 +379,19 @@ rte_fslmc_probe(void)
probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+ /* In case of PA, the FD addresses returned by qbman APIs are physical
+ * addresses, which need conversion into equivalent VA address for
+ * rte_mbuf. For that, a table (a serial array, in memory) is used to
+ * increase translation efficiency.
+ * This has to be done before probe as some device initialization
+ * (during) probe allocate memory (dpaa2_sec) which needs to be pinned
+ * to this table.
+ *
+ * Error is ignored as relevant logs are handled within dpaax and
+ * handling for unavailable dpaax table too is transparent to caller.
+ */
+ dpaax_iova_table_populate();
+
TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
ret = rte_fslmc_match(drv, dev);
@@ -384,6 +401,9 @@ rte_fslmc_probe(void)
if (!drv->probe)
continue;
+ if (rte_dev_is_probed(&dev->device))
+ continue;
+
if (dev->device.devargs &&
dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
@@ -396,8 +416,12 @@ rte_fslmc_probe(void)
dev->device.devargs->policy ==
RTE_DEV_WHITELISTED)) {
ret = drv->probe(drv, dev);
- if (ret)
+ if (ret) {
DPAA2_BUS_ERR("Unable to probe");
+ } else {
+ dev->driver = drv;
+ dev->device.driver = &drv->driver;
+ }
}
break;
}
@@ -450,6 +474,11 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
fslmc_bus = driver->fslmc_bus;
+ /* Cleanup the PA->VA Translation table; From whereever this function
+ * is called from.
+ */
+ dpaax_iova_table_depopulate();
+
TAILQ_REMOVE(&fslmc_bus->driver_list, driver, next);
/* Update Bus references */
driver->fslmc_bus = NULL;
@@ -490,6 +519,10 @@ rte_dpaa2_get_iommu_class(void)
if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
return RTE_IOVA_DC;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ return RTE_IOVA_PA;
+#endif
+
/* check if all devices on the bus support Virtual addressing or not */
has_iova_va = fslmc_all_device_support_iova();
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 4c2cd2a8..493b6e5b 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -221,6 +221,13 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
"alloc" : "dealloc",
va, virt_addr, iova_addr, map_len);
+ /* iova_addr may be set to RTE_BAD_IOVA */
+ if (iova_addr == RTE_BAD_IOVA) {
+ DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
+ cur_len += map_len;
+ continue;
+ }
+
if (type == RTE_MEM_EVENT_ALLOC)
ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
else
@@ -318,11 +325,15 @@ fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
static int
fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
- const struct rte_memseg *ms, void *arg)
+ const struct rte_memseg *ms, void *arg)
{
int *n_segs = arg;
int ret;
+ /* if IOVA address is invalid, skip */
+ if (ms->iova == RTE_BAD_IOVA)
+ return 0;
+
ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
if (ret)
DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
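Editor's note: both the memory-event callback and the start-up segment walk now skip segments whose IOVA is RTE_BAD_IOVA (for example, external memory registered without an IOVA). The sketch below shows how such a per-segment callback is typically driven with rte_memseg_walk(); the actual VFIO mapping call is left as a comment.

```c
/* Sketch: walk all memsegs and skip those without a valid IOVA.
 * Returning 0 from the callback keeps the walk going. */
#include <rte_common.h>
#include <rte_memory.h>

static int dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
		      const struct rte_memseg *ms, void *arg)
{
	int *mapped = arg;

	if (ms->iova == RTE_BAD_IOVA)
		return 0;	/* no IOVA for this segment: skip it */

	/* ... issue the VFIO DMA map for (ms->addr_64, ms->iova, ms->len) ... */
	(*mapped)++;
	return 0;
}

static int dmamap_all(void)
{
	int mapped = 0;

	return rte_memseg_walk(dmamap_seg, &mapped);
}
```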
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index 0215d22d..d9103409 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -248,6 +248,16 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
index ff366bfa..95edae9d 100644
--- a/drivers/bus/fslmc/mc/dpci.c
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -265,6 +265,15 @@ int dpci_reset(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpci_get_attributes() - Retrieve DPCI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -292,6 +301,94 @@ int dpci_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
+/**
+ * dpci_get_peer_attributes() - Retrieve peer DPCI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @attr: Returned peer attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_peer_attr *attr)
+{
+ struct dpci_rsp_get_peer_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_PEER_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_peer_attr *)cmd.params;
+ attr->peer_id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
+/**
+ * dpci_get_link_state() - Retrieve the DPCI link state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @up: Returned link state; returns '1' if link is up, '0' otherwise
+ *
+ * DPCI can be connected to another DPCI, together they
+ * create a 'link'. In order to use the DPCI Tx and Rx queues,
+ * both objects must be enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *up)
+{
+ struct dpci_rsp_get_link_state *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_link_state *)cmd.params;
+ *up = dpci_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
+ * dpci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation; use
+ * DPCI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -314,6 +411,9 @@ int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
dpci_set_field(cmd_params->dest_type,
DEST_TYPE,
cfg->dest_cfg.dest_type);
+ dpci_set_field(cmd_params->dest_type,
+ ORDER_PRESERVATION,
+ cfg->order_preservation_en);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -438,3 +538,100 @@ int dpci_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpci_set_opr() - Set Order Restoration configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @index: The queue index
+ * @options: Configuration mode options
+ * can be OPR_OPT_CREATE or OPR_OPT_RETIRE
+ * @cfg: Configuration options for the OPR
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg)
+{
+ struct dpci_cmd_set_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_set_opr *)cmd.params;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->oloe = cfg->oloe;
+ cmd_params->oeane = cfg->oeane;
+ cmd_params->olws = cfg->olws;
+ cmd_params->oa = cfg->oa;
+ cmd_params->oprrws = cfg->oprrws;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpci_get_opr() - Retrieve Order Restoration config and query.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @index: The queue index
+ * @cfg: Returned OPR configuration
+ * @qry: Returned OPR query
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry)
+{
+ struct dpci_rsp_get_opr *rsp_params;
+ struct dpci_cmd_get_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpci_cmd_get_opr *)cmd.params;
+ cmd_params->index = index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpci_rsp_get_opr *)cmd.params;
+ cfg->oloe = rsp_params->oloe;
+ cfg->oeane = rsp_params->oeane;
+ cfg->olws = rsp_params->olws;
+ cfg->oa = rsp_params->oa;
+ cfg->oprrws = rsp_params->oprrws;
+ qry->rip = dpci_get_field(rsp_params->flags, RIP);
+ qry->enable = dpci_get_field(rsp_params->flags, OPR_ENABLE);
+ qry->nesn = le16_to_cpu(rsp_params->nesn);
+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
+ qry->tseq_nlis = dpci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
+ qry->hseq_nlis = dpci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
+
+ return 0;
+}
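The new OPR calls pair naturally: dpci_set_opr() with OPR_OPT_CREATE installs an order point record on a queue index, and dpci_get_opr() reads back both the configuration and the live sequence-number state. A hedged usage sketch (error handling trimmed; it assumes the fsl_dpci.h/fsl_dpopr.h headers from this patch and an already opened DPCI token):

static int dpci_opr_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct opr_cfg cfg = {
		.oprrws = 2,	/* 128-frame restoration window */
		.oa = 1,	/* auto advance NESN */
		.olws = 0,	/* reject late arrivals */
		.oeane = 0,
		.oloe = 0,
	};
	struct opr_qry qry;
	int err;

	err = dpci_set_opr(mc_io, CMD_PRI_LOW, token, 0, OPR_OPT_CREATE, &cfg);
	if (err)
		return err;

	err = dpci_get_opr(mc_io, CMD_PRI_LOW, token, 0, &cfg, &qry);
	if (err)
		return err;

	/* qry.enable/qry.rip report whether the OPR is active or retiring */
	return qry.enable ? 0 : -1;
}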
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
index 3f6e04b9..92bd2651 100644
--- a/drivers/bus/fslmc/mc/dpcon.c
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -296,6 +296,36 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
}
/**
+ * dpcon_set_notification() - Set DPCON notification destination
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @cfg: Notification parameters
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_notification_cfg *cfg)
+{
+ struct dpcon_cmd_set_notification *dpcon_cmd;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
+ cmd_flags,
+ token);
+ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
+ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
+ dpcon_cmd->priority = cfg->priority;
+ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpcon_get_api_version - Get Data Path Concentrator API version
* @mc_io: Pointer to MC portal's DPCON object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
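dpcon_set_notification() is what binds a DPCON channel to a DPIO so that CDAN messages reach software. A minimal sketch, assuming an opened DPCON token and a valid DPIO object id (the wrapper name below is illustrative):

static int dpcon_notify_example(struct fsl_mc_io *mc_io, uint16_t token,
				int dpio_id, uint64_t ctx)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id = dpio_id,	/* DPIO with a notification channel */
		.priority = 0,		/* within the DPIO channel, 0..7 */
		.user_ctx = ctx,	/* returned with every CDAN */
	};

	return dpcon_set_notification(mc_io, CMD_PRI_LOW, token, &cfg);
}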
diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c
index 528889df..dcb9d516 100644
--- a/drivers/bus/fslmc/mc/dpdmai.c
+++ b/drivers/bus/fslmc/mc/dpdmai.c
@@ -113,6 +113,7 @@ int dpdmai_create(struct fsl_mc_io *mc_io,
cmd_flags,
dprc_token);
cmd_params = (struct dpdmai_cmd_create *)cmd.params;
+ cmd_params->num_queues = cfg->num_queues;
cmd_params->priorities[0] = cfg->priorities[0];
cmd_params->priorities[1] = cfg->priorities[1];
@@ -297,6 +298,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
attr->num_of_priorities = rsp_params->num_of_priorities;
+ attr->num_of_queues = rsp_params->num_of_queues;
return 0;
}
@@ -306,6 +308,8 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: Rx queue index. Accepted values are from 0 to the num_queues
+ * parameter provided in dpdmai_create
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation; use
* DPDMAI_ALL_QUEUES to configure all Rx queues
@@ -317,6 +321,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
const struct dpdmai_rx_queue_cfg *cfg)
{
@@ -331,6 +336,7 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
cmd_params->dest_priority = cfg->dest_cfg.priority;
cmd_params->priority = priority;
+ cmd_params->queue_idx = queue_idx;
cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
cmd_params->options = cpu_to_le32(cfg->options);
dpdmai_set_field(cmd_params->dest_type,
@@ -346,6 +352,8 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: Rx queue index. Accepted values are from 0 to the num_queues
+ * parameter provided in dpdmai_create
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @attr: Returned Rx queue attributes
@@ -355,6 +363,7 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_rx_queue_attr *attr)
{
@@ -369,6 +378,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
cmd_params->priority = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -392,6 +402,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: Tx queue index. Accepted values are from 0 to the num_queues
+ * parameter provided in dpdmai_create
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @attr: Returned Tx queue attributes
@@ -401,6 +413,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_tx_queue_attr *attr)
{
@@ -415,6 +428,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
cmd_params->priority = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
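With the queue_idx argument added above, each DPDMAI queue can be configured individually rather than only per priority. A hedged sketch of iterating the queue count reported by dpdmai_get_attributes() (illustrative wrapper, not code from this patch):

static int dpdmai_rx_setup_example(struct fsl_mc_io *mc_io, uint16_t token,
				   uint8_t num_of_queues)
{
	struct dpdmai_rx_queue_cfg cfg = { 0 };
	uint8_t q;
	int err;

	for (q = 0; q < num_of_queues; q++) {
		/* cfg.options / cfg.dest_cfg / cfg.user_ctx would be filled
		 * per queue here; left zeroed in this sketch.
		 */
		err = dpdmai_set_rx_queue(mc_io, CMD_PRI_LOW, token,
					  q, 0 /* priority */, &cfg);
		if (err)
			return err;
	}
	return 0;
}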
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index 966277cc..a3382ed1 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -268,6 +268,15 @@ int dpio_reset(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
int dpio_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index 11183626..9d405b42 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -82,6 +82,7 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
/**
* BPSCN write will attempt to allocate into a cache (coherent write)
*/
+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
int dpbp_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
index 18402ced..55c9fc9b 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -9,13 +9,15 @@
/* DPBP Version */
#define DPBP_VER_MAJOR 3
-#define DPBP_VER_MINOR 3
+#define DPBP_VER_MINOR 4
/* Command versioning */
#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_VERSION_2 2
#define DPBP_CMD_ID_OFFSET 4
#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+#define DPBP_CMD_V2(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_VERSION_2)
/* Command IDs */
#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
@@ -37,8 +39,8 @@
#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
-#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x1b0)
-#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x1b1)
+#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD_V2(0x1b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD_V2(0x1b1)
#define DPBP_CMDID_GET_FREE_BUFFERS_NUM DPBP_CMD(0x1b2)
@@ -68,8 +70,8 @@ struct dpbp_cmd_set_notifications {
uint32_t depletion_exit;
uint32_t surplus_entry;
uint32_t surplus_exit;
- uint16_t options;
- uint16_t pad[3];
+ uint32_t options;
+ uint16_t pad[2];
uint64_t message_ctx;
uint64_t message_iova;
};
@@ -79,8 +81,8 @@ struct dpbp_rsp_get_notifications {
uint32_t depletion_exit;
uint32_t surplus_entry;
uint32_t surplus_exit;
- uint16_t options;
- uint16_t pad[3];
+ uint32_t options;
+ uint16_t pad[2];
uint64_t message_ctx;
uint64_t message_iova;
};
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
index f69ed3f3..9af9097e 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -6,6 +6,8 @@
#ifndef __FSL_DPCI_H
#define __FSL_DPCI_H
+#include <fsl_dpopr.h>
+
/* Data Path Communication Interface API
* Contains initialization APIs and runtime control APIs for DPCI
*/
@@ -17,7 +19,7 @@ struct fsl_mc_io;
/**
* Maximum number of Tx/Rx priorities per DPCI object
*/
-#define DPCI_PRIO_NUM 2
+#define DPCI_PRIO_NUM 4
/**
* Indicates an invalid frame queue
@@ -107,6 +109,27 @@ int dpci_get_attributes(struct fsl_mc_io *mc_io,
struct dpci_attr *attr);
/**
+ * struct dpci_peer_attr - Structure representing the peer DPCI attributes
+ * @peer_id: DPCI peer id; if no peer is connected returns (-1)
+ * @num_of_priorities: The peer's number of receive priorities; determines the
+ * number of transmit priorities for the local DPCI object
+ */
+struct dpci_peer_attr {
+ int peer_id;
+ uint8_t num_of_priorities;
+};
+
+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_peer_attr *attr);
+
+int dpci_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *up);
+
+/**
* enum dpci_dest - DPCI destination types
* @DPCI_DEST_NONE: Unassigned destination; The queue is set in parked mode
* and does not generate FQDAN notifications; user is
@@ -154,6 +177,11 @@ struct dpci_dest_cfg {
#define DPCI_QUEUE_OPT_DEST 0x00000002
/**
+ * Set the queue to hold active mode.
+ */
+#define DPCI_QUEUE_OPT_HOLD_ACTIVE 0x00000004
+
+/**
* struct dpci_rx_queue_cfg - Structure representing RX queue configuration
* @options: Flags representing the suggested modifications to the queue;
* Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
@@ -163,11 +191,14 @@ struct dpci_dest_cfg {
* 'options'
* @dest_cfg: Queue destination parameters;
* valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
+ * @order_preservation_en: order preservation configuration for the rx queue
+ * valid only if 'DPCI_QUEUE_OPT_HOLD_ACTIVE' is contained in 'options'
*/
struct dpci_rx_queue_cfg {
uint32_t options;
uint64_t user_ctx;
struct dpci_dest_cfg dest_cfg;
+ int order_preservation_en;
};
int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
@@ -217,4 +248,18 @@ int dpci_get_api_version(struct fsl_mc_io *mc_io,
uint16_t *major_ver,
uint16_t *minor_ver);
+int dpci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg);
+
+int dpci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry);
+
#endif /* __FSL_DPCI_H */
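Order preservation on a DPCI Rx queue is opted into through the new flag: DPCI_QUEUE_OPT_HOLD_ACTIVE must be present in options for order_preservation_en to take effect, as the structure comment states. A minimal sketch, assuming an opened DPCI token:

static int dpci_rx_order_example(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpci_rx_queue_cfg cfg = { 0 };

	cfg.options = DPCI_QUEUE_OPT_HOLD_ACTIVE;
	cfg.order_preservation_en = 1;

	return dpci_set_rx_queue(mc_io, CMD_PRI_LOW, token,
				 0 /* priority */, &cfg);
}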
diff --git a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
index 634248ac..92b85a82 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -8,7 +8,7 @@
/* DPCI Version */
#define DPCI_VER_MAJOR 3
-#define DPCI_VER_MINOR 3
+#define DPCI_VER_MINOR 4
#define DPCI_CMD_BASE_VERSION 1
#define DPCI_CMD_BASE_VERSION_V2 2
@@ -35,6 +35,8 @@
#define DPCI_CMDID_GET_PEER_ATTR DPCI_CMD_V1(0x0e2)
#define DPCI_CMDID_GET_RX_QUEUE DPCI_CMD_V1(0x0e3)
#define DPCI_CMDID_GET_TX_QUEUE DPCI_CMD_V1(0x0e4)
+#define DPCI_CMDID_SET_OPR DPCI_CMD_V1(0x0e5)
+#define DPCI_CMDID_GET_OPR DPCI_CMD_V1(0x0e6)
/* Macros for accessing command fields smaller than 1byte */
#define DPCI_MASK(field) \
@@ -90,6 +92,8 @@ struct dpci_rsp_get_link_state {
#define DPCI_DEST_TYPE_SHIFT 0
#define DPCI_DEST_TYPE_SIZE 4
+#define DPCI_ORDER_PRESERVATION_SHIFT 4
+#define DPCI_ORDER_PRESERVATION_SIZE 1
struct dpci_cmd_set_rx_queue {
uint32_t dest_id;
@@ -128,5 +132,61 @@ struct dpci_rsp_get_api_version {
uint16_t minor;
};
+struct dpci_cmd_set_opr {
+ uint16_t pad0;
+ uint8_t index;
+ uint8_t options;
+ uint8_t pad1[7];
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+};
+
+struct dpci_cmd_get_opr {
+ uint16_t pad;
+ uint8_t index;
+};
+
+#define DPCI_RIP_SHIFT 0
+#define DPCI_RIP_SIZE 1
+#define DPCI_OPR_ENABLE_SHIFT 1
+#define DPCI_OPR_ENABLE_SIZE 1
+#define DPCI_TSEQ_NLIS_SHIFT 0
+#define DPCI_TSEQ_NLIS_SIZE 1
+#define DPCI_HSEQ_NLIS_SHIFT 0
+#define DPCI_HSEQ_NLIS_SIZE 1
+
+struct dpci_rsp_get_opr {
+ uint64_t pad0;
+ /* from LSB: rip:1 enable:1 */
+ uint8_t flags;
+ uint16_t pad1;
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+ uint16_t nesn;
+ uint16_t pad8;
+ uint16_t ndsn;
+ uint16_t pad2;
+ uint16_t ea_tseq;
+ /* only the LSB */
+ uint8_t tseq_nlis;
+ uint8_t pad3;
+ uint16_t ea_hseq;
+ /* only the LSB */
+ uint8_t hseq_nlis;
+ uint8_t pad4;
+ uint16_t ea_hptr;
+ uint16_t pad5;
+ uint16_t ea_tptr;
+ uint16_t pad6;
+ uint16_t opr_vid;
+ uint16_t pad7;
+ uint16_t opr_id;
+};
#pragma pack(pop)
#endif /* _FSL_DPCI_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
index 36dd5f3c..fc0430dc 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -81,6 +81,25 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
uint16_t token,
struct dpcon_attr *attr);
+/**
+ * struct dpcon_notification_cfg - Structure representing notification params
+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
+ * @priority: Priority selection within the DPIO channel; valid values
+ * are 0-7, depending on the number of priorities in that channel
+ * @user_ctx: User context value provided with each CDAN message
+ */
+struct dpcon_notification_cfg {
+ int dpio_id;
+ uint8_t priority;
+ uint64_t user_ctx;
+};
+
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_notification_cfg *cfg);
+
int dpcon_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h
index 03e46ec1..40469cc1 100644
--- a/drivers/bus/fslmc/mc/fsl_dpdmai.h
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -39,6 +39,7 @@ int dpdmai_close(struct fsl_mc_io *mc_io,
* should be configured with 0
*/
struct dpdmai_cfg {
+ uint8_t num_queues;
uint8_t priorities[DPDMAI_PRIO_NUM];
};
@@ -78,6 +79,7 @@ int dpdmai_reset(struct fsl_mc_io *mc_io,
struct dpdmai_attr {
int id;
uint8_t num_of_priorities;
+ uint8_t num_of_queues;
};
int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
@@ -149,6 +151,7 @@ struct dpdmai_rx_queue_cfg {
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
const struct dpdmai_rx_queue_cfg *cfg);
@@ -168,6 +171,7 @@ struct dpdmai_rx_queue_attr {
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_rx_queue_attr *attr);
@@ -183,6 +187,7 @@ struct dpdmai_tx_queue_attr {
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
+ uint8_t queue_idx,
uint8_t priority,
struct dpdmai_tx_queue_attr *attr);
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
index 618e19ea..7e122de4 100644
--- a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
@@ -7,30 +7,32 @@
/* DPDMAI Version */
#define DPDMAI_VER_MAJOR 3
-#define DPDMAI_VER_MINOR 2
+#define DPDMAI_VER_MINOR 3
/* Command versioning */
#define DPDMAI_CMD_BASE_VERSION 1
+#define DPDMAI_CMD_VERSION_2 2
#define DPDMAI_CMD_ID_OFFSET 4
#define DPDMAI_CMD(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
+#define DPDMAI_CMD_V2(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_VERSION_2)
/* Command IDs */
#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800)
#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E)
-#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMD_V2(0x90E)
#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E)
#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E)
#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002)
#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003)
-#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD_V2(0x004)
#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005)
#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006)
-#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0)
-#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1)
-#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2)
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD_V2(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD_V2(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD_V2(0x1A2)
/* Macros for accessing command fields smaller than 1byte */
#define DPDMAI_MASK(field) \
@@ -47,7 +49,7 @@ struct dpdmai_cmd_open {
};
struct dpdmai_cmd_create {
- uint8_t pad;
+ uint8_t num_queues;
uint8_t priorities[2];
};
@@ -66,6 +68,7 @@ struct dpdmai_rsp_is_enabled {
struct dpdmai_rsp_get_attr {
uint32_t id;
uint8_t num_of_priorities;
+ uint8_t num_of_queues;
};
#define DPDMAI_DEST_TYPE_SHIFT 0
@@ -77,7 +80,7 @@ struct dpdmai_cmd_set_rx_queue {
uint8_t priority;
/* from LSB: dest_type:4 */
uint8_t dest_type;
- uint8_t pad;
+ uint8_t queue_idx;
uint64_t user_ctx;
uint32_t options;
};
@@ -85,6 +88,7 @@ struct dpdmai_cmd_set_rx_queue {
struct dpdmai_cmd_get_queue {
uint8_t pad[5];
uint8_t priority;
+ uint8_t queue_idx;
};
struct dpdmai_rsp_get_rx_queue {
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
index afaf9b71..8559bef8 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -18,7 +18,7 @@ struct fsl_mc_io;
* Management Complex firmware version information
*/
#define MC_VER_MAJOR 10
-#define MC_VER_MINOR 3
+#define MC_VER_MINOR 10
/**
* struct mc_version
diff --git a/drivers/bus/fslmc/mc/fsl_dpopr.h b/drivers/bus/fslmc/mc/fsl_dpopr.h
new file mode 100644
index 00000000..fd727e01
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpopr.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2018 NXP
+ *
+ */
+#ifndef __FSL_DPOPR_H_
+#define __FSL_DPOPR_H_
+
+/** @addtogroup dpopr Data Path Order Restoration API
+ * Contains initialization APIs and runtime APIs for the Order Restoration
+ * @{
+ */
+
+/** Order Restoration properties */
+
+/**
+ * Create a new Order Point Record option
+ */
+#define OPR_OPT_CREATE 0x1
+/**
+ * Retire an existing Order Point Record option
+ */
+#define OPR_OPT_RETIRE 0x2
+
+/**
+ * struct opr_cfg - Structure representing OPR configuration
+ * @oprrws: Order point record (OPR) restoration window size (0 to 5)
+ * 0 - Window size is 32 frames.
+ * 1 - Window size is 64 frames.
+ * 2 - Window size is 128 frames.
+ * 3 - Window size is 256 frames.
+ * 4 - Window size is 512 frames.
+ * 5 - Window size is 1024 frames.
+ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled)
+ * @olws: OPR acceptable late arrival window size (0 to 3)
+ * 0 - Disabled. Late arrivals are always rejected.
+ * 1 - Window size is 32 frames.
+ * 2 - Window size is the same as the OPR restoration
+ * window size configured in the OPRRWS field.
+ * 3 - Window size is 8192 frames.
+ * Late arrivals are always accepted.
+ * @oeane: Order restoration list (ORL) resource exhaustion
+ * advance NESN enable (0 disabled, 1 enabled)
+ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled)
+ */
+struct opr_cfg {
+ uint8_t oprrws;
+ uint8_t oa;
+ uint8_t olws;
+ uint8_t oeane;
+ uint8_t oloe;
+};
+
+/**
+ * struct opr_qry - Structure representing an OPR query result
+ * @enable: Enabled state
+ * @rip: Retirement In Progress
+ * @ndsn: Next dispensed sequence number
+ * @nesn: Next expected sequence number
+ * @ea_hseq: Early arrival head sequence number
+ * @hseq_nlis: HSEQ not last in sequence
+ * @ea_tseq: Early arrival tail sequence number
+ * @tseq_nlis: TSEQ not last in sequence
+ * @ea_tptr: Early arrival tail pointer
+ * @ea_hptr: Early arrival head pointer
+ * @opr_id: Order Point Record ID
+ * @opr_vid: Order Point Record Virtual ID
+ */
+struct opr_qry {
+ char enable;
+ char rip;
+ uint16_t ndsn;
+ uint16_t nesn;
+ uint16_t ea_hseq;
+ char hseq_nlis;
+ uint16_t ea_tseq;
+ char tseq_nlis;
+ uint16_t ea_tptr;
+ uint16_t ea_hptr;
+ uint16_t opr_id;
+ uint16_t opr_vid;
+};
+
+#endif /* __FSL_DPOPR_H_ */
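The oprrws/olws encodings above are powers of two, so a small helper makes the window arithmetic explicit; this is an illustrative helper, not part of the patch:

#include <stdint.h>

/* Map an oprrws value (0..5) to its restoration window in frames:
 * 32, 64, 128, 256, 512, 1024. Returns 0 for out-of-range input.
 */
static inline unsigned int opr_restoration_window_frames(uint8_t oprrws)
{
	return oprrws <= 5 ? (32u << oprrws) : 0;
}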
diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build
index 22a56a6f..4b052157 100644
--- a/drivers/bus/fslmc/meson.build
+++ b/drivers/bus/fslmc/meson.build
@@ -1,11 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
-deps += ['eventdev', 'kvargs']
+deps += ['common_dpaax', 'eventdev', 'kvargs']
sources = files('fslmc_bus.c',
'fslmc_vfio.c',
'mc/dpbp.c',
@@ -24,4 +26,3 @@ sources = files('fslmc_bus.c',
allow_experimental_apis = true
includes += include_directories('mc', 'qbman/include', 'portal')
-cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index 39c5adf9..db49d637 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -28,6 +28,13 @@
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
+/* List of all the memseg information locally maintained in the dpaa2 driver.
+ * This is to optimize PA-to-VA searches until a better mechanism (algorithm)
+ * is available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
static struct dpbp_dev_list dpbp_dev_list
= TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 99f70be1..ce069984 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
#include <unistd.h>
@@ -53,6 +53,11 @@ static uint32_t io_space_count;
/* Variable to store DPAA2 platform type */
uint32_t dpaa2_svr_family;
+/* Variable to store DPAA2 DQRR size */
+uint8_t dpaa2_dqrr_size;
+/* Variable to store DPAA2 EQCR size */
+uint8_t dpaa2_eqcr_size;
+
/*Stashing Macros default for LS208x*/
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;
@@ -125,7 +130,7 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
cpu_mask, token);
ret = system(command);
if (ret < 0)
- DPAA2_BUS_WARN(
+ DPAA2_BUS_DEBUG(
"Failed to affine interrupts on respective core");
else
DPAA2_BUS_DEBUG(" %s command is executed", command);
@@ -178,68 +183,6 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
#endif
static int
-configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
-{
- struct qbman_swp_desc p_des;
- struct dpio_attr attr;
-
- dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
- if (!dpio_dev->dpio) {
- DPAA2_BUS_ERR("Memory allocation failure");
- return -1;
- }
-
- dpio_dev->dpio->regs = dpio_dev->mc_portal;
- if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
- &dpio_dev->token)) {
- DPAA2_BUS_ERR("Failed to allocate IO space");
- free(dpio_dev->dpio);
- return -1;
- }
-
- if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- DPAA2_BUS_ERR("Failed to reset dpio");
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- DPAA2_BUS_ERR("Failed to Enable dpio");
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
- dpio_dev->token, &attr)) {
- DPAA2_BUS_ERR("DPIO Get attribute failed");
- dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- /* Configure & setup SW portal */
- p_des.block = NULL;
- p_des.idx = attr.qbman_portal_id;
- p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
- p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
- p_des.irq = -1;
- p_des.qman_version = attr.qbman_version;
-
- dpio_dev->sw_portal = qbman_swp_init(&p_des);
- if (dpio_dev->sw_portal == NULL) {
- DPAA2_BUS_ERR("QBMan SW Portal Init failed");
- dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
- free(dpio_dev->dpio);
- return -1;
- }
-
- return 0;
-}
-
-static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
{
int sdest, ret;
@@ -402,15 +345,17 @@ dpaa2_create_dpio_device(int vdev_fd,
struct vfio_device_info *obj_info,
int object_id)
{
- struct dpaa2_dpio_dev *dpio_dev;
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
+ struct qbman_swp_desc p_des;
+ struct dpio_attr attr;
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
return -1;
}
- dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
+ dpio_dev = rte_zmalloc(NULL, sizeof(struct dpaa2_dpio_dev),
RTE_CACHE_LINE_SIZE);
if (!dpio_dev) {
DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
@@ -423,45 +368,33 @@ dpaa2_create_dpio_device(int vdev_fd,
/* Using single portal for all devices */
dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
- reg_info.index = 0;
- if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- DPAA2_BUS_ERR("vfio: error getting region info");
- rte_free(dpio_dev);
- return -1;
+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
+ if (!dpio_dev->dpio) {
+ DPAA2_BUS_ERR("Memory allocation failure");
+ goto err;
}
- dpio_dev->ce_size = reg_info.size;
- dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
- PROT_WRITE | PROT_READ, MAP_SHARED,
- vdev_fd, reg_info.offset);
-
- reg_info.index = 1;
- if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- DPAA2_BUS_ERR("vfio: error getting region info");
- rte_free(dpio_dev);
- return -1;
+ dpio_dev->dpio->regs = dpio_dev->mc_portal;
+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
+ &dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to allocate IO space");
+ goto err;
}
- dpio_dev->ci_size = reg_info.size;
- dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
- PROT_WRITE | PROT_READ, MAP_SHARED,
- vdev_fd, reg_info.offset);
-
- if (configure_dpio_qbman_swp(dpio_dev)) {
- DPAA2_BUS_ERR(
- "Fail to configure the dpio qbman portal for %d",
- dpio_dev->hw_id);
- rte_free(dpio_dev);
- return -1;
+ if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to reset dpio");
+ goto err;
}
- io_space_count++;
- dpio_dev->index = io_space_count;
+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ DPAA2_BUS_ERR("Failed to Enable dpio");
+ goto err;
+ }
- if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
- DPAA2_BUS_ERR("Fail to setup interrupt for %d",
- dpio_dev->hw_id);
- rte_free(dpio_dev);
+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, &attr)) {
+ DPAA2_BUS_ERR("DPIO Get attribute failed");
+ goto err;
}
/* find the SoC type for the first time */
@@ -481,11 +414,77 @@ dpaa2_create_dpio_device(int vdev_fd,
DPAA2_BUS_DEBUG("LX2160 Platform Detected");
}
dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
+
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ dpaa2_dqrr_size = DPAA2_LX2_DQRR_RING_SIZE;
+ dpaa2_eqcr_size = DPAA2_LX2_EQCR_RING_SIZE;
+ } else {
+ dpaa2_dqrr_size = DPAA2_DQRR_RING_SIZE;
+ dpaa2_eqcr_size = DPAA2_EQCR_RING_SIZE;
+ }
+ }
+
+ if (dpaa2_svr_family == SVR_LX2160A)
+ reg_info.index = DPAA2_SWP_CENA_MEM_REGION;
+ else
+ reg_info.index = DPAA2_SWP_CENA_REGION;
+
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ goto err;
+ }
+
+ dpio_dev->ce_size = reg_info.size;
+ dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ reg_info.index = DPAA2_SWP_CINH_REGION;
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ DPAA2_BUS_ERR("vfio: error getting region info");
+ goto err;
+ }
+
+ dpio_dev->ci_size = reg_info.size;
+ dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ vdev_fd, reg_info.offset);
+
+ /* Configure & setup SW portal */
+ p_des.block = NULL;
+ p_des.idx = attr.qbman_portal_id;
+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
+ p_des.irq = -1;
+ p_des.qman_version = attr.qbman_version;
+
+ dpio_dev->sw_portal = qbman_swp_init(&p_des);
+ if (dpio_dev->sw_portal == NULL) {
+ DPAA2_BUS_ERR("QBMan SW Portal Init failed");
+ goto err;
+ }
+
+ io_space_count++;
+ dpio_dev->index = io_space_count;
+
+ if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
+ DPAA2_BUS_ERR("Fail to setup interrupt for %d",
+ dpio_dev->hw_id);
+ goto err;
}
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
return 0;
+
+err:
+ if (dpio_dev->dpio) {
+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ }
+ rte_free(dpio_dev);
+ return -1;
}
void
@@ -506,7 +505,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
q_storage->dq_storage[i] = rte_malloc(NULL,
- DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
+ dpaa2_dqrr_size * sizeof(struct qbman_result),
RTE_CACHE_LINE_SIZE);
if (!q_storage->dq_storage[i])
goto fail;
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index d593eea7..462501a2 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -30,6 +30,10 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
/* Variable to store DPAA2 platform type */
extern uint32_t dpaa2_svr_family;
+/* Variable to store DPAA2 DQRR size */
+extern uint8_t dpaa2_dqrr_size;
+/* Variable to store DPAA2 EQCR size */
+extern uint8_t dpaa2_eqcr_size;
extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 82075936..efbeebef 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
@@ -9,6 +9,7 @@
#define _DPAA2_HW_PVT_H_
#include <rte_eventdev.h>
+#include <dpaax_iova_table.h>
#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
@@ -31,11 +32,27 @@
#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */
#endif
-#define MAX_TX_RING_SLOTS 8
- /** <Maximum number of slots available in TX ring*/
+/* Maximum number of slots available in TX ring */
+#define MAX_TX_RING_SLOTS 32
-#define DPAA2_DQRR_RING_SIZE 16
- /** <Maximum number of slots available in RX ring*/
+/* Maximum number of slots available in TX (EQCR) ring */
+#define DPAA2_EQCR_RING_SIZE 8
+/* Maximum number of slots available in TX (EQCR) ring on LX2 */
+#define DPAA2_LX2_EQCR_RING_SIZE 32
+
+/* Maximum number of slots available in RX ring */
+#define DPAA2_DQRR_RING_SIZE 16
+/* Maximum number of slots available in RX ring on LX2 */
+#define DPAA2_LX2_DQRR_RING_SIZE 32
+
+/* EQCR shift to get EQCR size (1 << 3) = 8 for LS2 */
+#define DPAA2_EQCR_SHIFT 3
+/* EQCR shift to get EQCR size for LX2 (1 << 5) = 32 */
+#define DPAA2_LX2_EQCR_SHIFT 5
+
+#define DPAA2_SWP_CENA_REGION 0
+#define DPAA2_SWP_CINH_REGION 1
+#define DPAA2_SWP_CENA_MEM_REGION 2
#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
@@ -193,6 +210,12 @@ enum qbman_fd_format {
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)
#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
+
+#define DPAA2_RESET_FD_FLC(fd) do { \
+ (fd)->simple.flc_lo = 0; \
+ (fd)->simple.flc_hi = 0; \
+} while (0)
+
#define DPAA2_SET_FD_FLC(fd, addr) do { \
(fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
@@ -275,28 +298,26 @@ extern struct dpaa2_memseg_list rte_dpaa2_memsegs;
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
extern uint8_t dpaa2_virt_mode;
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
-/* todo - this is costly, need to write a fast coversion routine */
+
static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
- struct dpaa2_memseg *ms;
+ void *va;
if (dpaa2_virt_mode)
return (void *)(size_t)paddr;
- /* Check if the address is already part of the memseg list internally
- * maintained by the dpaa2 driver.
- */
- TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
- if (paddr >= ms->iova && paddr <
- ms->iova + ms->len)
- return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
- }
+ va = (void *)dpaax_iova_table_get_va(paddr);
+ if (likely(va != NULL))
+ return va;
/* If not, Fallback to full memseg list searching */
- return rte_mem_iova2virt(paddr);
+ va = rte_mem_iova2virt(paddr);
+
+ return va;
}
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
+
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
const struct rte_memseg *memseg;
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 7be8f54c..655bff4b 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -78,13 +78,14 @@ do { \
#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
-
#define __iomem
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+#define dma_wmb() rte_smp_mb()
+
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 3e63db3a..10c72e04 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -43,6 +43,15 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
/**
+ * qbman_swp_invalidate() - Invalidate the cache-enabled area of the QBMan
+ * portal. This must be called if a portal is moved to another core,
+ * because the QBMan portal area is non-coherent.
+ * @p: the qbman_swp object to be invalidated
+ *
+ */
+void qbman_swp_invalidate(struct qbman_swp *p);
+
+/**
* qbman_swp_get_desc() - Get the descriptor of the given portal object.
* @p: the given portal object.
*
@@ -172,7 +181,7 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
/**
* struct qbman_result - structure for qbman dequeue response and/or
* notification.
- * @donot_manipulate_directly: the 16 32bit data to represent the whole
+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
* possible qbman dequeue result.
*/
struct qbman_result {
@@ -262,7 +271,7 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
*/
struct qbman_pull_desc {
union {
- uint32_t donot_manipulate_directly[16];
+ uint32_t dont_manipulate_directly[16];
struct pull {
uint8_t verb;
uint8_t numf;
@@ -356,6 +365,14 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
enum qbman_pull_type_e dct);
/**
+ * qbman_pull_desc_set_rad() - Decide whether to reschedule the FQ after dequeue
+ *
+ * @d: the pull dequeue descriptor to be set
+ * @rad: 1 = Reschedule the FQ after dequeue.
+ * 0 = Allow the FQ to remain active after dequeue.
+ */
+void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
+
+/**
* qbman_swp_pull() - Issue the pull dequeue command
* @s: the software portal object.
* @d: the software portal descriptor which has been configured with
@@ -775,7 +792,7 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
union {
- uint32_t donot_manipulate_directly[8];
+ uint32_t dont_manipulate_directly[8];
struct eq {
uint8_t verb;
uint8_t dca;
@@ -796,11 +813,11 @@ struct qbman_eq_desc {
/**
* struct qbman_eq_response - structure of enqueue response
- * @donot_manipulate_directly: the 16 32bit data to represent the whole
+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
* enqueue response.
*/
struct qbman_eq_response {
- uint32_t donot_manipulate_directly[16];
+ uint32_t dont_manipulate_directly[16];
};
/**
@@ -958,6 +975,7 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
* @s: the software portal used for enqueue.
* @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
+ * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
@@ -973,7 +991,6 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
* @s: the software portal used for enqueue.
* @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
- * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
@@ -998,12 +1015,12 @@ int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
/*******************/
/**
* struct qbman_release_desc - The structure for buffer release descriptor
- * @donot_manipulate_directly: the 32bit data to represent the whole
+ * @dont_manipulate_directly: the 32bit data to represent the whole
* possible settings of qbman release descriptor.
*/
struct qbman_release_desc {
union {
- uint32_t donot_manipulate_directly[16];
+ uint32_t dont_manipulate_directly[16];
struct br {
uint8_t verb;
uint8_t reserved;
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 07145005..3380e54f 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -1,39 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
*/
+#include "qbman_sys.h"
#include "qbman_portal.h"
/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE 0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46
-/* CINH register offsets */
-#define QBMAN_CINH_SWP_EQCR_PI 0x800
-#define QBMAN_CINH_SWP_EQCR_CI 0x840
-#define QBMAN_CINH_SWP_EQAR 0x8c0
-#define QBMAN_CINH_SWP_DQPI 0xa00
-#define QBMAN_CINH_SWP_DCAP 0xac0
-#define QBMAN_CINH_SWP_SDQCR 0xb00
-#define QBMAN_CINH_SWP_RAR 0xcc0
-#define QBMAN_CINH_SWP_ISR 0xe00
-#define QBMAN_CINH_SWP_IER 0xe40
-#define QBMAN_CINH_SWP_ISDR 0xe80
-#define QBMAN_CINH_SWP_IIR 0xec0
-#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
-#define QBMAN_CINH_SWP_ITPR 0xf40
-
-/* CENA register offsets */
-#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
-#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
-#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
-#define QBMAN_CENA_SWP_CR 0x600
-#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
-#define QBMAN_CENA_SWP_VDQCR 0x780
-#define QBMAN_CENA_SWP_EQCR_CI 0x840
-
/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
@@ -83,6 +61,102 @@ enum qbman_sdqcr_fc {
#define MAX_QBMAN_PORTALS 64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
+/* Internal Function declaration */
+static int
+qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+static int
+qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+
+static int
+qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+static int
+qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+
+static int
+qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
+
+static int
+qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
+static int
+qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
+
+const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int
+qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers);
+static int
+qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers);
+
+/* Function pointers */
+static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+ = qbman_swp_enqueue_array_mode_direct;
+
+static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+ = qbman_swp_enqueue_ring_mode_direct;
+
+static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_direct;
+
+static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_desc_direct;
+
+static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+ = qbman_swp_pull_direct;
+
+const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+ = qbman_swp_dqrr_next_direct;
+
+static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers)
+ = qbman_swp_release_direct;
+
/*********************************/
/* Portal constructor/destructor */
/*********************************/
@@ -104,25 +178,30 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
int ret;
uint32_t eqcr_pi;
+ uint32_t mask_size;
struct qbman_swp *p = malloc(sizeof(*p));
if (!p)
return NULL;
+
+ memset(p, 0, sizeof(struct qbman_swp));
+
p->desc = *d;
#ifdef QBMAN_CHECKING
p->mc.check = swp_mc_can_start;
#endif
p->mc.valid_bit = QB_VALID_BIT;
- p->sdq = 0;
p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ p->mr.valid_bit = QB_VALID_BIT;
atomic_set(&p->vdq.busy, 1);
p->vdq.valid_bit = QB_VALID_BIT;
- p->dqrr.next_idx = 0;
p->dqrr.valid_bit = QB_VALID_BIT;
- if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+ qman_version = p->desc.qman_version;
+ if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
p->dqrr.dqrr_size = 4;
p->dqrr.reset_bug = 1;
} else {
@@ -136,18 +215,54 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
pr_err("qbman_swp_sys_init() failed %d\n", ret);
return NULL;
}
+
+ /* Verify that the DQRR PI is 0 - if it is not, the portal is not
+ * in its default state, which is an error
+ */
+ if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
+ pr_err("qbman DQRR PI is not zero, portal is not clean\n");
+ free(p);
+ return NULL;
+ }
+
/* SDQCR needs to be initialized to 0 when no channels are
* being dequeued from or else the QMan HW will indicate an
* error. The values that were calculated above will be
* applied when dequeues from a specific channel are enabled.
*/
qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ if ((qman_version & 0xFFFF0000) >= QMAN_REV_5000) {
+ p->eqcr.pi_ring_size = 32;
+ qbman_swp_enqueue_array_mode_ptr =
+ qbman_swp_enqueue_array_mode_mem_back;
+ qbman_swp_enqueue_ring_mode_ptr =
+ qbman_swp_enqueue_ring_mode_mem_back;
+ qbman_swp_enqueue_multiple_ptr =
+ qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_desc_ptr =
+ qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+ qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+ qbman_swp_release_ptr = qbman_swp_release_mem_back;
+ }
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_mask = (p->eqcr.pi_mask<<1) + 1;
eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
- p->eqcr.pi = eqcr_pi & 0xF;
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_mask;
p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
- p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
- p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
- p->eqcr.ci, p->eqcr.pi);
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p->eqcr.ci = qbman_cinh_read(&p->sys,
+ QBMAN_CINH_SWP_EQCR_CI) & p->eqcr.pi_mask;
+ else
+ p->eqcr.ci = qbman_cinh_read(&p->sys,
+ QBMAN_CINH_SWP_EQCR_PI) & p->eqcr.pi_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size -
+ qm_cyc_diff(p->eqcr.pi_ring_size,
+ p->eqcr.ci & (p->eqcr.pi_mask<<1),
+ p->eqcr.pi & (p->eqcr.pi_mask<<1));
portal_idx_map[p->desc.idx] = p;
return p;
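The pi_mask loop in qbman_swp_init() sizes the mask to one bit more than is needed to index the ring, so the producer index can run modulo twice the ring size and the spare bit tracks wrap; half_mask (pi_mask >> 1) is what actually indexes EQCR entries. A standalone illustration of the same arithmetic:

#include <stdint.h>

static uint32_t eqcr_pi_mask(uint32_t pi_ring_size)
{
	uint32_t mask_size, pi_mask = 0;

	/* 8-entry ring -> 0xF, 32-entry ring -> 0x3F */
	for (mask_size = pi_ring_size; mask_size > 0; mask_size >>= 1)
		pi_mask = (pi_mask << 1) + 1;

	return pi_mask;
}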
@@ -229,7 +344,8 @@ int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
- qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
+ inhibit ? 0xffffffff : 0);
}
/***********************/
@@ -246,7 +362,10 @@ void *qbman_swp_mc_start(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
- ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+ else
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
#ifdef QBMAN_CHECKING
if (!ret)
p->mc.check = swp_mc_can_submit;
@@ -266,8 +385,17 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
* caller wants to OR but has forgotten to do so.
*/
QBMAN_BUG_ON((*v & cmd_verb) != *v);
- *v = cmd_verb | p->mc.valid_bit;
- qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+ clean(cmd);
+ } else {
+ *v = cmd_verb | p->mr.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
+ dma_wmb();
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+ }
#ifdef QBMAN_CHECKING
p->mc.check = swp_mc_can_poll;
#endif
@@ -279,17 +407,34 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
#ifdef QBMAN_CHECKING
QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
- qbman_cena_invalidate_prefetch(&p->sys,
- QBMAN_CENA_SWP_RR(p->mc.valid_bit));
- ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
- /* Remove the valid-bit - command completed if the rest is non-zero */
- verb = ret[0] & ~QB_VALID_BIT;
- if (!verb)
- return NULL;
+ if ((p->desc.qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ qbman_cena_invalidate_prefetch(&p->sys,
+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ ret = qbman_cena_read(&p->sys,
+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit -
+ * command completed iff the rest is non-zero
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ } else {
+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
+ /* Command completed if the valid bit is toggled */
+ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+ return NULL;
+ /* Remove the valid-bit -
+ * command completed iff the rest is non-zero
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mr.valid_bit ^= QB_VALID_BIT;
+ }
#ifdef QBMAN_CHECKING
p->mc.check = swp_mc_can_start;
#endif
- p->mc.valid_bit ^= QB_VALID_BIT;
return ret;
}
@@ -417,13 +562,26 @@ void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
}
}
-#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_IDX(eqar) ((eqar) & 0x1f)
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
-static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd)
+static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
+ uint8_t idx)
+{
+ if (idx < 16)
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
+ QMAN_RT_MODE);
+ else
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
+ (idx - 16) * 4,
+ QMAN_RT_MODE);
+}
+
+
+static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
@@ -433,39 +591,69 @@ static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
if (!EQAR_SUCCESS(eqar))
return -EBUSY;
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], fd, sizeof(*fd));
+
/* Set the verb byte, have to substitute in the valid-bit */
- lwsync();
+ dma_wmb();
p[0] = cl[0] | EQAR_VB(eqar);
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
return 0;
}
+static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
-static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd)
+ pr_debug("EQAR=%08x\n", eqar);
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | EQAR_VB(eqar);
+ dma_wmb();
+ qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+ return 0;
+}
+
+static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
+}
+
+static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
- uint32_t eqcr_ci;
- uint8_t diff;
+ uint32_t eqcr_ci, full_mask, half_mask;
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
s->eqcr.ci = qbman_cena_read_reg(&s->sys,
- QBMAN_CENA_SWP_EQCR_CI) & 0xF;
- diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
- eqcr_ci, s->eqcr.ci);
- s->eqcr.available += diff;
- if (!diff)
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
return -EBUSY;
}
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], fd, sizeof(*fd));
lwsync();
@@ -473,16 +661,61 @@ static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
/* Set the verb byte, have to substitute in the valid-bit */
p[0] = cl[0] | s->eqcr.pi_vb;
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
s->eqcr.pi++;
- s->eqcr.pi &= 0xF;
+ s->eqcr.pi &= full_mask;
s->eqcr.available--;
- if (!(s->eqcr.pi & 7))
+ if (!(s->eqcr.pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
return 0;
}
+static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, full_mask, half_mask;
+
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return -EBUSY;
+ }
+
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ s->eqcr.pi++;
+ s->eqcr.pi &= full_mask;
+ s->eqcr.available--;
+ if (!(s->eqcr.pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ return 0;
+}
+
+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
+}
+
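
The ring-mode paths above keep a producer index that counts over twice the number of EQCR slots: the low bits (pi & half_mask) pick the slot, and the valid bit is flipped each time the index crosses the ring boundary so hardware can distinguish fresh entries from stale ones left over from the previous pass. A simplified, self-contained model of that indexing and valid-bit arithmetic (not the real QBMan layout):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u                       /* entries in the ring */
#define FULL_MASK (2u * RING_SIZE - 1u)    /* producer counter wraps at 2x */
#define HALF_MASK (RING_SIZE - 1u)         /* slot index within the ring */
#define VALID_BIT 0x80u

int main(void)
{
	uint32_t pi = 0, vb = VALID_BIT;
	unsigned int i;

	for (i = 0; i < 3 * RING_SIZE; i++) {
		printf("enqueue %2u -> slot %u, verb valid bit 0x%02x\n",
		       i, pi & HALF_MASK, vb);
		pi = (pi + 1) & FULL_MASK;
		if (!(pi & HALF_MASK))        /* crossed the ring boundary */
			vb ^= VALID_BIT;
	}
	return 0;
}
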
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct qbman_fd *fd)
{
@@ -492,27 +725,27 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
return qbman_swp_enqueue_ring_mode(s, d, fd);
}
-int qbman_swp_enqueue_multiple(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- uint32_t *flags,
- int num_frames)
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
{
- uint32_t *p;
+ uint32_t *p = NULL;
const uint32_t *cl = qb_cl(d);
- uint32_t eqcr_ci, eqcr_pi;
- uint8_t diff;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
uint64_t addr_cena;
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
s->eqcr.ci = qbman_cena_read_reg(&s->sys,
- QBMAN_CENA_SWP_EQCR_CI) & 0xF;
- diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
- eqcr_ci, s->eqcr.ci);
- s->eqcr.available += diff;
- if (!diff)
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
return 0;
}
@@ -523,11 +756,10 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], &fd[i], sizeof(*fd));
eqcr_pi++;
- eqcr_pi &= 0xF;
}
lwsync();
@@ -536,7 +768,7 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
@@ -545,8 +777,7 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
- eqcr_pi &= 0xF;
- if (!(eqcr_pi & 7))
+ if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
@@ -554,35 +785,104 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((uintptr_t)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
eqcr_pi++;
- eqcr_pi &= 0xF;
}
- s->eqcr.pi = eqcr_pi;
+ s->eqcr.pi = eqcr_pi & full_mask;
return num_enqueued;
}
-int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- int num_frames)
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ return num_enqueued;
+}
+
+inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
+
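
Both multi-frame variants follow the same two-phase publish: copy every descriptor payload into its EQCR slot first, issue a write barrier, and only then store the verb byte that carries the valid bit (the memory-backed portal additionally rings the producer-index doorbell through the cache-inhibited register). A rough, portable sketch of that ordering, with a C11 release fence standing in for lwsync/dma_wmb and none of the portal's actual memory map:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define SLOT_WORDS 16
#define VALID_BIT  0x80u

struct slot { uint32_t w[SLOT_WORDS]; };

/* Publish one command into a slot that a consumer polls on w[0]. */
static void publish(struct slot *s, const uint32_t *cmd, uint32_t valid_bit)
{
	/* Phase 1: fill in everything except the verb word. */
	memcpy(&s->w[1], &cmd[1], (SLOT_WORDS - 1) * sizeof(uint32_t));

	/* Barrier: make the payload visible before the verb word. */
	atomic_thread_fence(memory_order_release);

	/* Phase 2: the verb word with the valid bit makes the entry "live". */
	s->w[0] = cmd[0] | valid_bit;
}

int main(void)
{
	struct slot s = { {0} };
	uint32_t cmd[SLOT_WORDS] = { 0x02 /* enqueue verb */ };

	publish(&s, cmd, VALID_BIT);
	return s.w[0] == (0x02 | VALID_BIT) ? 0 : 1;
}
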
+static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
{
uint32_t *p;
const uint32_t *cl;
- uint32_t eqcr_ci, eqcr_pi;
- uint8_t diff;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
uint64_t addr_cena;
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
s->eqcr.ci = qbman_cena_read_reg(&s->sys,
- QBMAN_CENA_SWP_EQCR_CI) & 0xF;
- diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
- eqcr_ci, s->eqcr.ci);
- s->eqcr.available += diff;
- if (!diff)
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
return 0;
}
@@ -593,12 +893,11 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
/* Fill in the EQCR ring */
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = qb_cl(&d[i]);
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], &fd[i], sizeof(*fd));
eqcr_pi++;
- eqcr_pi &= 0xF;
}
lwsync();
@@ -607,12 +906,11 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = qb_cl(&d[i]);
p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++;
- eqcr_pi &= 0xF;
- if (!(eqcr_pi & 7))
+ if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
@@ -620,14 +918,78 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
eqcr_pi = s->eqcr.pi;
addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((uintptr_t)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
+ eqcr_pi++;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_mask>>1);
+ full_mask = s->eqcr.pi_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = qb_cl(&d[i]);
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = qb_cl(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++;
- eqcr_pi &= 0xF;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
}
- s->eqcr.pi = eqcr_pi;
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
return num_enqueued;
}
+inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
/*************************/
/* Static (push) dequeue */
@@ -670,6 +1032,7 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
#define QB_VDQCR_VERB_DT_SHIFT 2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5
+#define QB_VDQCR_VERB_RAD_SHIFT 6
enum qb_pull_dt_e {
qb_pull_dt_channel,
@@ -702,7 +1065,8 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
d->pull.rsp_addr = storage_phys;
}
-void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
+ uint8_t numframes)
{
d->pull.numf = numframes - 1;
}
@@ -735,7 +1099,20 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
d->pull.dq_src = chid;
}
-int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
+{
+ if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
+ if (rad)
+ d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
+ else
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
+ } else {
+ printf("The RAD feature is not valid when RLS = 0\n");
+ }
+}
+
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
{
uint32_t *p;
uint32_t *cl = qb_cl(d);
@@ -759,6 +1136,36 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
return 0;
}
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+{
+ uint32_t *p;
+ uint32_t *cl = qb_cl(d);
+
+ if (!atomic_dec_and_test(&s->vdq.busy)) {
+ atomic_inc(&s->vdq.busy);
+ return -EBUSY;
+ }
+
+ d->pull.tok = s->sys.idx + 1;
+ s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
+ memcpy(&p[1], &cl[1], 12);
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+
+ return 0;
+}
+
+inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ return qbman_swp_pull_ptr(s, d);
+}
+
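
Both pull variants claim the single outstanding volatile-dequeue slot with an atomic decrement-and-test on s->vdq.busy, undoing the decrement and returning -EBUSY if someone already owns it; the memory-backed variant also stamps the command with a token (portal index + 1) so the completion can later be matched back to this portal. A small stand-alone illustration of the claim/release pattern, using C11 atomics instead of the driver's own atomic helpers:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

/* 1 = slot free, 0 = a volatile dequeue is outstanding. */
static atomic_int vdq_busy = 1;

static int vdq_claim(void)
{
	/* atomic_dec_and_test(): succeed only if we took it from 1 to 0. */
	if (atomic_fetch_sub(&vdq_busy, 1) - 1 != 0) {
		atomic_fetch_add(&vdq_busy, 1);   /* undo, someone owns it */
		return -EBUSY;
	}
	return 0;
}

static void vdq_release(void)
{
	atomic_fetch_add(&vdq_busy, 1);           /* done: mark free again */
}

int main(void)
{
	printf("first claim:   %d\n", vdq_claim());  /* 0 */
	printf("second claim:  %d\n", vdq_claim());  /* -EBUSY */
	vdq_release();
	printf("after release: %d\n", vdq_claim());  /* 0 */
	return 0;
}
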
/****************/
/* Polling DQRR */
/****************/
@@ -791,7 +1198,12 @@ void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
-const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
+inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ return qbman_swp_dqrr_next_ptr(s);
+}
+
+const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
uint32_t verb;
uint32_t response_verb;
@@ -801,7 +1213,7 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
/* Before using valid-bit to detect if something is there, we have to
* handle the case of the DQRR reset bug...
*/
- if (unlikely(s->dqrr.reset_bug)) {
+ if (s->dqrr.reset_bug) {
/* We pick up new entries by cache-inhibited producer index,
* which means that a non-coherent mapping would require us to
* invalidate and read *only* once that PI has indicated that
@@ -833,7 +1245,8 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
}
p = qbman_cena_read_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+
verb = p->dq.verb;
/* If the valid-bit isn't of the expected polarity, nothing there. Note,
@@ -867,11 +1280,54 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
return p;
}
+const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+ uint32_t verb;
+ uint32_t response_verb;
+ uint32_t flags;
+ const struct qbman_result *p;
+
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+
+ verb = p->dq.verb;
+
+ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
+	 * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
+ return NULL;
+
+ /* There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
+ s->dqrr.next_idx = 0;
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+ }
+ /* If this is the final response to a volatile dequeue command
+ * indicate that the vdq is no longer busy
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & QBMAN_DQ_STAT_VOLATILE) &&
+ (flags & QBMAN_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.busy);
+ return p;
+}
+
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
const struct qbman_result *dq)
{
- qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+ qbman_cinh_write(&s->sys,
+ QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
@@ -884,6 +1340,7 @@ void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
/*********************************/
/* Polling user-provided storage */
/*********************************/
+
int qbman_result_has_new_result(struct qbman_swp *s,
struct qbman_result *dq)
{
@@ -898,11 +1355,11 @@ int qbman_result_has_new_result(struct qbman_swp *s,
((struct qbman_result *)dq)->dq.tok = 0;
/*
- * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
- * fact "VDQCR" shows busy doesn't mean that we hold the result that
- * makes it available. Eg. we may be looking at our 10th dequeue result,
- * having released VDQCR after the 1st result and it is now busy due to
- * some other command!
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because
+ * the fact "VDQCR" shows busy doesn't mean that we hold the result
+ * that makes it available. Eg. we may be looking at our 10th dequeue
+ * result, having released VDQCR after the 1st result and it is now
+ * busy due to some other command!
*/
if (s->vdq.storage == dq) {
s->vdq.storage = NULL;
@@ -936,11 +1393,11 @@ int qbman_check_command_complete(struct qbman_result *dq)
s = portal_idx_map[dq->dq.tok - 1];
/*
- * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
- * fact "VDQCR" shows busy doesn't mean that we hold the result that
- * makes it available. Eg. we may be looking at our 10th dequeue result,
- * having released VDQCR after the 1st result and it is now busy due to
- * some other command!
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because
+ * the fact "VDQCR" shows busy doesn't mean that we hold the result
+ * that makes it available. Eg. we may be looking at our 10th dequeue
+ * result, having released VDQCR after the 1st result and it is now
+ * busy due to some other command!
*/
if (s->vdq.storage == dq) {
s->vdq.storage = NULL;
@@ -1142,8 +1599,10 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
#define RAR_VB(rar) ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const uint64_t *buffers, unsigned int num_buffers)
+static int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers,
+ unsigned int num_buffers)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
@@ -1157,22 +1616,63 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
/* Start the release command */
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */
u64_to_le32_copy(&p[2], buffers, num_buffers);
- /* Set the verb byte, have to substitute in the valid-bit and the number
- * of buffers.
+ /* Set the verb byte, have to substitute in the valid-bit and the
+ * number of buffers.
*/
lwsync();
p[0] = cl[0] | RAR_VB(rar) | num_buffers;
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
return 0;
}
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
+
+ pr_debug("RAR=%08x\n", rar);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+
+ /* Start the release command */
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ u64_to_le32_copy(&p[2], buffers, num_buffers);
+
+ /* Set the verb byte, have to substitute in the valid-bit and the
+ * number of buffers.
+ */
+ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
+ lwsync();
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
+
+ return 0;
+}
+
+inline int qbman_swp_release(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
+
/*******************/
/* Buffer acquires */
/*******************/
@@ -1214,7 +1714,7 @@ int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
/* Complete the management command */
r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
- if (unlikely(!r)) {
+ if (!r) {
pr_err("qbman: acquire from BPID %d failed, no response\n",
bpid);
return -EIO;
@@ -1224,7 +1724,7 @@ int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
/* Determine success or failure */
- if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
bpid, r->rslt);
return -EIO;
@@ -1271,7 +1771,7 @@ static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
/* Complete the management command */
r = qbman_swp_mc_complete(s, p, alt_fq_verb);
- if (unlikely(!r)) {
+ if (!r) {
pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
alt_fq_verb);
return -EIO;
@@ -1281,7 +1781,7 @@ static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
/* Determine success or failure */
- if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
fqid, alt_fq_verb, r->rslt);
return -EIO;
@@ -1362,7 +1862,7 @@ static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
/* Complete the management command */
r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
- if (unlikely(!r)) {
+ if (!r) {
pr_err("qbman: wqchan config failed, no response\n");
return -EIO;
}
@@ -1372,7 +1872,7 @@ static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
!= QBMAN_WQCHAN_CONFIGURE);
/* Determine success or failure */
- if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
pr_err("CDAN cQID %d failed: code = 0x%02x\n",
channelid, r->rslt);
return -EIO;
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h
index dbea22a1..3b0fc540 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -1,12 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
*/
+#ifndef _QBMAN_PORTAL_H_
+#define _QBMAN_PORTAL_H_
+
#include "qbman_sys.h"
#include <fsl_qbman_portal.h>
+uint32_t qman_version;
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
@@ -14,13 +19,14 @@
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((uint32_t)0x80)
+/* All QBMan commands use this "Read trigger bit" encoding */
+#define QB_RT_BIT ((uint32_t)0x100)
+
/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0
/* QBMan DQRR size is set at runtime in qbman_portal.c */
-#define QBMAN_EQCR_SIZE 8
-
static inline uint8_t qm_cyc_diff(uint8_t ringsize, uint8_t first,
uint8_t last)
{
@@ -51,6 +57,10 @@ struct qbman_swp {
#endif
uint32_t valid_bit; /* 0x00 or 0x80 */
} mc;
+ /* Management response */
+ struct {
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mr;
/* Push dequeues */
uint32_t sdq;
/* Volatile dequeues */
@@ -87,6 +97,8 @@ struct qbman_swp {
struct {
uint32_t pi;
uint32_t pi_vb;
+ uint32_t pi_ring_size;
+ uint32_t pi_mask;
uint32_t ci;
int available;
} eqcr;
@@ -141,4 +153,16 @@ static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
* an inline) is necessary to work with different descriptor types and to work
* correctly with const and non-const inputs (and similarly-qualified outputs).
*/
-#define qb_cl(d) (&(d)->donot_manipulate_directly[0])
+#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
+
+#ifdef RTE_ARCH_ARM64
+ #define clean(p) \
+ { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
+ #define invalidate(p) \
+ { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
+#else
+ #define clean(p)
+ #define invalidate(p)
+#endif
+
+#endif
diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h
index 2bd33ea5..d41af835 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -18,11 +18,51 @@
* *not* to provide linux compatibility.
*/
+#ifndef _QBMAN_SYS_H_
+#define _QBMAN_SYS_H_
+
#include "qbman_sys_decl.h"
#define CENA_WRITE_ENABLE 0
#define CINH_WRITE_ENABLE 1
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_CR_RT 0x900
+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
+#define QBMAN_CINH_SWP_RCR_PI 0xc00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+
+/* CENA register offsets in memory-backed mode */
+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR_MEM 0x1600
+#define QBMAN_CENA_SWP_RR_MEM 0x1680
+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
+
/* Debugging assists */
static inline void __hexdump(unsigned long start, unsigned long end,
unsigned long p, size_t sz, const unsigned char *c)
@@ -125,8 +165,8 @@ struct qbman_swp_sys {
* place-holder.
*/
uint8_t *cena;
- uint8_t __iomem *addr_cena;
- uint8_t __iomem *addr_cinh;
+ uint8_t *addr_cena;
+ uint8_t *addr_cinh;
uint32_t idx;
enum qbman_eqcr_mode eqcr_mode;
};
@@ -292,13 +332,16 @@ static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
* qbman_portal.c. So use of it is declared locally here.
*/
#define QBMAN_CINH_SWP_CFG 0xd00
-#define QBMAN_CINH_SWP_CFG 0xd00
+
#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_VPM_SHIFT 7
+#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
@@ -329,11 +372,20 @@ static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
return reg;
}
+#define QMAN_RT_MODE 0x00000100
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+#define QMAN_REV_MASK 0xffff0000
+
static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
const struct qbman_swp_desc *d,
uint8_t dqrr_size)
{
uint32_t reg;
+ int i;
#ifdef RTE_ARCH_64
uint8_t wn = CENA_WRITE_ENABLE;
#else
@@ -343,7 +395,7 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
s->addr_cena = d->cena_bar;
s->addr_cinh = d->cinh_bar;
s->idx = (uint32_t)d->idx;
- s->cena = malloc(4096);
+ s->cena = malloc(64*1024);
if (!s->cena) {
pr_err("Could not allocate page for cena shadow\n");
return -1;
@@ -358,12 +410,34 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
QBMAN_BUG_ON(reg);
#endif
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ memset(s->addr_cena, 0, 64*1024);
+ else {
+ /* Invalidate the portal memory.
+		 * This ensures no stale cache lines are present.
+ */
+ for (i = 0; i < 0x1000; i += 64)
+ dccivac(s->addr_cena + i);
+ }
+
if (s->eqcr_mode == qman_eqcr_vb_array)
- reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1,
- 1, 1);
- else
- reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1,
- 1, 1);
+ reg = qbman_set_swp_cfg(dqrr_size, wn,
+ 0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
+ else {
+ if ((d->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ reg = qbman_set_swp_cfg(dqrr_size, wn,
+ 1, 3, 2, 2, 1, 1, 1, 1, 1, 1);
+ else
+ reg = qbman_set_swp_cfg(dqrr_size, wn,
+ 1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
+ }
+
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
+ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
+ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+ }
+
qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
if (!reg) {
@@ -371,6 +445,12 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
free(s->cena);
return -1;
}
+
+ if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
+ qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
+ }
+
return 0;
}
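
The memory-backed portal is selected purely through SWP_CFG: on QMan 5.0 and later the init path ORs CPBS (memory-backed portal), VPM and CPM (read-triggered VDQCR/CR) on top of the bit fields that qbman_set_swp_cfg() already packs. A hedged sketch of the same shift-and-OR register composition for a few of those fields; the shift values match the header above, the dqrr_size/wn semantics are simplified for the example:

#include <stdint.h>
#include <stdio.h>

/* Shift positions as declared in qbman_sys.h. */
#define SWP_CFG_DQRR_MF_SHIFT 20   /* DQRR size field */
#define SWP_CFG_CPBS_SHIFT    15   /* memory-backed portal */
#define SWP_CFG_WN_SHIFT      14   /* wn flag from qbman_swp_sys_init() */
#define SWP_CFG_VPM_SHIFT      7   /* VDQCR read-triggered mode */
#define SWP_CFG_CPM_SHIFT      6   /* CR read-triggered mode */

static uint32_t swp_cfg(uint8_t dqrr_size, int wn, int mem_backed)
{
	uint32_t reg = ((uint32_t)dqrr_size << SWP_CFG_DQRR_MF_SHIFT) |
		       ((uint32_t)(wn ? 1 : 0) << SWP_CFG_WN_SHIFT);

	if (mem_backed)
		reg |= (1u << SWP_CFG_CPBS_SHIFT) |
		       (1u << SWP_CFG_VPM_SHIFT) |
		       (1u << SWP_CFG_CPM_SHIFT);
	return reg;
}

int main(void)
{
	printf("cfg = 0x%08x\n", swp_cfg(16, 0, 1));
	return 0;
}
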
@@ -378,3 +458,5 @@ static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
{
free(s->cena);
}
+
+#endif /* _QBMAN_SYS_H_ */
diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
index fa6977fe..a29f5b46 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -3,6 +3,9 @@
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
*
*/
+#ifndef _QBMAN_SYS_DECL_H_
+#define _QBMAN_SYS_DECL_H_
+
#include <compat.h>
#include <fsl_qbman_base.h>
@@ -51,3 +54,4 @@ static inline void prefetch_for_store(void *p)
RTE_SET_USED(p);
}
#endif
+#endif /* _QBMAN_SYS_DECL_H_ */
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index fe45a113..dcc4e082 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -114,5 +114,18 @@ DPDK_18.05 {
dpdmai_open;
dpdmai_set_rx_queue;
rte_dpaa2_free_dpci_dev;
+ rte_dpaa2_memsegs;
} DPDK_18.02;
+
+DPDK_18.11 {
+ global:
+
+ dpaa2_dqrr_size;
+ dpaa2_eqcr_size;
+ dpci_get_link_state;
+ dpci_get_opr;
+ dpci_get_peer_attributes;
+ dpci_set_opr;
+
+} DPDK_18.05;
diff --git a/drivers/bus/ifpga/Makefile b/drivers/bus/ifpga/Makefile
index 3ff3bdb8..514452b3 100644
--- a/drivers/bus/ifpga/Makefile
+++ b/drivers/bus/ifpga/Makefile
@@ -19,7 +19,7 @@ LDLIBS += -lrte_kvargs
EXPORT_MAP := rte_bus_ifpga_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_bus.c
SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_common.c
diff --git a/drivers/bus/ifpga/ifpga_bus.c b/drivers/bus/ifpga/ifpga_bus.c
index b324872e..5f23ed8b 100644
--- a/drivers/bus/ifpga/ifpga_bus.c
+++ b/drivers/bus/ifpga/ifpga_bus.c
@@ -142,6 +142,7 @@ ifpga_scan_one(struct rte_rawdev *rawdev,
if (!afu_dev)
goto end;
+ afu_dev->device.bus = &rte_ifpga_bus;
afu_dev->device.devargs = devargs;
afu_dev->device.numa_node = SOCKET_ID_ANY;
afu_dev->device.name = devargs->name;
@@ -279,14 +280,13 @@ ifpga_probe_one_driver(struct rte_afu_driver *drv,
/* reference driver structure */
afu_dev->driver = drv;
- afu_dev->device.driver = &drv->driver;
/* call the driver probe() function */
ret = drv->probe(afu_dev);
- if (ret) {
+ if (ret)
afu_dev->driver = NULL;
- afu_dev->device.driver = NULL;
- }
+ else
+ afu_dev->device.driver = &drv->driver;
return ret;
}
@@ -301,8 +301,11 @@ ifpga_probe_all_drivers(struct rte_afu_device *afu_dev)
return -1;
/* Check if a driver is already loaded */
- if (afu_dev->driver != NULL)
- return 0;
+ if (rte_dev_is_probed(&afu_dev->device)) {
+ IFPGA_BUS_DEBUG("Device %s is already probed\n",
+ rte_ifpga_device_name(afu_dev));
+ return -EEXIST;
+ }
TAILQ_FOREACH(drv, &ifpga_afu_drv_list, next) {
if (ifpga_probe_one_driver(drv, afu_dev)) {
@@ -325,14 +328,13 @@ ifpga_probe(void)
int ret = 0;
TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
- if (afu_dev->device.driver)
- continue;
-
ret = ifpga_probe_all_drivers(afu_dev);
+ if (ret == -EEXIST)
+ continue;
if (ret < 0)
IFPGA_BUS_ERR("failed to initialize %s device\n",
rte_ifpga_device_name(afu_dev));
- }
+ }
return ret;
}
@@ -347,23 +349,20 @@ static int
ifpga_remove_driver(struct rte_afu_device *afu_dev)
{
const char *name;
- const struct rte_afu_driver *driver;
name = rte_ifpga_device_name(afu_dev);
- if (!afu_dev->device.driver) {
+ if (afu_dev->driver == NULL) {
IFPGA_BUS_DEBUG("no driver attach to device %s\n", name);
return 1;
}
- driver = RTE_DRV_TO_AFU_CONST(afu_dev->device.driver);
- return driver->remove(afu_dev);
+ return afu_dev->driver->remove(afu_dev);
}
static int
ifpga_unplug(struct rte_device *dev)
{
struct rte_afu_device *afu_dev = NULL;
- struct rte_devargs *devargs = NULL;
int ret;
if (dev == NULL)
@@ -373,15 +372,13 @@ ifpga_unplug(struct rte_device *dev)
if (!afu_dev)
return -ENOENT;
- devargs = dev->devargs;
-
ret = ifpga_remove_driver(afu_dev);
if (ret)
return ret;
TAILQ_REMOVE(&ifpga_afu_dev_list, afu_dev, next);
- rte_devargs_remove(devargs->bus->name, devargs->name);
+ rte_devargs_remove(dev->devargs);
free(afu_dev);
return 0;
diff --git a/drivers/bus/ifpga/meson.build b/drivers/bus/ifpga/meson.build
index c9b08c86..0b5c38d5 100644
--- a/drivers/bus/ifpga/meson.build
+++ b/drivers/bus/ifpga/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2018 Intel Corporation
+version = 2
+
deps += ['pci', 'kvargs', 'rawdev']
install_headers('rte_bus_ifpga.h')
sources = files('ifpga_common.c', 'ifpga_bus.c')
diff --git a/drivers/bus/ifpga/rte_bus_ifpga.h b/drivers/bus/ifpga/rte_bus_ifpga.h
index 51d5ae0d..d53c0f48 100644
--- a/drivers/bus/ifpga/rte_bus_ifpga.h
+++ b/drivers/bus/ifpga/rte_bus_ifpga.h
@@ -83,9 +83,6 @@ struct rte_afu_device {
#define RTE_DEV_TO_AFU(ptr) \
container_of(ptr, struct rte_afu_device, device)
-#define RTE_DRV_TO_AFU_CONST(ptr) \
- container_of(ptr, const struct rte_afu_driver, driver)
-
/**
* Initialization function for the driver called during FPGA BUS probing.
*/
diff --git a/drivers/bus/pci/Makefile b/drivers/bus/pci/Makefile
index cf373068..f33e0120 100644
--- a/drivers/bus/pci/Makefile
+++ b/drivers/bus/pci/Makefile
@@ -4,7 +4,7 @@
include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_bus_pci.a
-LIBABIVER := 1
+LIBABIVER := 2
EXPORT_MAP := rte_bus_pci_version.map
CFLAGS := -I$(SRCDIR) $(CFLAGS)
@@ -26,10 +26,11 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_ethdev -lrte_pci
+LDLIBS += -lrte_ethdev -lrte_pci -lrte_kvargs
include $(RTE_SDK)/drivers/bus/pci/$(SYSTEM)/Makefile
SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_params.c
SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common.c
SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common_uio.c
diff --git a/drivers/bus/pci/bsd/pci.c b/drivers/bus/pci/bsd/pci.c
index 655b34b7..d09f8ee5 100644
--- a/drivers/bus/pci/bsd/pci.c
+++ b/drivers/bus/pci/bsd/pci.c
@@ -223,6 +223,8 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
}
memset(dev, 0, sizeof(*dev));
+ dev->device.bus = &rte_pci_bus.bus;
+
dev->addr.domain = conf->pc_sel.pc_domain;
dev->addr.bus = conf->pc_sel.pc_bus;
dev->addr.devid = conf->pc_sel.pc_dev;
@@ -439,6 +441,8 @@ int rte_pci_read_config(const struct rte_pci_device *dev,
{
int fd = -1;
int size;
+ /* Copy Linux implementation's behaviour */
+ const int return_len = len;
struct pci_io pi = {
.pi_sel = {
.pc_domain = dev->addr.domain,
@@ -469,7 +473,7 @@ int rte_pci_read_config(const struct rte_pci_device *dev,
}
close(fd);
- return 0;
+ return return_len;
error:
if (fd >= 0)
diff --git a/drivers/bus/pci/linux/Makefile b/drivers/bus/pci/linux/Makefile
index 96ea1d54..90404468 100644
--- a/drivers/bus/pci/linux/Makefile
+++ b/drivers/bus/pci/linux/Makefile
@@ -4,5 +4,3 @@
SRCS += pci.c
SRCS += pci_uio.c
SRCS += pci_vfio.c
-
-CFLAGS += -D_GNU_SOURCE
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 04648ac9..45c24ef7 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -119,7 +119,7 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
- size_t sz = msl->memseg_arr.len * msl->page_sz;
+ size_t sz = msl->len;
void *end_va = RTE_PTR_ADD(msl->base_va, sz);
void **max_va = arg;
@@ -228,6 +228,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
return -1;
memset(dev, 0, sizeof(*dev));
+ dev->device.bus = &rte_pci_bus.bus;
dev->addr = *addr;
/* get vendor id */
@@ -588,10 +589,8 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
fclose(fp);
mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
- if (mgaw < X86_VA_WIDTH)
- return false;
- return true;
+ return rte_eal_check_dma_mask(mgaw) == 0 ? true : false;
}
#elif defined(RTE_ARCH_PPC_64)
static bool
@@ -620,8 +619,11 @@ pci_devices_iommu_support_va(void)
FOREACH_DEVICE_ON_PCIBUS(dev) {
if (!rte_pci_match(drv, dev))
continue;
- if (!pci_one_device_iommu_support_va(dev))
- return false;
+ /*
+		 * Only one PCI device needs to be checked, because the
+		 * IOMMU hardware is the same for all of them.
+ */
+ return pci_one_device_iommu_support_va(dev);
}
}
return true;
@@ -672,23 +674,21 @@ rte_pci_get_iommu_class(void)
int rte_pci_read_config(const struct rte_pci_device *device,
void *buf, size_t len, off_t offset)
{
+ char devname[RTE_DEV_NAME_MAX_LEN] = "";
const struct rte_intr_handle *intr_handle = &device->intr_handle;
- switch (intr_handle->type) {
- case RTE_INTR_HANDLE_UIO:
- case RTE_INTR_HANDLE_UIO_INTX:
+ switch (device->kdrv) {
+ case RTE_KDRV_IGB_UIO:
return pci_uio_read_config(intr_handle, buf, len, offset);
-
#ifdef VFIO_PRESENT
- case RTE_INTR_HANDLE_VFIO_MSIX:
- case RTE_INTR_HANDLE_VFIO_MSI:
- case RTE_INTR_HANDLE_VFIO_LEGACY:
+ case RTE_KDRV_VFIO:
return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
default:
+ rte_pci_device_name(&device->addr, devname,
+ RTE_DEV_NAME_MAX_LEN);
RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ "Unknown driver type for %s\n", devname);
return -1;
}
}
@@ -697,23 +697,21 @@ int rte_pci_read_config(const struct rte_pci_device *device,
int rte_pci_write_config(const struct rte_pci_device *device,
const void *buf, size_t len, off_t offset)
{
+ char devname[RTE_DEV_NAME_MAX_LEN] = "";
const struct rte_intr_handle *intr_handle = &device->intr_handle;
- switch (intr_handle->type) {
- case RTE_INTR_HANDLE_UIO:
- case RTE_INTR_HANDLE_UIO_INTX:
+ switch (device->kdrv) {
+ case RTE_KDRV_IGB_UIO:
return pci_uio_write_config(intr_handle, buf, len, offset);
-
#ifdef VFIO_PRESENT
- case RTE_INTR_HANDLE_VFIO_MSIX:
- case RTE_INTR_HANDLE_VFIO_MSI:
- case RTE_INTR_HANDLE_VFIO_LEGACY:
+ case RTE_KDRV_VFIO:
return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
default:
+ rte_pci_device_name(&device->addr, devname,
+ RTE_DEV_NAME_MAX_LEN);
RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ "Unknown driver type for %s\n", devname);
return -1;
}
}
diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index 686386d6..305cc060 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -17,6 +17,8 @@
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_vfio.h>
+#include <rte_eal.h>
+#include <rte_bus.h>
#include "eal_filesystem.h"
@@ -35,7 +37,9 @@
#ifdef VFIO_PRESENT
+#ifndef PAGE_SIZE
#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#endif
#define PAGE_MASK (~(PAGE_SIZE - 1))
static struct rte_tailq_elem rte_vfio_tailq = {
@@ -277,6 +281,114 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
return -1;
}
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+static void
+pci_vfio_req_handler(void *param)
+{
+ struct rte_bus *bus;
+ int ret;
+ struct rte_device *device = (struct rte_device *)param;
+
+ bus = rte_bus_find_by_device(device);
+ if (bus == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
+ device->name);
+ return;
+ }
+
+ /*
+	 * The vfio kernel module requests that user space release the
+	 * allocated resources before the device is deleted in the kernel,
+	 * so we can directly call the vfio bus hot-unplug handler to
+	 * process it.
+ */
+ ret = bus->hot_unplug_handler(device);
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "Can not handle hot-unplug for device (%s)\n",
+ device->name);
+}
+
+/* enable notifier (only enable req now) */
+static int
+pci_vfio_enable_notifier(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ int ret;
+ int fd = -1;
+
+ /* set up an eventfd for req notifier */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot set up eventfd, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ dev->vfio_req_intr_handle.fd = fd;
+ dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_VFIO_REQ;
+ dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;
+
+ ret = rte_intr_callback_register(&dev->vfio_req_intr_handle,
+ pci_vfio_req_handler,
+ (void *)&dev->device);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Fail to register req notifier handler.\n");
+ goto error;
+ }
+
+ ret = rte_intr_enable(&dev->vfio_req_intr_handle);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Fail to enable req notifier.\n");
+ ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
+ pci_vfio_req_handler,
+ (void *)&dev->device);
+ if (ret < 0)
+ RTE_LOG(ERR, EAL,
+ "Fail to unregister req notifier handler.\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ close(fd);
+
+ dev->vfio_req_intr_handle.fd = -1;
+ dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ dev->vfio_req_intr_handle.vfio_dev_fd = -1;
+
+ return -1;
+}
+
+/* disable notifier (only disable req now) */
+static int
+pci_vfio_disable_notifier(struct rte_pci_device *dev)
+{
+ int ret;
+
+ ret = rte_intr_disable(&dev->vfio_req_intr_handle);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "fail to disable req notifier.\n");
+ return -1;
+ }
+
+ ret = rte_intr_callback_unregister(&dev->vfio_req_intr_handle,
+ pci_vfio_req_handler,
+ (void *)&dev->device);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "fail to unregister req notifier handler.\n");
+ return -1;
+ }
+
+ close(dev->vfio_req_intr_handle.fd);
+
+ dev->vfio_req_intr_handle.fd = -1;
+ dev->vfio_req_intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ dev->vfio_req_intr_handle.vfio_dev_fd = -1;
+
+ return 0;
+}
+#endif
+
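
pci_vfio_enable_notifier() above wires the VFIO "device request" interrupt to an eventfd and registers pci_vfio_req_handler() as the callback, so a kernel-initiated unplug request surfaces as a readable event in user space. A minimal, self-contained illustration of the eventfd part alone (no VFIO and no interrupt thread; just creating the fd, signalling it, and draining the counter):

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);

	if (fd < 0) {
		perror("eventfd");
		return 1;
	}

	/* Whoever owns the other end (here: ourselves) signals the event. */
	val = 1;
	if (write(fd, &val, sizeof(val)) != sizeof(val))
		perror("write");

	/* The handler side reads the counter to acknowledge the event. */
	if (read(fd, &val, sizeof(val)) == sizeof(val))
		printf("request event received, counter=%llu\n",
		       (unsigned long long)val);

	close(fd);
	return 0;
}
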
static int
pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
{
@@ -415,6 +527,93 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
return 0;
}
+/*
+ * region info may contain capability headers, so we need to keep reallocating
+ * the memory until the allocated size matches the argsz reported back.
+ */
+static int
+pci_vfio_get_region_info(int vfio_dev_fd, struct vfio_region_info **info,
+ int region)
+{
+ struct vfio_region_info *ri;
+ size_t argsz = sizeof(*ri);
+ int ret;
+
+ ri = malloc(sizeof(*ri));
+ if (ri == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for region info\n");
+ return -1;
+ }
+again:
+ memset(ri, 0, argsz);
+ ri->argsz = argsz;
+ ri->index = region;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, ri);
+ if (ret < 0) {
+ free(ri);
+ return ret;
+ }
+ if (ri->argsz != argsz) {
+ struct vfio_region_info *tmp;
+
+ argsz = ri->argsz;
+ tmp = realloc(ri, argsz);
+
+ if (tmp == NULL) {
+ /* realloc failed but the ri is still there */
+ free(ri);
+ RTE_LOG(ERR, EAL, "Cannot reallocate memory for region info\n");
+ return -1;
+ }
+ ri = tmp;
+ goto again;
+ }
+ *info = ri;
+
+ return 0;
+}
+
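
pci_vfio_get_region_info() has to size its buffer to whatever the kernel reports in argsz, so it loops: issue the query, and if the returned argsz exceeds what was allocated, realloc and retry. The same grow-until-it-fits loop in a stand-alone form, with a fake query function standing in for the VFIO_DEVICE_GET_REGION_INFO ioctl:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct region_info {            /* stand-in for struct vfio_region_info */
	uint32_t argsz;
	uint32_t index;
	char payload[];
};

/* Fake "kernel": needs 48 bytes; reports the size if the buffer is short. */
static int fake_get_region_info(struct region_info *ri)
{
	uint32_t needed = 48;

	if (ri->argsz < needed) {
		ri->argsz = needed;   /* tell the caller to come back bigger */
		return 0;
	}
	memset(ri->payload, 0x5a, needed - sizeof(*ri));
	return 0;
}

int main(void)
{
	size_t argsz = sizeof(struct region_info);
	struct region_info *ri = malloc(argsz);

	if (!ri)
		return 1;
again:
	memset(ri, 0, argsz);
	ri->argsz = argsz;
	ri->index = 0;
	fake_get_region_info(ri);
	if (ri->argsz != argsz) {            /* kernel wanted more room */
		struct region_info *tmp;

		argsz = ri->argsz;
		tmp = realloc(ri, argsz);
		if (!tmp) {
			free(ri);
			return 1;
		}
		ri = tmp;
		goto again;
	}
	printf("final argsz: %zu\n", argsz);
	free(ri);
	return 0;
}
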
+static struct vfio_info_cap_header *
+pci_vfio_info_cap(struct vfio_region_info *info, int cap)
+{
+ struct vfio_info_cap_header *h;
+ size_t offset;
+
+ if ((info->flags & RTE_VFIO_INFO_FLAG_CAPS) == 0) {
+ /* VFIO info does not advertise capabilities */
+ return NULL;
+ }
+
+ offset = VFIO_CAP_OFFSET(info);
+ while (offset != 0) {
+ h = RTE_PTR_ADD(info, offset);
+ if (h->id == cap)
+ return h;
+ offset = h->next;
+ }
+ return NULL;
+}
+
+static int
+pci_vfio_msix_is_mappable(int vfio_dev_fd, int msix_region)
+{
+ struct vfio_region_info *info;
+ int ret;
+
+ ret = pci_vfio_get_region_info(vfio_dev_fd, &info, msix_region);
+ if (ret < 0)
+ return -1;
+
+ ret = pci_vfio_info_cap(info, RTE_VFIO_CAP_MSIX_MAPPABLE) != NULL;
+
+ /* cleanup */
+ free(info);
+
+ return ret;
+}
+
+
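
pci_vfio_info_cap() walks the capability chain that VFIO appends after struct vfio_region_info: each header stores an id and the byte offset of the next header, with an offset of 0 terminating the list. The same offset-linked walk, reduced to a stand-alone example; the header layout mirrors the kernel's vfio_info_cap_header, but the buffer contents here are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cap_header {             /* mirrors the kernel's vfio_info_cap_header */
	uint16_t id;
	uint16_t version;
	uint32_t next;          /* byte offset of the next header, 0 = end */
};

/* Walk the offset-linked chain inside 'buf', starting at 'first'. */
static int find_cap(const uint8_t *buf, uint32_t first, uint16_t wanted,
		    struct cap_header *out)
{
	uint32_t offset = first;
	struct cap_header h;

	while (offset != 0) {
		memcpy(&h, buf + offset, sizeof(h));
		if (h.id == wanted) {
			*out = h;
			return 0;
		}
		offset = h.next;
	}
	return -1;
}

int main(void)
{
	uint8_t buf[64] = {0};
	struct cap_header a = { 1, 1, 32 }, b = { 3, 1, 0 }, found;

	memcpy(buf + 16, &a, sizeof(a));    /* first capability at offset 16 */
	memcpy(buf + 32, &b, sizeof(b));

	printf("cap id 3 %sfound\n",
	       find_cap(buf, 16, 3, &found) == 0 ? "" : "not ");
	return 0;
}
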
static int
pci_vfio_map_resource_primary(struct rte_pci_device *dev)
{
@@ -430,6 +629,9 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
struct pci_map *maps;
dev->intr_handle.fd = -1;
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ dev->vfio_req_intr_handle.fd = -1;
+#endif
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
@@ -464,56 +666,75 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
if (ret < 0) {
RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n",
pci_addr);
- goto err_vfio_dev_fd;
+ goto err_vfio_res;
+ }
+ /* if we found our MSI-X BAR region, check if we can mmap it */
+ if (vfio_res->msix_table.bar_index != -1) {
+ int ret = pci_vfio_msix_is_mappable(vfio_dev_fd,
+ vfio_res->msix_table.bar_index);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Couldn't check if MSI-X BAR is mappable\n");
+ goto err_vfio_res;
+ } else if (ret != 0) {
+ /* we can map it, so we don't care where it is */
+ RTE_LOG(DEBUG, EAL, "VFIO reports MSI-X BAR as mappable\n");
+ vfio_res->msix_table.bar_index = -1;
+ }
}
for (i = 0; i < (int) vfio_res->nb_maps; i++) {
- struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ struct vfio_region_info *reg = NULL;
void *bar_addr;
- reg.index = i;
-
- ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
- if (ret) {
+ ret = pci_vfio_get_region_info(vfio_dev_fd, &reg, i);
+ if (ret < 0) {
RTE_LOG(ERR, EAL, " %s cannot get device region info "
- "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ "error %i (%s)\n", pci_addr, errno,
+ strerror(errno));
goto err_vfio_res;
}
/* chk for io port region */
ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
- if (ret < 0)
+ if (ret < 0) {
+ free(reg);
goto err_vfio_res;
- else if (ret) {
+ } else if (ret) {
RTE_LOG(INFO, EAL, "Ignore mapping IO port bar(%d)\n",
i);
+ free(reg);
continue;
}
/* skip non-mmapable BARs */
- if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
+ if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
+ free(reg);
continue;
+ }
/* try mapping somewhere close to the end of hugepages */
if (pci_map_addr == NULL)
pci_map_addr = pci_find_max_end_va();
bar_addr = pci_map_addr;
- pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
+ pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg->size);
maps[i].addr = bar_addr;
- maps[i].offset = reg.offset;
- maps[i].size = reg.size;
+ maps[i].offset = reg->offset;
+ maps[i].size = reg->size;
maps[i].path = NULL; /* vfio doesn't have per-resource paths */
ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
if (ret < 0) {
RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
pci_addr, i, strerror(errno));
+ free(reg);
goto err_vfio_res;
}
dev->mem_resource[i].addr = maps[i].addr;
+
+ free(reg);
}
if (pci_rte_vfio_setup_device(dev, vfio_dev_fd) < 0) {
@@ -521,6 +742,13 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
goto err_vfio_res;
}
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ if (pci_vfio_enable_notifier(dev, vfio_dev_fd) != 0) {
+ RTE_LOG(ERR, EAL, "Error setting up notifier!\n");
+ goto err_vfio_res;
+ }
+
+#endif
TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
return 0;
@@ -546,6 +774,9 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
struct pci_map *maps;
dev->intr_handle.fd = -1;
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ dev->vfio_req_intr_handle.fd = -1;
+#endif
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
@@ -586,6 +817,9 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
/* we need save vfio_dev_fd, so it can be used during release */
dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ dev->vfio_req_intr_handle.vfio_dev_fd = vfio_dev_fd;
+#endif
return 0;
err_vfio_dev_fd:
@@ -658,6 +892,14 @@ pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ ret = pci_vfio_disable_notifier(dev);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "fail to disable req notifier.\n");
+ return -1;
+ }
+
+#endif
if (close(dev->intr_handle.fd) < 0) {
RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
pci_addr);
diff --git a/drivers/bus/pci/meson.build b/drivers/bus/pci/meson.build
index 72939e59..a3140ff9 100644
--- a/drivers/bus/pci/meson.build
+++ b/drivers/bus/pci/meson.build
@@ -1,15 +1,18 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
deps += ['pci']
install_headers('rte_bus_pci.h')
-sources = files('pci_common.c', 'pci_common_uio.c')
+sources = files('pci_common.c',
+ 'pci_common_uio.c',
+ 'pci_params.c')
if host_machine.system() == 'linux'
sources += files('linux/pci.c',
'linux/pci_uio.c',
'linux/pci_vfio.c')
includes += include_directories('linux')
- cflags += ['-D_GNU_SOURCE']
else
sources += files('bsd/pci.c')
includes += include_directories('bsd')
@@ -17,3 +20,5 @@ endif
# memseg walk is not part of stable API yet
allow_experimental_apis = true
+
+deps += ['kvargs']
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 7736b3f9..6276e5d6 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -6,6 +6,7 @@
#include <string.h>
#include <inttypes.h>
#include <stdint.h>
+#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
@@ -23,12 +24,11 @@
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
+#include <rte_vfio.h>
#include "private.h"
-extern struct rte_pci_bus rte_pci_bus;
-
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
const char *rte_pci_get_sysfs_path(void)
@@ -123,6 +123,7 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
struct rte_pci_device *dev)
{
int ret;
+ bool already_probed;
struct rte_pci_addr *loc;
if ((dr == NULL) || (dev == NULL))
@@ -153,6 +154,13 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
dev->device.numa_node = 0;
}
+ already_probed = rte_dev_is_probed(&dev->device);
+ if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
+ RTE_LOG(DEBUG, EAL, "Device %s is already probed\n",
+ dev->device.name);
+ return -EEXIST;
+ }
+
RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
dev->id.device_id, dr->driver.name);
@@ -161,24 +169,24 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
* This needs to be before rte_pci_map_device(), as it enables to use
* driver flags for adjusting configuration.
*/
- dev->driver = dr;
- dev->device.driver = &dr->driver;
+ if (!already_probed)
+ dev->driver = dr;
- if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+ if (!already_probed && (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)) {
/* map resources for devices that use igb_uio */
ret = rte_pci_map_device(dev);
if (ret != 0) {
dev->driver = NULL;
- dev->device.driver = NULL;
return ret;
}
}
/* call the driver probe() function */
ret = dr->probe(dr, dev);
+ if (already_probed)
+ return ret; /* no rollback if already succeeded earlier */
if (ret) {
dev->driver = NULL;
- dev->device.driver = NULL;
if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
/* Don't unmap if device is unsupported and
* driver needs mapped resources.
@@ -186,6 +194,8 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
!(ret > 0 &&
(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
rte_pci_unmap_device(dev);
+ } else {
+ dev->device.driver = &dr->driver;
}
return ret;
@@ -233,7 +243,7 @@ rte_pci_detach_dev(struct rte_pci_device *dev)
/*
* If vendor/device ID match, call the probe() function of all
- * registered driver for the given device. Return -1 if initialization
+ * registered driver for the given device. Return < 0 if initialization
* failed, return 1 if no driver is found for this device.
*/
static int
@@ -243,17 +253,13 @@ pci_probe_all_drivers(struct rte_pci_device *dev)
int rc = 0;
if (dev == NULL)
- return -1;
-
- /* Check if a driver is already loaded */
- if (dev->driver != NULL)
- return 0;
+ return -EINVAL;
FOREACH_DRIVER_ON_PCIBUS(dr) {
rc = rte_pci_probe_one_driver(dr, dev);
if (rc < 0)
/* negative value is an error */
- return -1;
+ return rc;
if (rc > 0)
/* positive value means driver doesn't support it */
continue;
@@ -290,11 +296,14 @@ rte_pci_probe(void)
devargs->policy == RTE_DEV_WHITELISTED)
ret = pci_probe_all_drivers(dev);
if (ret < 0) {
- RTE_LOG(ERR, EAL, "Requested device " PCI_PRI_FMT
- " cannot be used\n", dev->addr.domain, dev->addr.bus,
- dev->addr.devid, dev->addr.function);
- rte_errno = errno;
- failed++;
+ if (ret != -EEXIST) {
+ RTE_LOG(ERR, EAL, "Requested device "
+ PCI_PRI_FMT " cannot be used\n",
+ dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ rte_errno = errno;
+ failed++;
+ }
ret = 0;
}
}
@@ -405,6 +414,98 @@ pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
return NULL;
}
+/*
+ * Find the device that encountered the failure by iterating over all devices
+ * on the PCI bus and checking whether the memory failure address falls within
+ * the range of one of the device's BARs.
+ */
+static struct rte_pci_device *
+pci_find_device_by_addr(const void *failure_addr)
+{
+ struct rte_pci_device *pdev = NULL;
+ uint64_t check_point, start, end, len;
+ int i;
+
+ check_point = (uint64_t)(uintptr_t)failure_addr;
+
+ FOREACH_DEVICE_ON_PCIBUS(pdev) {
+ for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
+ start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
+ len = pdev->mem_resource[i].len;
+ end = start + len;
+ if (check_point >= start && check_point < end) {
+ RTE_LOG(DEBUG, EAL, "Failure address %16.16"
+ PRIx64" belongs to device %s!\n",
+ check_point, pdev->device.name);
+ return pdev;
+ }
+ }
+ }
+ return NULL;
+}
+
+static int
+pci_hot_unplug_handler(struct rte_device *dev)
+{
+ struct rte_pci_device *pdev = NULL;
+ int ret = 0;
+
+ pdev = RTE_DEV_TO_PCI(dev);
+ if (!pdev)
+ return -1;
+
+ switch (pdev->kdrv) {
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ case RTE_KDRV_VFIO:
+ /*
+		 * The vfio kernel module guarantees that the PCI device will
+		 * not be deleted until user space releases its resources, so
+		 * there is no need to remap the BAR resources here; just
+		 * notify user space of the req event and let it handle it.
+ */
+ rte_dev_event_callback_process(dev->name,
+ RTE_DEV_EVENT_REMOVE);
+ break;
+#endif
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
+ case RTE_KDRV_NIC_UIO:
+		/* The BAR resources are invalid, remap them to be safe. */
+ ret = pci_uio_remap_resource(pdev);
+ break;
+ default:
+ RTE_LOG(DEBUG, EAL,
+ "Not managed by a supported kernel driver, skipped\n");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+pci_sigbus_handler(const void *failure_addr)
+{
+ struct rte_pci_device *pdev = NULL;
+ int ret = 0;
+
+ pdev = pci_find_device_by_addr(failure_addr);
+ if (!pdev) {
+ /* It is a generic sigbus error, no bus would handle it. */
+ ret = 1;
+ } else {
+		/* The sigbus error is caused by a hot-unplug. */
+ ret = pci_hot_unplug_handler(&pdev->device);
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ "Failed to handle hot-unplug for device %s",
+ pdev->name);
+ ret = -1;
+ }
+ }
+ return ret;
+}
+
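
pci_sigbus_handler() resolves a faulting address to a device by scanning every mapped BAR for a [start, start + len) range that contains it; only then is the failure treated as a hot-unplug rather than a generic SIGBUS. The range check on its own, as a tiny self-contained example (the devices and addresses are made up):

#include <stdint.h>
#include <stdio.h>

struct bar { uint64_t start; uint64_t len; };
struct fake_dev { const char *name; struct bar bars[2]; };

static const struct fake_dev devs[] = {
	{ "0000:01:00.0", { { 0x1000, 0x1000 }, { 0x8000, 0x4000 } } },
	{ "0000:02:00.0", { { 0x20000, 0x2000 }, { 0, 0 } } },
};

static const char *owner_of(uint64_t addr)
{
	unsigned int i, j;

	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		for (j = 0; j < 2; j++) {
			uint64_t start = devs[i].bars[j].start;
			uint64_t end = start + devs[i].bars[j].len;

			if (devs[i].bars[j].len && addr >= start && addr < end)
				return devs[i].name;
		}
	return NULL;                  /* generic SIGBUS: no BAR owns it */
}

int main(void)
{
	const char *a = owner_of(0x9000), *b = owner_of(0x50000);

	printf("0x9000  -> %s\n", a ? a : "none");
	printf("0x50000 -> %s\n", b ? b : "none");
	return 0;
}
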
static int
pci_plug(struct rte_device *dev)
{
@@ -421,6 +522,7 @@ pci_unplug(struct rte_device *dev)
ret = rte_pci_detach_dev(pdev);
if (ret == 0) {
rte_pci_remove_device(pdev);
+ rte_devargs_remove(dev->devargs);
free(pdev);
}
return ret;
@@ -435,6 +537,9 @@ struct rte_pci_bus rte_pci_bus = {
.unplug = pci_unplug,
.parse = pci_parse,
.get_iommu_class = rte_pci_get_iommu_class,
+ .dev_iterate = rte_pci_dev_iterate,
+ .hot_unplug_handler = pci_hot_unplug_handler,
+ .sigbus_handler = pci_sigbus_handler,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
diff --git a/drivers/bus/pci/pci_common_uio.c b/drivers/bus/pci/pci_common_uio.c
index 54bc20b5..7ea73dbc 100644
--- a/drivers/bus/pci/pci_common_uio.c
+++ b/drivers/bus/pci/pci_common_uio.c
@@ -146,6 +146,39 @@ pci_uio_unmap(struct mapped_pci_resource *uio_res)
}
}
+/* remap the PCI resource of a PCI device in anonymous virtual memory */
+int
+pci_uio_remap_resource(struct rte_pci_device *dev)
+{
+ int i;
+ void *map_address;
+
+ if (dev == NULL)
+ return -1;
+
+ /* Remap all BARs */
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ if (dev->mem_resource[i].phys_addr == 0)
+ continue;
+ map_address = mmap(dev->mem_resource[i].addr,
+ (size_t)dev->mem_resource[i].len,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (map_address == MAP_FAILED) {
+ RTE_LOG(ERR, EAL,
+ "Cannot remap resource for device %s\n",
+ dev->name);
+ return -1;
+ }
+ RTE_LOG(INFO, EAL,
+ "Successful remap resource for device %s\n",
+ dev->name);
+ }
+
+ return 0;
+}
+
static struct mapped_pci_resource *
pci_uio_find_resource(struct rte_pci_device *dev)
{
diff --git a/drivers/bus/pci/pci_params.c b/drivers/bus/pci/pci_params.c
new file mode 100644
index 00000000..3192e9c9
--- /dev/null
+++ b/drivers/bus/pci/pci_params.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#include <rte_bus.h>
+#include <rte_bus_pci.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
+#include <rte_pci.h>
+
+#include "private.h"
+
+enum pci_params {
+ RTE_PCI_PARAM_ADDR,
+ RTE_PCI_PARAM_MAX,
+};
+
+static const char * const pci_params_keys[] = {
+ [RTE_PCI_PARAM_ADDR] = "addr",
+ [RTE_PCI_PARAM_MAX] = NULL,
+};
+
+static int
+pci_addr_kv_cmp(const char *key __rte_unused,
+ const char *value,
+ void *_addr2)
+{
+ struct rte_pci_addr _addr1;
+ struct rte_pci_addr *addr1 = &_addr1;
+ struct rte_pci_addr *addr2 = _addr2;
+
+ if (rte_pci_addr_parse(value, addr1))
+ return -1;
+ return -abs(rte_pci_addr_cmp(addr1, addr2));
+}
+
+static int
+pci_dev_match(const struct rte_device *dev,
+ const void *_kvlist)
+{
+ const struct rte_kvargs *kvlist = _kvlist;
+ const struct rte_pci_device *pdev;
+
+ if (kvlist == NULL)
+ /* Empty string matches everything. */
+ return 0;
+ pdev = RTE_DEV_TO_PCI_CONST(dev);
+ /* The device is rejected if any provided field does not match. */
+ if (rte_kvargs_process(kvlist, pci_params_keys[RTE_PCI_PARAM_ADDR],
+ &pci_addr_kv_cmp,
+ (void *)(intptr_t)&pdev->addr))
+ return 1;
+ return 0;
+}
+
+void *
+rte_pci_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it __rte_unused)
+{
+ rte_bus_find_device_t find_device;
+ struct rte_kvargs *kvargs = NULL;
+ struct rte_device *dev;
+
+ if (str != NULL) {
+ kvargs = rte_kvargs_parse(str, pci_params_keys);
+ if (kvargs == NULL) {
+ RTE_LOG(ERR, EAL, "cannot parse argument list\n");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ }
+ find_device = rte_pci_bus.bus.find_device;
+ dev = find_device(start, pci_dev_match, kvargs);
+ rte_kvargs_free(kvargs);
+ return dev;
+}
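For context, the new dev_iterate hook is consumed through the generic EAL device iteration. A minimal usage sketch, assuming the experimental RTE_DEV_FOREACH macro from this release and the "bus=pci,addr=..." layer syntax; the BDF is a placeholder:

    #include <stdio.h>
    #include <rte_dev.h>

    static void
    list_pci_devices_sketch(void)
    {
        struct rte_dev_iterator it;
        struct rte_device *dev;

        /* "addr" is the only key handled by pci_dev_match() above */
        RTE_DEV_FOREACH(dev, "bus=pci,addr=0000:00:04.0", &it)
            printf("matched PCI device %s\n", dev->name);
    }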
diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
index 8ddd03e1..13c3324b 100644
--- a/drivers/bus/pci/private.h
+++ b/drivers/bus/pci/private.h
@@ -10,9 +10,13 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
+extern struct rte_pci_bus rte_pci_bus;
+
struct rte_pci_driver;
struct rte_pci_device;
+extern struct rte_pci_bus rte_pci_bus;
+
/**
* Probe the PCI bus
*
@@ -123,6 +127,18 @@ void pci_uio_free_resource(struct rte_pci_device *dev,
struct mapped_pci_resource *uio_res);
/**
+ * Remap the PCI resource of a PCI device in anonymous virtual memory.
+ *
+ * @param dev
+ * Pointer to the rte_pci_device structure.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+pci_uio_remap_resource(struct rte_pci_device *dev);
+
+/**
* Map device memory to uio resource
*
* This function is private to EAL.
@@ -166,4 +182,27 @@ rte_pci_match(const struct rte_pci_driver *pci_drv,
enum rte_iova_mode
rte_pci_get_iommu_class(void);
+/*
+ * Iterate over internal devices,
+ * matching any device against the provided
+ * string.
+ *
+ * @param start
+ * Iteration starting point.
+ *
+ * @param str
+ * Device string to match against.
+ *
+ * @param it
+ * (unused) iterator structure.
+ *
+ * @return
+ * A pointer to the next matching device if any.
+ * NULL otherwise.
+ */
+void *
+rte_pci_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it);
+
#endif /* _PCI_PRIVATE_H_ */
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
index 0d1955ff..f0d6d81c 100644
--- a/drivers/bus/pci/rte_bus_pci.h
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -62,10 +62,12 @@ struct rte_pci_device {
struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
/**< PCI Memory Resource */
struct rte_intr_handle intr_handle; /**< Interrupt handle */
- struct rte_pci_driver *driver; /**< Associated driver */
+ struct rte_pci_driver *driver; /**< PCI driver used in probing */
uint16_t max_vfs; /**< sriov enable if not zero */
enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
char name[PCI_PRI_STR_SIZE+1]; /**< PCI location (ASCII) */
+ struct rte_intr_handle vfio_req_intr_handle;
+ /**< Handler of VFIO request interrupt */
};
/**
@@ -121,7 +123,7 @@ struct rte_pci_driver {
pci_probe_t *probe; /**< Device Probe function. */
pci_remove_t *remove; /**< Device Remove function. */
const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
- uint32_t drv_flags; /**< Flags contolling handling of device. */
+ uint32_t drv_flags; /**< Flags RTE_PCI_DRV_*. */
};
/**
@@ -137,6 +139,8 @@ struct rte_pci_bus {
#define RTE_PCI_DRV_NEED_MAPPING 0x0001
/** Device needs PCI BAR mapping with enabled write combining (wc) */
#define RTE_PCI_DRV_WC_ACTIVATE 0x0002
+/** Device already probed can be probed again to check for new ports. */
+#define RTE_PCI_DRV_PROBE_AGAIN 0x0004
/** Device driver supports link state interrupt */
#define RTE_PCI_DRV_INTR_LSC 0x0008
/** Device driver supports device removal interrupt */
@@ -219,6 +223,8 @@ void rte_pci_unregister(struct rte_pci_driver *driver);
* The length of the data buffer.
* @param offset
* The offset into PCI config space
+ * @return
+ * Number of bytes read on success, negative on error.
*/
int rte_pci_read_config(const struct rte_pci_device *device,
void *buf, size_t len, off_t offset);
diff --git a/drivers/bus/vdev/Makefile b/drivers/bus/vdev/Makefile
index bd0bb895..803b8ea7 100644
--- a/drivers/bus/vdev/Makefile
+++ b/drivers/bus/vdev/Makefile
@@ -16,11 +16,12 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
EXPORT_MAP := rte_bus_vdev_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
SRCS-y += vdev.c
+SRCS-y += vdev_params.c
-LDLIBS += -lrte_eal
+LDLIBS += -lrte_eal -lrte_kvargs
#
# Export include files
diff --git a/drivers/bus/vdev/meson.build b/drivers/bus/vdev/meson.build
index 2ee648b4..803785f1 100644
--- a/drivers/bus/vdev/meson.build
+++ b/drivers/bus/vdev/meson.build
@@ -1,7 +1,12 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-sources = files('vdev.c')
+version = 2
+
+sources = files('vdev.c',
+ 'vdev_params.c')
install_headers('rte_bus_vdev.h')
allow_experimental_apis = true
+
+deps += ['kvargs']
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index 6139dd55..9c66bdc7 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -23,6 +23,7 @@
#include "rte_bus_vdev.h"
#include "vdev_logs.h"
+#include "vdev_private.h"
#define VDEV_MP_KEY "bus_vdev_mp"
@@ -40,7 +41,7 @@ static struct vdev_device_list vdev_device_list =
static rte_spinlock_recursive_t vdev_device_list_lock =
RTE_SPINLOCK_RECURSIVE_INITIALIZER;
-struct vdev_driver_list vdev_driver_list =
+static struct vdev_driver_list vdev_driver_list =
TAILQ_HEAD_INITIALIZER(vdev_driver_list);
struct vdev_custom_scan {
@@ -149,10 +150,9 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
if (vdev_parse(name, &driver))
return -1;
- dev->device.driver = &driver->driver;
ret = driver->probe(dev);
- if (ret)
- dev->device.driver = NULL;
+ if (ret == 0)
+ dev->device.driver = &driver->driver;
return ret;
}
@@ -202,7 +202,9 @@ alloc_devargs(const char *name, const char *args)
}
static int
-insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
+insert_vdev(const char *name, const char *args,
+ struct rte_vdev_device **p_dev,
+ bool init)
{
struct rte_vdev_device *dev;
struct rte_devargs *devargs;
@@ -221,17 +223,24 @@ insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
goto fail;
}
+ dev->device.bus = &rte_vdev_bus;
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
if (find_vdev(name)) {
+ /*
+ * A vdev is expected to have only one port.
+ * So there is no reason to try probing again,
+ * even with new arguments.
+ */
ret = -EEXIST;
goto fail;
}
TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
- rte_devargs_insert(devargs);
+ if (init)
+ rte_devargs_insert(devargs);
if (p_dev)
*p_dev = dev;
@@ -248,20 +257,18 @@ int
rte_vdev_init(const char *name, const char *args)
{
struct rte_vdev_device *dev;
- struct rte_devargs *devargs;
int ret;
rte_spinlock_recursive_lock(&vdev_device_list_lock);
- ret = insert_vdev(name, args, &dev);
+ ret = insert_vdev(name, args, &dev, true);
if (ret == 0) {
ret = vdev_probe_all_drivers(dev);
if (ret) {
if (ret > 0)
VDEV_LOG(ERR, "no driver found for %s", name);
/* If fails, remove it from vdev list */
- devargs = dev->device.devargs;
TAILQ_REMOVE(&vdev_device_list, dev, next);
- rte_devargs_remove(devargs->bus->name, devargs->name);
+ rte_devargs_remove(dev->device.devargs);
free(dev);
}
}
@@ -289,7 +296,6 @@ int
rte_vdev_uninit(const char *name)
{
struct rte_vdev_device *dev;
- struct rte_devargs *devargs;
int ret;
if (name == NULL)
@@ -308,8 +314,7 @@ rte_vdev_uninit(const char *name)
goto unlock;
TAILQ_REMOVE(&vdev_device_list, dev, next);
- devargs = dev->device.devargs;
- rte_devargs_remove(devargs->bus->name, devargs->name);
+ rte_devargs_remove(dev->device.devargs);
free(dev);
unlock:
@@ -346,6 +351,7 @@ vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
const char *devname;
int num;
+ int ret;
strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
mp_resp.len_param = sizeof(*ou);
@@ -380,7 +386,10 @@ vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
break;
case VDEV_SCAN_ONE:
VDEV_LOG(INFO, "receive vdev, %s", in->name);
- if (insert_vdev(in->name, NULL, NULL) < 0)
+ ret = insert_vdev(in->name, NULL, NULL, false);
+ if (ret == -EEXIST)
+ VDEV_LOG(DEBUG, "device already exists, %s", in->name);
+ else if (ret < 0)
VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
break;
default:
@@ -419,6 +428,7 @@ vdev_scan(void)
mp_rep = &mp_reply.msgs[0];
resp = (struct vdev_param *)mp_rep->param;
VDEV_LOG(INFO, "Received %d vdevs", resp->num);
+ free(mp_reply.msgs);
} else
VDEV_LOG(ERR, "Failed to request vdev from primary");
@@ -455,6 +465,7 @@ vdev_scan(void)
continue;
}
+ dev->device.bus = &rte_vdev_bus;
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
@@ -480,7 +491,7 @@ vdev_probe(void)
* we call each driver probe.
*/
- if (dev->device.driver)
+ if (rte_dev_is_probed(&dev->device))
continue;
if (vdev_probe_all_drivers(dev)) {
@@ -493,9 +504,9 @@ vdev_probe(void)
return ret;
}
-static struct rte_device *
-vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
- const void *data)
+struct rte_device *
+rte_vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
{
const struct rte_vdev_device *vstart;
struct rte_vdev_device *dev;
@@ -532,10 +543,11 @@ vdev_unplug(struct rte_device *dev)
static struct rte_bus rte_vdev_bus = {
.scan = vdev_scan,
.probe = vdev_probe,
- .find_device = vdev_find_device,
+ .find_device = rte_vdev_find_device,
.plug = vdev_plug,
.unplug = vdev_unplug,
.parse = vdev_parse,
+ .dev_iterate = rte_vdev_dev_iterate,
};
RTE_REGISTER_BUS(vdev, rte_vdev_bus);
diff --git a/drivers/bus/vdev/vdev_params.c b/drivers/bus/vdev/vdev_params.c
new file mode 100644
index 00000000..6f74704d
--- /dev/null
+++ b/drivers/bus/vdev/vdev_params.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+
+#include "vdev_logs.h"
+#include "vdev_private.h"
+
+enum vdev_params {
+ RTE_VDEV_PARAM_NAME,
+ RTE_VDEV_PARAM_MAX,
+};
+
+static const char * const vdev_params_keys[] = {
+ [RTE_VDEV_PARAM_NAME] = "name",
+ [RTE_VDEV_PARAM_MAX] = NULL,
+};
+
+static int
+vdev_dev_match(const struct rte_device *dev,
+ const void *_kvlist)
+{
+ int ret;
+ const struct rte_kvargs *kvlist = _kvlist;
+ char *name;
+
+ /* cannot pass const dev->name to rte_kvargs_process() */
+ name = strdup(dev->name);
+ if (name == NULL)
+ return -1;
+ ret = rte_kvargs_process(kvlist,
+ vdev_params_keys[RTE_VDEV_PARAM_NAME],
+ rte_kvargs_strcmp, name);
+ free(name);
+ if (ret != 0)
+ return -1;
+
+ return 0;
+}
+
+void *
+rte_vdev_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it __rte_unused)
+{
+ struct rte_kvargs *kvargs = NULL;
+ struct rte_device *dev;
+
+ if (str != NULL) {
+ kvargs = rte_kvargs_parse(str, vdev_params_keys);
+ if (kvargs == NULL) {
+ VDEV_LOG(ERR, "cannot parse argument list");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ }
+ dev = rte_vdev_find_device(start, vdev_dev_match, kvargs);
+ rte_kvargs_free(kvargs);
+ return dev;
+}
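As with the PCI iterator earlier, this hook is reached through the generic device iteration; "name" is the only key handled here. A short hedged fragment reusing the same pattern (the vdev name is a placeholder, and the experimental RTE_DEV_FOREACH macro is assumed):

    struct rte_dev_iterator it;
    struct rte_device *dev;

    /* matches vdevs whose name equals the placeholder "net_null0" */
    RTE_DEV_FOREACH(dev, "bus=vdev,name=net_null0", &it)
        printf("matched vdev %s\n", dev->name);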
diff --git a/drivers/bus/vdev/vdev_private.h b/drivers/bus/vdev/vdev_private.h
new file mode 100644
index 00000000..ba6dc48f
--- /dev/null
+++ b/drivers/bus/vdev/vdev_private.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#ifndef _VDEV_PRIVATE_H_
+#define _VDEV_PRIVATE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_device *
+rte_vdev_find_device(const struct rte_device *start,
+ rte_dev_cmp_t cmp,
+ const void *data);
+
+void *
+rte_vdev_dev_iterate(const void *start,
+ const char *str,
+ const struct rte_dev_iterator *it);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VDEV_PRIVATE_H_ */
diff --git a/drivers/bus/vmbus/Makefile b/drivers/bus/vmbus/Makefile
index deee9dd1..e54c557c 100644
--- a/drivers/bus/vmbus/Makefile
+++ b/drivers/bus/vmbus/Makefile
@@ -3,7 +3,7 @@
include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_bus_vmbus.a
-LIBABIVER := 1
+LIBABIVER := 2
EXPORT_MAP := rte_bus_vmbus_version.map
CFLAGS += -I$(SRCDIR)
diff --git a/drivers/bus/vmbus/linux/vmbus_bus.c b/drivers/bus/vmbus/linux/vmbus_bus.c
index 52d6a3c0..a4755a38 100644
--- a/drivers/bus/vmbus/linux/vmbus_bus.c
+++ b/drivers/bus/vmbus/linux/vmbus_bus.c
@@ -229,6 +229,7 @@ vmbus_scan_one(const char *name)
if (dev == NULL)
return -1;
+ dev->device.bus = &rte_vmbus_bus.bus;
dev->device.name = strdup(name);
if (!dev->device.name)
goto error;
@@ -276,6 +277,8 @@ vmbus_scan_one(const char *name)
dev->device.numa_node = SOCKET_ID_ANY;
}
+ dev->device.devargs = vmbus_devargs_lookup(dev);
+
/* device is valid, add in list (sorted) */
VMBUS_LOG(DEBUG, "Adding vmbus device %s", name);
diff --git a/drivers/bus/vmbus/meson.build b/drivers/bus/vmbus/meson.build
index 18daabec..0e4d058e 100644
--- a/drivers/bus/vmbus/meson.build
+++ b/drivers/bus/vmbus/meson.build
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
+version = 2
+
allow_experimental_apis = true
install_headers('rte_bus_vmbus.h','rte_vmbus_reg.h')
diff --git a/drivers/bus/vmbus/private.h b/drivers/bus/vmbus/private.h
index 9964fc42..211127dd 100644
--- a/drivers/bus/vmbus/private.h
+++ b/drivers/bus/vmbus/private.h
@@ -10,11 +10,14 @@
#include <sys/uio.h>
#include <rte_log.h>
#include <rte_vmbus_reg.h>
+#include <rte_bus_vmbus.h>
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
extern int vmbus_logtype_bus;
#define VMBUS_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, vmbus_logtype_bus, "%s(): " fmt "\n", \
@@ -66,6 +69,9 @@ struct vmbus_channel {
#define VMBUS_MAX_CHANNELS 64
+struct rte_devargs *
+vmbus_devargs_lookup(struct rte_vmbus_device *dev);
+
int vmbus_chan_create(const struct rte_vmbus_device *device,
uint16_t relid, uint16_t subid, uint8_t monitor_id,
struct vmbus_channel **new_chan);
diff --git a/drivers/bus/vmbus/rte_bus_vmbus.h b/drivers/bus/vmbus/rte_bus_vmbus.h
index 4a2c1f6f..2839fef5 100644
--- a/drivers/bus/vmbus/rte_bus_vmbus.h
+++ b/drivers/bus/vmbus/rte_bus_vmbus.h
@@ -365,6 +365,21 @@ void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan);
/**
+ * Set the host monitor latency hint
+ *
+ * @param dev
+ * VMBUS device
+ * @param chan
+ * Pointer to vmbus_channel structure.
+ * @param latency
+ * Approximate wait period between hypervisor examinations of
+ * the trigger page (in nanoseconds).
+ */
+void rte_vmbus_set_latency(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ uint32_t latency);
+
+/**
* Register a VMBUS driver.
*
* @param driver
diff --git a/drivers/bus/vmbus/rte_bus_vmbus_version.map b/drivers/bus/vmbus/rte_bus_vmbus_version.map
index dabb9203..ae231ad3 100644
--- a/drivers/bus/vmbus/rte_bus_vmbus_version.map
+++ b/drivers/bus/vmbus/rte_bus_vmbus_version.map
@@ -27,3 +27,10 @@ DPDK_18.08 {
local: *;
};
+
+DPDK_18.11 {
+ global:
+
+ rte_vmbus_set_latency;
+
+} DPDK_18.08;
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index cc5f3e83..bd14c066 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -60,6 +60,32 @@ vmbus_set_event(const struct rte_vmbus_device *dev,
}
/*
+ * Set the wait period between hypervisor examinations of the trigger page.
+ */
+void
+rte_vmbus_set_latency(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ uint32_t latency)
+{
+ uint32_t trig_idx = chan->monitor_id / VMBUS_MONTRIG_LEN;
+ uint32_t trig_offs = chan->monitor_id % VMBUS_MONTRIG_LEN;
+
+ if (latency >= UINT16_MAX * 100) {
+ VMBUS_LOG(ERR, "invalid latency value %u", latency);
+ return;
+ }
+
+ if (trig_idx >= VMBUS_MONTRIGS_MAX) {
+ VMBUS_LOG(ERR, "invalid monitor trigger %u",
+ trig_idx);
+ return;
+ }
+
+ /* Host value is expressed in 100 nanosecond units */
+ dev->monitor_page->lat[trig_idx][trig_offs] = latency / 100;
+}
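A hedged usage sketch: the caller passes the latency hint in nanoseconds and the function converts it to the 100 ns units kept in the monitor page; the device/channel variables and the 50 us figure are placeholders.

    /* e.g. from a NIC PMD's channel setup path (names are placeholders) */
    uint32_t latency_ns = 50 * 1000;    /* 50 us hint */

    rte_vmbus_set_latency(hv_dev, chan, latency_ns);
    /* the monitor page now holds latency_ns / 100 = 500 units */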
+
+/*
* Notify host that there are data pending on our TX bufring.
*
* Since this in userspace, rely on the monitor page.
diff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c
index c7165ad5..48a219f7 100644
--- a/drivers/bus/vmbus/vmbus_common.c
+++ b/drivers/bus/vmbus/vmbus_common.c
@@ -85,7 +85,6 @@ vmbus_match(const struct rte_vmbus_driver *dr,
return false;
}
-
/*
* If device ID match, call the devinit() function of the driver.
*/
@@ -112,7 +111,6 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
/* reference driver structure */
dev->driver = dr;
- dev->device.driver = &dr->driver;
if (dev->device.numa_node < 0) {
VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0");
@@ -125,6 +123,8 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
if (ret) {
dev->driver = NULL;
rte_vmbus_unmap_device(dev);
+ } else {
+ dev->device.driver = &dr->driver;
}
return ret;
@@ -143,7 +143,7 @@ vmbus_probe_all_drivers(struct rte_vmbus_device *dev)
int rc;
/* Check if a driver is already loaded */
- if (dev->driver != NULL) {
+ if (rte_dev_is_probed(&dev->device)) {
VMBUS_LOG(DEBUG, "VMBUS driver already loaded");
return 0;
}
@@ -204,6 +204,27 @@ vmbus_parse(const char *name, void *addr)
return ret;
}
+/*
+ * scan for matching device args on command line
+ * example:
+ * -w 'vmbus:635a7ae3-091e-4410-ad59-667c4f8c04c3,latency=20'
+ */
+struct rte_devargs *
+vmbus_devargs_lookup(struct rte_vmbus_device *dev)
+{
+ struct rte_devargs *devargs;
+ rte_uuid_t addr;
+
+ RTE_EAL_DEVARGS_FOREACH("vmbus", devargs) {
+ vmbus_parse(devargs->name, &addr);
+
+ if (rte_uuid_compare(dev->device_id, addr) == 0)
+ return devargs;
+ }
+ return NULL;
+}
+
/* register vmbus driver */
void
rte_vmbus_register(struct rte_vmbus_driver *driver)
diff --git a/drivers/common/Makefile b/drivers/common/Makefile
index 0fd22376..87b8a59a 100644
--- a/drivers/common/Makefile
+++ b/drivers/common/Makefile
@@ -4,8 +4,23 @@
include $(RTE_SDK)/mk/rte.vars.mk
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO),y)
+DIRS-y += cpt
+endif
+
ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy)
DIRS-y += octeontx
endif
+MVEP-y := $(CONFIG_RTE_LIBRTE_MVPP2_PMD)
+MVEP-y += $(CONFIG_RTE_LIBRTE_MVNETA_PMD)
+MVEP-y += $(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO)
+ifneq (,$(findstring y,$(MVEP-y)))
+DIRS-y += mvep
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_COMMON_DPAAX),y)
+DIRS-y += dpaax
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/common/cpt/Makefile b/drivers/common/cpt/Makefile
new file mode 100644
index 00000000..2340aa96
--- /dev/null
+++ b/drivers/common/cpt/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_cpt.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci
+EXPORT_MAP := rte_common_cpt_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += cpt_pmd_ops_helper.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
new file mode 100644
index 00000000..8461cd60
--- /dev/null
+++ b/drivers/common/cpt/cpt_common.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_COMMON_H_
+#define _CPT_COMMON_H_
+
+/*
+ * This file defines common macros and structs
+ */
+
+/*
+ * Macros to determine CPT model. Driver makefile will define CPT_MODEL
+ * accordingly
+ */
+#define CRYPTO_OCTEONTX 0x1
+
+#define TIME_IN_RESET_COUNT 5
+
+/* Default command timeout in seconds */
+#define DEFAULT_COMMAND_TIMEOUT 4
+
+#define CPT_COUNT_THOLD 32
+#define CPT_TIMER_THOLD 0x3F
+
+#define AE_TYPE 1
+#define SE_TYPE 2
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#ifndef ROUNDUP8
+#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
+#endif
+
+#ifndef ROUNDUP16
+#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
+#endif
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+#define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
+
+struct cptvf_meta_info {
+ void *cptvf_meta_pool;
+ int cptvf_op_mlen;
+ int cptvf_op_sb_mlen;
+};
+
+struct rid {
+ /** Request id of a crypto operation */
+ uintptr_t rid;
+};
+
+/*
+ * Pending queue structure
+ *
+ */
+struct pending_queue {
+ /** Tail of queue to be used for enqueue */
+ uint16_t enq_tail;
+ /** Head of queue to be used for dequeue */
+ uint16_t deq_head;
+ /** Array of pending requests */
+ struct rid *rid_queue;
+ /** Pending requests count */
+ uint64_t pending_count;
+};
+
+struct cpt_request_info {
+ /** Data path fields */
+ uint64_t comp_baddr;
+ volatile uint64_t *completion_addr;
+ volatile uint64_t *alternate_caddr;
+ void *op;
+ struct {
+ uint64_t ei0;
+ uint64_t ei1;
+ uint64_t ei2;
+ uint64_t ei3;
+ } ist;
+
+ /** Control path fields */
+ uint64_t time_out;
+ uint8_t extra_time;
+};
+
+#endif /* _CPT_COMMON_H_ */
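The pending queue above is a plain ring whose indices wrap with MOD_INC(). A minimal enqueue-side sketch, assuming a placeholder DEPTH for the ring size (the real queue length is driver-specific):

    #define DEPTH 1024    /* placeholder ring size */

    static inline void
    pq_push_sketch(struct pending_queue *pq, uintptr_t rid)
    {
        pq->rid_queue[pq->enq_tail].rid = rid;
        MOD_INC(pq->enq_tail, DEPTH);   /* wraps back to 0 at DEPTH - 1 */
        pq->pending_count++;
    }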
diff --git a/drivers/common/cpt/cpt_hw_types.h b/drivers/common/cpt/cpt_hw_types.h
new file mode 100644
index 00000000..cff59c79
--- /dev/null
+++ b/drivers/common/cpt/cpt_hw_types.h
@@ -0,0 +1,522 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_HW_TYPES_H_
+#define _CPT_HW_TYPES_H_
+
+#include <rte_byteorder.h>
+
+/*
+ * This file defines HRM specific structs.
+ *
+ */
+
+#define CPT_VF_INTR_MBOX_MASK (1<<0)
+#define CPT_VF_INTR_DOVF_MASK (1<<1)
+#define CPT_VF_INTR_IRDE_MASK (1<<2)
+#define CPT_VF_INTR_NWRP_MASK (1<<3)
+#define CPT_VF_INTR_SWERR_MASK (1<<4)
+#define CPT_VF_INTR_HWERR_MASK (1<<5)
+#define CPT_VF_INTR_FAULT_MASK (1<<6)
+
+#define CPT_INST_SIZE (64)
+#define CPT_NEXT_CHUNK_PTR_SIZE (8)
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ uint16_t opcode;
+ uint16_t param1;
+ uint16_t param2;
+ uint16_t dlen;
+ } s;
+} vq_cmd_word0_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t grp : 3;
+ uint64_t cptr : 61;
+#else
+ uint64_t cptr : 61;
+ uint64_t grp : 3;
+#endif
+ } s;
+} vq_cmd_word3_t;
+
+typedef struct cpt_vq_command {
+ vq_cmd_word0_t cmd;
+ uint64_t dptr;
+ uint64_t rptr;
+ vq_cmd_word3_t cptr;
+} cpt_vq_cmd_t;
+
+/**
+ * Structure cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout.
+ * Instructions are stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ */
+typedef union cpt_inst_s {
+ uint64_t u[8];
+ struct cpt_inst_s_8s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ /* [ 16: 16] Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE]
+ * will be incremented, and based on the rules described
+ * there an interrupt may occur.
+ */
+ uint64_t doneint : 1;
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t doneint : 1;
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 1 - Big Endian */
+ /* [127: 64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ *
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ */
+ uint64_t res_addr : 64;
+#else /* Word 1 - Little Endian */
+ uint64_t res_addr : 64;
+#endif /* Word 1 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 2 - Big Endian */
+ uint64_t reserved_172_191 : 20;
+ /* [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to
+ * use when CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP()
+ * must map [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ */
+ uint64_t grp : 10;
+ /* [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use
+ * when CPT submits work to SSO.
+ */
+ uint64_t tt : 2;
+ /* [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when
+ * CPT submits work to SSO.
+ */
+ uint64_t tag : 32;
+#else /* Word 2 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t reserved_172_191 : 20;
+#endif /* Word 2 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 3 - Big Endian */
+ /** [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ * work-queue entry that CPT submits work to SSO after all
+ * context, output data, and result write operations are
+ * visible to other CNXXXX units and the cores.
+ * Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Internal: Bits <63:49>, <2:0> are ignored by hardware,
+ * treated as always 0x0.
+ **/
+ uint64_t wq_ptr : 64;
+#else /* Word 3 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 3 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 4 - Big Endian */
+ union {
+ /** [319:256] Engine instruction word 0. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei0 : 64;
+ vq_cmd_word0_t vq_cmd_w0;
+ };
+#else /* Word 4 - Little Endian */
+ union {
+ uint64_t ei0 : 64;
+ vq_cmd_word0_t vq_cmd_w0;
+ };
+#endif /* Word 4 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 5 - Big Endian */
+ union {
+ /** [383:320] Engine instruction word 1. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei1 : 64;
+ uint64_t dptr;
+ };
+#else /* Word 5 - Little Endian */
+ union {
+ uint64_t ei1 : 64;
+ uint64_t dptr;
+ };
+#endif /* Word 5 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 6 - Big Endian */
+ union {
+ /** [447:384] Engine instruction word 2. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei2 : 64;
+ uint64_t rptr;
+ };
+#else /* Word 6 - Little Endian */
+ union {
+ uint64_t ei2 : 64;
+ uint64_t rptr;
+ };
+#endif /* Word 6 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 7 - Big Endian */
+ union {
+ /** [511:448] Engine instruction word 3. Passed to the
+ * AE/SE.
+ **/
+ uint64_t ei3 : 64;
+ vq_cmd_word3_t vq_cmd_w3;
+ };
+#else /* Word 7 - Little Endian */
+ union {
+ uint64_t ei3 : 64;
+ vq_cmd_word3_t vq_cmd_w3;
+ };
+#endif /* Word 7 - End */
+ } s8x;
+} cpt_inst_s_t;
+
+/**
+ * Structure cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and each
+ * instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ */
+typedef union cpt_res_s {
+ uint64_t u[2];
+ struct cpt_res_s_8s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ /** [ 16: 16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ **/
+ uint64_t doneint : 1;
+ uint64_t reserved_8_15 : 8;
+ /** [ 7: 0] Indicates completion/error status of the CPT
+ * coprocessor for the associated instruction, as enumerated by
+ * CPT_COMP_E. Core software may write the memory location
+ * containing [COMPCODE] to 0x0 before ringing the doorbell, and
+ * then poll for completion by checking for a nonzero value.
+ *
+ * Once the core observes a nonzero [COMPCODE] value in this
+ * case, the CPT coprocessor will have also completed L2/DRAM
+ * write operations.
+ **/
+ uint64_t compcode : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t compcode : 8;
+ uint64_t reserved_8_15 : 8;
+ uint64_t doneint : 1;
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 1 - Big Endian */
+ uint64_t reserved_64_127 : 64;
+#else /* Word 1 - Little Endian */
+ uint64_t reserved_64_127 : 64;
+#endif /* Word 1 - End */
+ } s8x;
+} cpt_res_s_t;
+
+/**
+ * Register (NCB) cpt#_vq#_ctl
+ *
+ * CPT VF Queue Control Registers
+ * This register configures queues. This register should be changed (other than
+ * clearing [ENA]) only when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_ctl_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ /** [ 0: 0](R/W/H) Enables the logical instruction queue.
+ * See also CPT()_PF_Q()_CTL[CONT_ERR] and
+ * CPT()_VQ()_INPROG[INFLIGHT].
+ * 1 = Queue is enabled.
+ * 0 = Queue is disabled.
+ **/
+ uint64_t ena : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1;
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_ctl_t;
+
+/**
+ * Register (NCB) cpt#_vq#_done
+ *
+ * CPT Queue Done Count Registers
+ * These registers contain the per-queue instruction done count.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_done_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ /** [ 19: 0](R/W/H) Done count. When CPT_INST_S[DONEINT] is set
+ * and that instruction completes, CPT()_VQ()_DONE[DONE] is
+ * incremented. Writes to this field are for diagnostic use only;
+ * instead, software writes CPT()_VQ()_DONE_ACK with the number of
+ * decrements for this field.
+ *
+ * Interrupts are sent as follows:
+ *
+ * When CPT()_VQ()_DONE[DONE] = 0, then no results are pending,
+ * the interrupt coalescing timer is held to zero, and an
+ * interrupt is not sent.
+ *
+ * When CPT()_VQ()_DONE[DONE] != 0, then the interrupt
+ * coalescing timer counts. If the counter is >=
+ * CPT()_VQ()_DONE_WAIT[TIME_WAIT]*1024, or CPT()_VQ()_DONE[DONE] >=
+ * CPT()_VQ()_DONE_WAIT[NUM_WAIT], i.e. enough time has passed or enough
+ * results have arrived, then the interrupt is sent. Otherwise,
+ * it is not sent due to coalescing.
+ *
+ * When CPT()_VQ()_DONE_ACK is written (or CPT()_VQ()_DONE is
+ * written but this is not typical), the interrupt coalescing
+ * timer restarts. Note that after decrementing, this interrupt
+ * equation is recomputed; for example, if CPT()_VQ()_DONE[DONE]
+ * >= CPT()_VQ()_DONE_WAIT[NUM_WAIT] and the timer is
+ * zero, the interrupt will be resent immediately. (This covers
+ * the race case between software acknowledging an interrupt and
+ * a result returning.)
+ *
+ * When CPT()_VQ()_DONE_ENA_W1S[DONE] = 0, interrupts are not
+ * sent, but the counting described above still occurs.
+ *
+ * Since CPT instructions complete out-of-order, if software is
+ * using completion interrupts the suggested scheme is to
+ * request a DONEINT on each request, and when an interrupt
+ * arrives perform a "greedy" scan for completions; even if a
+ * later command is acknowledged first this will not result in
+ * missing a completion.
+ *
+ * Software is responsible for making sure [DONE] does not
+ * overflow; for example, by ensuring there are not more than
+ * 2^20-1 instructions in flight that may request interrupts.
+ **/
+ uint64_t done : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t done : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_done_t;
+
+/**
+ * Register (NCB) cpt#_vq#_done_ack
+ *
+ * CPT Queue Done Count Ack Registers
+ * This register is written by software to acknowledge interrupts.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_done_ack_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ /** [ 19: 0](R/W/H) Number of decrements to CPT()_VQ()_DONE
+ * [DONE]. Reads CPT()_VQ()_DONE[DONE].
+ *
+ * Written by software to acknowledge interrupts. If
+ * CPT()_VQ()_DONE[DONE] is still nonzero, the interrupt will be
+ * re-sent if the conditions described in CPT()_VQ()_DONE[DONE] are
+ * satisfied.
+ **/
+ uint64_t done_ack : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t done_ack : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_done_ack_t;
+
+/**
+ * Register (NCB) cpt#_vq#_done_wait
+ *
+ * CPT Queue Done Interrupt Coalescing Wait Registers
+ * Specifies the per queue interrupt coalescing settings.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_done_wait_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ /** [ 47: 32](R/W) Time hold-off. When CPT()_VQ()_DONE[DONE] =
+ * 0, or CPT()_VQ()_DONE_ACK is written a timer is cleared. When
+ * the timer reaches [TIME_WAIT]*1024 then interrupt coalescing
+ * ends; see CPT()_VQ()_DONE[DONE]. If 0x0, time coalescing is
+ * disabled.
+ **/
+ uint64_t time_wait : 16;
+ uint64_t reserved_20_31 : 12;
+ /** [ 19: 0](R/W) Number of messages hold-off. When
+ * CPT()_VQ()_DONE[DONE] >= [NUM_WAIT] then interrupt coalescing
+ * ends; see CPT()_VQ()_DONE[DONE]. If 0x0, same behavior as
+ * 0x1.
+ **/
+ uint64_t num_wait : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t num_wait : 20;
+ uint64_t reserved_20_31 : 12;
+ uint64_t time_wait : 16;
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_done_wait_t;
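The CPT_COUNT_THOLD and CPT_TIMER_THOLD defaults from cpt_common.h are natural candidates for these two fields, though this patch does not show the driver programming them; treat the sketch below as an assumption:

    cptx_vqx_done_wait_t dw = { .u = 0 };

    dw.s.num_wait  = CPT_COUNT_THOLD;   /* coalesce up to 32 completions */
    dw.s.time_wait = CPT_TIMER_THOLD;   /* or ~0x3F * 1024 time units */
    /* dw.u would then be written to the CPT()_VQ()_DONE_WAIT register */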
+
+/**
+ * Register (NCB) cpt#_vq#_doorbell
+ *
+ * CPT Queue Doorbell Registers
+ * Doorbells for the CPT instruction queues.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_doorbell_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ /** [ 19: 0](R/W/H) Number of instruction queue 64-bit words
+ * to add to the CPT instruction doorbell count. The readback
+ * value is the current number of pending doorbell requests.
+ *
+ * If the counter overflows, CPT()_VQ()_MISC_INT[DBELL_DOVF] is set.
+ *
+ * To reset the count back to zero, write one to clear
+ * CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF], then write a value
+ * of 2^20 minus the read [DBELL_CNT], then write one to
+ * CPT()_VQ()_MISC_INT_W1C[DBELL_DOVF] and
+ * CPT()_VQ()_MISC_INT_ENA_W1S[DBELL_DOVF].
+ *
+ * Must be a multiple of 8. All CPT instructions are 8 words
+ * and require a doorbell count that is a multiple of 8.
+ **/
+ uint64_t dbell_cnt : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t dbell_cnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_doorbell_t;
+
+/**
+ * Register (NCB) cpt#_vq#_inprog
+ *
+ * CPT Queue In Progress Count Registers
+ * These registers contain the per-queue instruction in flight registers.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_inprog_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ /** [ 7: 0](RO/H) Inflight count. Counts the number of
+ * instructions for the VF for which CPT is fetching, executing
+ * or responding to instructions. However this does not include
+ * any interrupts that are awaiting software handling
+ * (CPT()_VQ()_DONE[DONE] != 0x0).
+ *
+ * A queue may not be reconfigured until:
+ * 1. CPT()_VQ()_CTL[ENA] is cleared by software.
+ * 2. [INFLIGHT] is polled until it reaches zero.
+ **/
+ uint64_t inflight : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t inflight : 8;
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_inprog_t;
+
+/**
+ * Register (NCB) cpt#_vq#_misc_int
+ *
+ * CPT Queue Misc Interrupt Register
+ * These registers contain the per-queue miscellaneous interrupts.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_misc_int_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ /** [ 6: 6](R/W1C/H) Translation fault detected. */
+ uint64_t fault : 1;
+ /** [ 5: 5](R/W1C/H) Hardware error from engines. */
+ uint64_t hwerr : 1;
+ /** [ 4: 4](R/W1C/H) Software error from engines. */
+ uint64_t swerr : 1;
+ /** [ 3: 3](R/W1C/H) NCB result write response error. */
+ uint64_t nwrp : 1;
+ /** [ 2: 2](R/W1C/H) Instruction NCB read response error. */
+ uint64_t irde : 1;
+ /** [ 1: 1](R/W1C/H) Doorbell overflow. */
+ uint64_t dovf : 1;
+ /** [ 0: 0](R/W1C/H) PF to VF mailbox interrupt. Set when
+ * CPT()_VF()_PF_MBOX(0) is written.
+ **/
+ uint64_t mbox : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1;
+ uint64_t dovf : 1;
+ uint64_t irde : 1;
+ uint64_t nwrp : 1;
+ uint64_t swerr : 1;
+ uint64_t hwerr : 1;
+ uint64_t fault : 1;
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_misc_int_t;
+
+/**
+ * Register (NCB) cpt#_vq#_saddr
+ *
+ * CPT Queue Starting Buffer Address Registers
+ * These registers set the instruction buffer starting address.
+ */
+typedef union {
+ uint64_t u;
+ struct cptx_vqx_saddr_s {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN) /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ /** [ 48: 6](R/W/H) Instruction buffer IOVA <48:6>
+ * (64-byte aligned). When written, it is the initial buffer
+ * starting address; when read, it is the next read pointer to
+ * be requested from L2C. The PTR field is overwritten with the
+ * next pointer each time that the command buffer segment is
+ * exhausted. New commands will then be read from the newly
+ * specified command buffer pointer.
+ **/
+ uint64_t ptr : 43;
+ uint64_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_5 : 6;
+ uint64_t ptr : 43;
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+} cptx_vqx_saddr_t;
+
+#endif /*_CPT_HW_TYPES_H_ */
diff --git a/drivers/common/cpt/cpt_mcode_defines.h b/drivers/common/cpt/cpt_mcode_defines.h
new file mode 100644
index 00000000..becc14f4
--- /dev/null
+++ b/drivers/common/cpt/cpt_mcode_defines.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_MCODE_DEFINES_H_
+#define _CPT_MCODE_DEFINES_H_
+
+#include <rte_byteorder.h>
+#include <rte_memory.h>
+
+/*
+ * This file defines macros and structures according to microcode spec
+ *
+ */
+/* SE opcodes */
+#define CPT_MAJOR_OP_FC 0x33
+#define CPT_MAJOR_OP_HASH 0x34
+#define CPT_MAJOR_OP_HMAC 0x35
+#define CPT_MAJOR_OP_ZUC_SNOW3G 0x37
+#define CPT_MAJOR_OP_KASUMI 0x38
+#define CPT_MAJOR_OP_MISC 0x01
+
+#define CPT_BYTE_16 16
+#define CPT_BYTE_24 24
+#define CPT_BYTE_32 32
+#define CPT_MAX_SG_IN_OUT_CNT 32
+#define CPT_MAX_SG_CNT (CPT_MAX_SG_IN_OUT_CNT/2)
+
+#define COMPLETION_CODE_SIZE 8
+#define COMPLETION_CODE_INIT 0
+
+#define SG_LIST_HDR_SIZE (8u)
+#define SG_ENTRY_SIZE sizeof(sg_comp_t)
+
+#define CPT_DMA_MODE (1 << 7)
+
+#define CPT_FROM_CTX 0
+#define CPT_FROM_DPTR 1
+
+#define FC_GEN 0x1
+#define ZUC_SNOW3G 0x2
+#define KASUMI 0x3
+#define HASH_HMAC 0x4
+
+#define ZS_EA 0x1
+#define ZS_IA 0x2
+#define K_F8 0x4
+#define K_F9 0x8
+
+#define CPT_OP_CIPHER_ENCRYPT 0x1
+#define CPT_OP_CIPHER_DECRYPT 0x2
+#define CPT_OP_CIPHER_MASK 0x3
+
+#define CPT_OP_AUTH_VERIFY 0x4
+#define CPT_OP_AUTH_GENERATE 0x8
+#define CPT_OP_AUTH_MASK 0xC
+
+#define CPT_OP_ENCODE (CPT_OP_CIPHER_ENCRYPT | CPT_OP_AUTH_GENERATE)
+#define CPT_OP_DECODE (CPT_OP_CIPHER_DECRYPT | CPT_OP_AUTH_VERIFY)
+
+/* #define CPT_ALWAYS_USE_SG_MODE */
+#define CPT_ALWAYS_USE_SEPARATE_BUF
+
+/*
+ * Parameters for Flexi Crypto
+ * requests
+ */
+#define VALID_AAD_BUF 0x01
+#define VALID_MAC_BUF 0x02
+#define VALID_IV_BUF 0x04
+#define SINGLE_BUF_INPLACE 0x08
+#define SINGLE_BUF_HEADTAILROOM 0x10
+
+#define ENCR_IV_OFFSET(__d_offs) ((__d_offs >> 32) & 0xffff)
+#define ENCR_OFFSET(__d_offs) ((__d_offs >> 16) & 0xffff)
+#define AUTH_OFFSET(__d_offs) (__d_offs & 0xffff)
+#define ENCR_DLEN(__d_lens) (__d_lens >> 32)
+#define AUTH_DLEN(__d_lens) (__d_lens & 0xffffffff)
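These accessors decode two packed 64-bit words that request-builders assemble. A hedged packing sketch with placeholder offsets and lengths, following the bit layout implied by the macros above:

    uint16_t iv_off = 16, encr_off = 24, auth_off = 0;
    uint32_t encr_len = 64, auth_len = 80;

    /* IV offset in bits 47:32, cipher offset in 31:16, auth offset in
     * 15:0 of d_offs; cipher length in the high word of d_lens, auth
     * length in the low word.
     */
    uint64_t d_offs = ((uint64_t)iv_off << 32) |
                      ((uint64_t)encr_off << 16) | auth_off;
    uint64_t d_lens = ((uint64_t)encr_len << 32) | auth_len;

    /* ENCR_OFFSET(d_offs) == 24 and AUTH_DLEN(d_lens) == 80 here */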
+
+/* FC offset_control at start of DPTR in bytes */
+#define OFF_CTRL_LEN 8 /**< bytes */
+
+typedef enum {
+ MD5_TYPE = 1,
+ SHA1_TYPE = 2,
+ SHA2_SHA224 = 3,
+ SHA2_SHA256 = 4,
+ SHA2_SHA384 = 5,
+ SHA2_SHA512 = 6,
+ GMAC_TYPE = 7,
+ XCBC_TYPE = 8,
+ SHA3_SHA224 = 10,
+ SHA3_SHA256 = 11,
+ SHA3_SHA384 = 12,
+ SHA3_SHA512 = 13,
+ SHA3_SHAKE256 = 14,
+ SHA3_SHAKE512 = 15,
+
+ /* These are only for software use */
+ ZUC_EIA3 = 0x90,
+ SNOW3G_UIA2 = 0x91,
+ KASUMI_F9_CBC = 0x92,
+ KASUMI_F9_ECB = 0x93,
+} mc_hash_type_t;
+
+typedef enum {
+ /* To support passthrough */
+ PASSTHROUGH = 0x0,
+ /*
+ * These are defined by MC for Flexi crypto
+ * for field of 4 bits
+ */
+ DES3_CBC = 0x1,
+ DES3_ECB = 0x2,
+ AES_CBC = 0x3,
+ AES_ECB = 0x4,
+ AES_CFB = 0x5,
+ AES_CTR = 0x6,
+ AES_GCM = 0x7,
+ AES_XTS = 0x8,
+
+ /* These are only for software use */
+ ZUC_EEA3 = 0x90,
+ SNOW3G_UEA2 = 0x91,
+ KASUMI_F8_CBC = 0x92,
+ KASUMI_F8_ECB = 0x93,
+} mc_cipher_type_t;
+
+typedef enum {
+ AES_128_BIT = 0x1,
+ AES_192_BIT = 0x2,
+ AES_256_BIT = 0x3
+} mc_aes_type_t;
+
+typedef enum {
+ /* Microcode errors */
+ NO_ERR = 0x00,
+ ERR_OPCODE_UNSUPPORTED = 0x01,
+
+ /* SCATTER GATHER */
+ ERR_SCATTER_GATHER_WRITE_LENGTH = 0x02,
+ ERR_SCATTER_GATHER_LIST = 0x03,
+ ERR_SCATTER_GATHER_NOT_SUPPORTED = 0x04,
+
+ /* SE GC */
+ ERR_GC_LENGTH_INVALID = 0x41,
+ ERR_GC_RANDOM_LEN_INVALID = 0x42,
+ ERR_GC_DATA_LEN_INVALID = 0x43,
+ ERR_GC_DRBG_TYPE_INVALID = 0x44,
+ ERR_GC_CTX_LEN_INVALID = 0x45,
+ ERR_GC_CIPHER_UNSUPPORTED = 0x46,
+ ERR_GC_AUTH_UNSUPPORTED = 0x47,
+ ERR_GC_OFFSET_INVALID = 0x48,
+ ERR_GC_HASH_MODE_UNSUPPORTED = 0x49,
+ ERR_GC_DRBG_ENTROPY_LEN_INVALID = 0x4a,
+ ERR_GC_DRBG_ADDNL_LEN_INVALID = 0x4b,
+ ERR_GC_ICV_MISCOMPARE = 0x4c,
+ ERR_GC_DATA_UNALIGNED = 0x4d,
+
+ /* API Layer */
+ ERR_BAD_ALT_CCODE = 0xfd,
+ ERR_REQ_PENDING = 0xfe,
+ ERR_REQ_TIMEOUT = 0xff,
+
+ ERR_BAD_INPUT_LENGTH = (0x40000000 | 384), /* 0x40000180 */
+ ERR_BAD_KEY_LENGTH,
+ ERR_BAD_KEY_HANDLE,
+ ERR_BAD_CONTEXT_HANDLE,
+ ERR_BAD_SCALAR_LENGTH,
+ ERR_BAD_DIGEST_LENGTH,
+ ERR_BAD_INPUT_ARG,
+ ERR_BAD_RECORD_PADDING,
+ ERR_NB_REQUEST_PENDING,
+ ERR_EIO,
+ ERR_ENODEV,
+} mc_error_code_t;
+
+/**
+ * Enumeration cpt_comp_e
+ *
+ * CPT Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+typedef enum {
+ CPT_8X_COMP_E_NOTDONE = (0x00),
+ CPT_8X_COMP_E_GOOD = (0x01),
+ CPT_8X_COMP_E_FAULT = (0x02),
+ CPT_8X_COMP_E_SWERR = (0x03),
+ CPT_8X_COMP_E_HWERR = (0x04),
+ CPT_8X_COMP_E_LAST_ENTRY = (0xFF)
+} cpt_comp_e_t;
+
+typedef struct sglist_comp {
+ union {
+ uint64_t len;
+ struct {
+ uint16_t len[4];
+ } s;
+ } u;
+ uint64_t ptr[4];
+} sg_comp_t;
+
+struct cpt_sess_misc {
+ /** CPT opcode */
+ uint16_t cpt_op:4;
+ /** ZUC, SNOW3G & KASUMI flags */
+ uint16_t zsk_flag:4;
+ /** Flag for AES GCM */
+ uint16_t aes_gcm:1;
+ /** Flag for AES CTR */
+ uint16_t aes_ctr:1;
+ /** Flag for NULL cipher/auth */
+ uint16_t is_null:1;
+ /** Flag for GMAC */
+ uint16_t is_gmac:1;
+ /** AAD length */
+ uint16_t aad_length;
+ /** MAC len in bytes */
+ uint8_t mac_len;
+ /** IV length in bytes */
+ uint8_t iv_length;
+ /** Auth IV length in bytes */
+ uint8_t auth_iv_length;
+ /** Reserved field */
+ uint8_t rsvd1;
+ /** IV offset in bytes */
+ uint16_t iv_offset;
+ /** Auth IV offset in bytes */
+ uint16_t auth_iv_offset;
+ /** Salt */
+ uint32_t salt;
+ /** Context DMA address */
+ phys_addr_t ctx_dma_addr;
+};
+
+typedef union {
+ uint64_t flags;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t enc_cipher : 4;
+ uint64_t reserved1 : 1;
+ uint64_t aes_key : 2;
+ uint64_t iv_source : 1;
+ uint64_t hash_type : 4;
+ uint64_t reserved2 : 3;
+ uint64_t auth_input_type : 1;
+ uint64_t mac_len : 8;
+ uint64_t reserved3 : 8;
+ uint64_t encr_offset : 16;
+ uint64_t iv_offset : 8;
+ uint64_t auth_offset : 8;
+#else
+ uint64_t auth_offset : 8;
+ uint64_t iv_offset : 8;
+ uint64_t encr_offset : 16;
+ uint64_t reserved3 : 8;
+ uint64_t mac_len : 8;
+ uint64_t auth_input_type : 1;
+ uint64_t reserved2 : 3;
+ uint64_t hash_type : 4;
+ uint64_t iv_source : 1;
+ uint64_t aes_key : 2;
+ uint64_t reserved1 : 1;
+ uint64_t enc_cipher : 4;
+#endif
+ } e;
+} encr_ctrl_t;
+
+typedef struct {
+ encr_ctrl_t enc_ctrl;
+ uint8_t encr_key[32];
+ uint8_t encr_iv[16];
+} mc_enc_context_t;
+
+typedef struct {
+ uint8_t ipad[64];
+ uint8_t opad[64];
+} mc_fc_hmac_context_t;
+
+typedef struct {
+ mc_enc_context_t enc;
+ mc_fc_hmac_context_t hmac;
+} mc_fc_context_t;
+
+typedef struct {
+ uint8_t encr_auth_iv[16];
+ uint8_t ci_key[16];
+ uint8_t zuc_const[32];
+} mc_zuc_snow3g_ctx_t;
+
+typedef struct {
+ uint8_t reg_A[8];
+ uint8_t ci_key[16];
+} mc_kasumi_ctx_t;
+
+struct cpt_ctx {
+ /* Below fields are accessed by sw */
+ uint64_t enc_cipher :8;
+ uint64_t hash_type :8;
+ uint64_t mac_len :8;
+ uint64_t auth_key_len :8;
+ uint64_t fc_type :4;
+ uint64_t hmac :1;
+ uint64_t zsk_flags :3;
+ uint64_t k_ecb :1;
+ uint64_t snow3g :1;
+ uint64_t rsvd :22;
+ /* Below fields are accessed by hardware */
+ union {
+ mc_fc_context_t fctx;
+ mc_zuc_snow3g_ctx_t zs_ctx;
+ mc_kasumi_ctx_t k_ctx;
+ };
+ uint8_t auth_key[64];
+};
+
+/* Buffer pointer */
+typedef struct buf_ptr {
+ void *vaddr;
+ phys_addr_t dma_addr;
+ uint32_t size;
+ uint32_t resv;
+} buf_ptr_t;
+
+/* IOV Pointer */
+typedef struct {
+ int buf_cnt;
+ buf_ptr_t bufs[0];
+} iov_ptr_t;
+
+typedef union opcode_info {
+ uint16_t flags;
+ struct {
+ uint8_t major;
+ uint8_t minor;
+ } s;
+} opcode_info_t;
+
+typedef struct fc_params {
+ /* 0th cache line */
+ union {
+ buf_ptr_t bufs[1];
+ struct {
+ iov_ptr_t *src_iov;
+ iov_ptr_t *dst_iov;
+ };
+ };
+ void *iv_buf;
+ void *auth_iv_buf;
+ buf_ptr_t meta_buf;
+ buf_ptr_t ctx_buf;
+ uint64_t rsvd2;
+
+ /* 1st cache line */
+ buf_ptr_t aad_buf;
+ buf_ptr_t mac_buf;
+
+} fc_params_t;
+
+/*
+ * Parameters for digest
+ * generation requests.
+ * Only src_iov, op, ctx_buf, mac_buf, prep_req,
+ * meta_buf and auth_data_len are used for digest generation.
+ */
+typedef struct fc_params digest_params_t;
+
+/* Cipher Algorithms */
+typedef mc_cipher_type_t cipher_type_t;
+
+/* Auth Algorithms */
+typedef mc_hash_type_t auth_type_t;
+
+/* Helper macros */
+
+#define CPT_P_ENC_CTRL(fctx) fctx->enc.enc_ctrl.e
+
+#define SRC_IOV_SIZE \
+ (sizeof(iov_ptr_t) + (sizeof(buf_ptr_t) * CPT_MAX_SG_CNT))
+#define DST_IOV_SIZE \
+ (sizeof(iov_ptr_t) + (sizeof(buf_ptr_t) * CPT_MAX_SG_CNT))
+
+#define SESS_PRIV(__sess) \
+ (void *)((uint8_t *)__sess + sizeof(struct cpt_sess_misc))
+
+#endif /* _CPT_MCODE_DEFINES_H_ */
diff --git a/drivers/common/cpt/cpt_pmd_logs.h b/drivers/common/cpt/cpt_pmd_logs.h
new file mode 100644
index 00000000..4cbec4e3
--- /dev/null
+++ b/drivers/common/cpt/cpt_pmd_logs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_PMD_LOGS_H_
+#define _CPT_PMD_LOGS_H_
+
+#include <rte_log.h>
+
+/*
+ * This file defines log macros
+ */
+
+#define CPT_PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, cpt_logtype, \
+ "cpt: %s(): " fmt "\n", __func__, ##args)
+
+#define CPT_PMD_INIT_FUNC_TRACE() CPT_PMD_DRV_LOG_RAW(DEBUG, " >>")
+
+#define CPT_LOG_INFO(fmt, args...) \
+ CPT_PMD_DRV_LOG_RAW(INFO, fmt, ## args)
+#define CPT_LOG_WARN(fmt, args...) \
+ CPT_PMD_DRV_LOG_RAW(WARNING, fmt, ## args)
+#define CPT_LOG_ERR(fmt, args...) \
+ CPT_PMD_DRV_LOG_RAW(ERR, fmt, ## args)
+
+/*
+ * DP logs, compiled out at build time if the level is lower than the
+ * configured level. DP logs are logged under the 'PMD' type, so for dynamic
+ * logging the log level of 'pmd' has to be used.
+ */
+#define CPT_LOG_DP(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt "\n", ## args)
+
+#define CPT_LOG_DP_DEBUG(fmt, args...) \
+ CPT_LOG_DP(DEBUG, fmt, ## args)
+#define CPT_LOG_DP_INFO(fmt, args...) \
+ CPT_LOG_DP(INFO, fmt, ## args)
+#define CPT_LOG_DP_WARN(fmt, args...) \
+ CPT_LOG_DP(WARNING, fmt, ## args)
+#define CPT_LOG_DP_ERR(fmt, args...) \
+ CPT_LOG_DP(ERR, fmt, ## args)
+
+/*
+ * cpt_logtype is used for common logging. This field is initialized
+ * by the otx_* driver routines during PCI probe.
+ */
+int cpt_logtype;
+
+#endif /* _CPT_PMD_LOGS_H_ */
diff --git a/drivers/common/cpt/cpt_pmd_ops_helper.c b/drivers/common/cpt/cpt_pmd_ops_helper.c
new file mode 100644
index 00000000..1c18180f
--- /dev/null
+++ b/drivers/common/cpt/cpt_pmd_ops_helper.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_common.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+#include "cpt_pmd_ops_helper.h"
+
+#define CPT_MAX_IV_LEN 16
+#define CPT_OFFSET_CONTROL_BYTES 8
+
+int32_t
+cpt_pmd_ops_helper_get_mlen_direct_mode(void)
+{
+ uint32_t len = 0;
+
+ /* Request structure */
+ len = sizeof(struct cpt_request_info);
+
+ /* CPT HW result structure plus extra as it is aligned */
+ len += 2*sizeof(cpt_res_s_t);
+
+ return len;
+}
+
+int
+cpt_pmd_ops_helper_get_mlen_sg_mode(void)
+{
+ uint32_t len = 0;
+
+ len += sizeof(struct cpt_request_info);
+ len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
+ len += ROUNDUP8(SG_LIST_HDR_SIZE +
+ (ROUNDUP4(CPT_MAX_SG_IN_OUT_CNT) >> 2) * SG_ENTRY_SIZE);
+ len += 2 * COMPLETION_CODE_SIZE;
+ len += 2 * sizeof(cpt_res_s_t);
+ return len;
+}
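A hedged note on intended use: a PMD would typically size its per-operation meta buffers from the larger of the two helpers; the mempool creation itself is omitted here.

    uint32_t mlen = RTE_MAX(
        (uint32_t)cpt_pmd_ops_helper_get_mlen_direct_mode(),
        (uint32_t)cpt_pmd_ops_helper_get_mlen_sg_mode());
    /* mlen is then used as the element size of the per-op meta pool */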
diff --git a/drivers/common/cpt/cpt_pmd_ops_helper.h b/drivers/common/cpt/cpt_pmd_ops_helper.h
new file mode 100644
index 00000000..dd32f9a4
--- /dev/null
+++ b/drivers/common/cpt/cpt_pmd_ops_helper.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_PMD_OPS_HELPER_H_
+#define _CPT_PMD_OPS_HELPER_H_
+
+/*
+ * This file defines the agreement between the common layer and the individual
+ * crypto drivers for the OCTEON TX series. The control path in the otx*
+ * directory can directly call the functions declared here.
+ */
+
+/*
+ * Get meta length required when operating in direct mode (single buffer
+ * in-place)
+ *
+ * @return
+ * - length
+ */
+
+int32_t
+cpt_pmd_ops_helper_get_mlen_direct_mode(void);
+
+/*
+ * Get size of contiguous meta buffer to be allocated when working in scatter
+ * gather mode.
+ *
+ * @return
+ * - length
+ */
+int
+cpt_pmd_ops_helper_get_mlen_sg_mode(void);
+#endif /* _CPT_PMD_OPS_HELPER_H_ */
diff --git a/drivers/common/cpt/cpt_request_mgr.h b/drivers/common/cpt/cpt_request_mgr.h
new file mode 100644
index 00000000..4463cfbe
--- /dev/null
+++ b/drivers/common/cpt/cpt_request_mgr.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_REQUEST_MGR_H_
+#define _CPT_REQUEST_MGR_H_
+
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+
+#include "cpt_common.h"
+#include "cpt_mcode_defines.h"
+
+#if CPT_MODEL == CRYPTO_OCTEONTX
+#include "../../crypto/octeontx/otx_cryptodev_hw_access.h"
+#endif
+
+/*
+ * This file defines the agreement between the common layer and the individual
+ * crypto drivers for the OCTEON TX series. The data path in the otx*
+ * directory includes this file, and all these functions are static inline
+ * for better performance.
+ *
+ */
+
+/*
+ * Get the session size
+ *
+ * This function is used in the data path.
+ *
+ * @return
+ * - session size
+ */
+static __rte_always_inline unsigned int
+cpt_get_session_size(void)
+{
+ unsigned int ctx_len = sizeof(struct cpt_ctx);
+ return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
+}
+
+static __rte_always_inline int32_t __hot
+cpt_enqueue_req(struct cpt_instance *instance, struct pending_queue *pqueue,
+ void *req)
+{
+ struct cpt_request_info *user_req = (struct cpt_request_info *)req;
+ int32_t ret = 0;
+
+ if (unlikely(!req))
+ return 0;
+
+ if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
+ return -EAGAIN;
+
+ fill_cpt_inst(instance, req);
+
+ CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
+
+ /* Fill time_out cycles */
+ user_req->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+ user_req->extra_time = 0;
+
+ /* Default mode of software queue */
+ mark_cpt_inst(instance);
+
+ pqueue->rid_queue[pqueue->enq_tail].rid =
+ (uintptr_t)user_req;
+ /* We will use soft queue length here to limit
+ * requests
+ */
+ MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
+ pqueue->pending_count += 1;
+
+ CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
+ "op: %p", user_req, user_req->op);
+
+ return ret;
+}
+
+static __rte_always_inline int __hot
+cpt_pmd_crypto_operation(struct cpt_instance *instance,
+ struct rte_crypto_op *op, struct pending_queue *pqueue,
+ uint8_t cpt_driver_id)
+{
+ struct cpt_sess_misc *sess = NULL;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ void *prep_req = NULL, *mdata = NULL;
+ int ret = 0;
+ uint64_t cpt_op;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ int sess_len;
+
+ sess_len = cpt_get_session_size();
+
+ sess = rte_calloc(__func__, 1, sess_len, 8);
+ if (!sess)
+ return -ENOMEM;
+
+ sess->ctx_dma_addr = rte_malloc_virt2iova(sess) +
+ sizeof(struct cpt_sess_misc);
+
+ ret = instance_session_cfg(sym_op->xform, (void *)sess);
+ if (unlikely(ret)) {
+ /* Free the session allocated above to avoid leaking it */
+ rte_free(sess);
+ return -EINVAL;
+ }
+ } else {
+ sess = (struct cpt_sess_misc *)
+ get_sym_session_private_data(sym_op->session,
+ cpt_driver_id);
+ }
+
+ cpt_op = sess->cpt_op;
+
+ mdata = &(cptvf->meta_info);
+
+ if (likely(cpt_op & CPT_OP_CIPHER_MASK))
+ prep_req = fill_fc_params(op, sess, &mdata, &ret);
+ else
+ prep_req = fill_digest_params(op, sess, &mdata, &ret);
+
+ if (unlikely(!prep_req)) {
+ CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
+ "ret 0x%x", op, (unsigned int)cpt_op, ret);
+ goto req_fail;
+ }
+
+ /* Enqueue prepared instruction to HW */
+ ret = cpt_enqueue_req(instance, pqueue, prep_req);
+
+ if (unlikely(ret)) {
+ if (unlikely(ret == -EAGAIN))
+ goto req_fail;
+ CPT_LOG_DP_ERR("Error enqueing crypto request : error "
+ "code %d", ret);
+ goto req_fail;
+ }
+
+ return 0;
+
+req_fail:
+ if (mdata)
+ free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
+ return ret;
+}
+
+static __rte_always_inline int32_t __hot
+cpt_dequeue_burst(struct cpt_instance *instance, uint16_t cnt,
+ void *resp[], uint8_t cc[], struct pending_queue *pqueue)
+{
+ struct cpt_request_info *user_req;
+ struct rid *rid_e;
+ int i, count, pcount;
+ uint8_t ret;
+
+ pcount = pqueue->pending_count;
+ count = (cnt > pcount) ? pcount : cnt;
+
+ for (i = 0; i < count; i++) {
+ rid_e = &pqueue->rid_queue[pqueue->deq_head];
+ user_req = (struct cpt_request_info *)(rid_e->rid);
+
+ if (likely((i+1) < count))
+ rte_prefetch_non_temporal((void *)rid_e[1].rid);
+
+ ret = check_nb_command_id(user_req, instance);
+
+ if (unlikely(ret == ERR_REQ_PENDING)) {
+ /* Stop checking for completions */
+ break;
+ }
+
+ /* Return completion code and op handle */
+ cc[i] = (uint8_t)ret;
+ resp[i] = user_req->op;
+ CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
+ user_req, user_req->op, ret);
+
+ MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
+ pqueue->pending_count -= 1;
+ }
+
+ return i;
+}
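A hedged sketch of a dequeue-side caller: completion codes come back in cc[], with NO_ERR (0) taken here to mean success; the mapping onto rte_crypto_op status and the instance/pqueue variables are illustrative only.

    void *resp[32];
    uint8_t cc[32];
    int i, n;

    n = cpt_dequeue_burst(instance, RTE_DIM(resp), resp, cc, pqueue);
    for (i = 0; i < n; i++) {
        struct rte_crypto_op *cop = resp[i];

        cop->status = (cc[i] == NO_ERR) ?
            RTE_CRYPTO_OP_STATUS_SUCCESS :
            RTE_CRYPTO_OP_STATUS_ERROR;
    }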
+
+#endif /* _CPT_REQUEST_MGR_H_ */
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
new file mode 100644
index 00000000..c5a9f34b
--- /dev/null
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -0,0 +1,3648 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _CPT_UCODE_H_
+#define _CPT_UCODE_H_
+#include <stdbool.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+
+/*
+ * This file defines functions that are interfaces to the microcode spec.
+ *
+ */
+
+static uint8_t zuc_d[32] = {
+ 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
+ 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
+ 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
+ 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
+};
+
+static __rte_always_inline int
+cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
+{
+ /*
+ * Microcode only supports the following combinations:
+ * Encryption followed by authentication
+ * Authentication followed by decryption
+ */
+ if (xform->next) {
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
+ /* Unsupported as of now by microcode */
+ CPT_LOG_DP_ERR("Unsupported combination");
+ return -1;
+ }
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
+ /* For GMAC auth there is no cipher operation */
+ if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
+ xform->next->auth.algo !=
+ RTE_CRYPTO_AUTH_AES_GMAC) {
+ /* Unsupported as of now by microcode */
+ CPT_LOG_DP_ERR("Unsupported combination");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
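+/*
+ * For example, a CIPHER (encrypt) xform chained to an AUTH xform passes this
+ * check, while an AUTH xform chained to a CIPHER xform with
+ * RTE_CRYPTO_CIPHER_OP_ENCRYPT is rejected as unsupported.
+ */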
+
+static __rte_always_inline void
+gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
+{
+ int i, base;
+
+ for (i = 0; i < 4; i++) {
+ base = 4 * i;
+ keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
+ (ck[base + 2] << 8) | (ck[base + 3]);
+ keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
+ }
+}
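+/*
+ * Worked example: for CK = 00 01 02 03 ... 0f, the first four bytes
+ * 00 01 02 03 are packed into keyx[3] and the last four bytes 0c 0d 0e 0f
+ * into keyx[0], each word keeping its original byte order in memory, i.e.
+ * the 128-bit key is handed to the microcode with its four 32-bit words in
+ * reverse order.
+ */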
+
+static __rte_always_inline void
+cpt_fc_salt_update(void *ctx,
+ uint8_t *salt)
+{
+ struct cpt_ctx *cpt_ctx = ctx;
+ memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_validate_key_aes(uint16_t key_len)
+{
+ switch (key_len) {
+ case CPT_BYTE_16:
+ case CPT_BYTE_24:
+ case CPT_BYTE_32:
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
+ uint16_t key_len)
+{
+ int fc_type = 0;
+ switch (type) {
+ case PASSTHROUGH:
+ fc_type = FC_GEN;
+ break;
+ case DES3_CBC:
+ case DES3_ECB:
+ fc_type = FC_GEN;
+ break;
+ case AES_CBC:
+ case AES_ECB:
+ case AES_CFB:
+ case AES_CTR:
+ case AES_GCM:
+ if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
+ return -1;
+ fc_type = FC_GEN;
+ break;
+ case AES_XTS:
+ key_len = key_len / 2;
+ if (unlikely(key_len == CPT_BYTE_24)) {
+ CPT_LOG_DP_ERR("Invalid AES key len for XTS");
+ return -1;
+ }
+ if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
+ return -1;
+ fc_type = FC_GEN;
+ break;
+ case ZUC_EEA3:
+ case SNOW3G_UEA2:
+ if (unlikely(key_len != 16))
+ return -1;
+ /* No support for AEAD yet */
+ if (unlikely(cpt_ctx->hash_type))
+ return -1;
+ fc_type = ZUC_SNOW3G;
+ break;
+ case KASUMI_F8_CBC:
+ case KASUMI_F8_ECB:
+ if (unlikely(key_len != 16))
+ return -1;
+ /* No support for AEAD yet */
+ if (unlikely(cpt_ctx->hash_type))
+ return -1;
+ fc_type = KASUMI;
+ break;
+ default:
+ return -1;
+ }
+ return fc_type;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
+{
+ cpt_ctx->enc_cipher = 0;
+ CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
+{
+ mc_aes_type_t aes_key_type = 0;
+ switch (key_len) {
+ case CPT_BYTE_16:
+ aes_key_type = AES_128_BIT;
+ break;
+ case CPT_BYTE_24:
+ aes_key_type = AES_192_BIT;
+ break;
+ case CPT_BYTE_32:
+ aes_key_type = AES_256_BIT;
+ break;
+ default:
+ /* This should not happen */
+ CPT_LOG_DP_ERR("Invalid AES key len");
+ return;
+ }
+ CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
+ uint16_t key_len)
+{
+ uint32_t keyx[4];
+ cpt_ctx->snow3g = 1;
+ gen_key_snow3g(key, keyx);
+ memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ cpt_ctx->fc_type = ZUC_SNOW3G;
+ cpt_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
+ uint16_t key_len)
+{
+ cpt_ctx->snow3g = 0;
+ memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
+ memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ cpt_ctx->fc_type = ZUC_SNOW3G;
+ cpt_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
+ uint16_t key_len)
+{
+ cpt_ctx->k_ecb = 1;
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->zsk_flags = 0;
+ cpt_ctx->fc_type = KASUMI;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
+ uint16_t key_len)
+{
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->zsk_flags = 0;
+ cpt_ctx->fc_type = KASUMI;
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
+ uint16_t key_len, uint8_t *salt)
+{
+ struct cpt_ctx *cpt_ctx = ctx;
+ mc_fc_context_t *fctx = &cpt_ctx->fctx;
+ uint64_t *ctrl_flags = NULL;
+ int fc_type;
+
+ /* Validate key before proceeding */
+ fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
+ if (unlikely(fc_type == -1))
+ return -1;
+
+ if (fc_type == FC_GEN) {
+ cpt_ctx->fc_type = FC_GEN;
+ ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
+ *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
+ /*
+ * We need to always say IV is from DPTR as user can
+ * sometimes override the IV per operation.
+ */
+ CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
+ }
+
+ switch (type) {
+ case PASSTHROUGH:
+ cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
+ goto fc_success;
+ case DES3_CBC:
+ /* CPT performs DES using 3DES with the 8B DES key
+ * replicated twice more to match the 24B 3DES key.
+ * E.g. if the original key is "0x0a 0x0b", the new key is
+ * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
+ */
+ if (key_len == 8) {
+ /* Skipping the first 8B as it will be copied
+ * in the regular code flow
+ */
+ memcpy(fctx->enc.encr_key+key_len, key, key_len);
+ memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
+ }
+ break;
+ case DES3_ECB:
+ /* For DES3_ECB, the IV needs to come from CTX. */
+ CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
+ break;
+ case AES_CBC:
+ case AES_ECB:
+ case AES_CFB:
+ case AES_CTR:
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+ break;
+ case AES_GCM:
+ /* Even though the IV source is DPTR,
+ * the AES-GCM salt is taken from CTX.
+ */
+ if (salt) {
+ memcpy(fctx->enc.encr_iv, salt, 4);
+ /* Assuming it was just salt update
+ * and nothing else
+ */
+ if (!key)
+ goto fc_success;
+ }
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+ break;
+ case AES_XTS:
+ key_len = key_len / 2;
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+
+ /* Copy key2 for XTS into ipad */
+ memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
+ memcpy(fctx->hmac.ipad, &key[key_len], key_len);
+ break;
+ case SNOW3G_UEA2:
+ cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
+ goto success;
+ case ZUC_EEA3:
+ cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
+ goto success;
+ case KASUMI_F8_ECB:
+ cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
+ goto success;
+ case KASUMI_F8_CBC:
+ cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
+ goto success;
+ default:
+ break;
+ }
+
+ /* Only for FC_GEN case */
+
+ /* For GMAC auth, cipher must be NULL */
+ if (cpt_ctx->hash_type != GMAC_TYPE)
+ CPT_P_ENC_CTRL(fctx).enc_cipher = type;
+
+ memcpy(fctx->enc.encr_key, key, key_len);
+
+fc_success:
+ *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
+
+success:
+ cpt_ctx->enc_cipher = type;
+
+ return 0;
+}
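+/*
+ * Illustrative call (assuming "ctx" points to an otherwise initialised
+ * struct cpt_ctx and "key" holds a 16-byte AES key; plain AES-CBC takes no
+ * salt):
+ *
+ *   if (cpt_fc_ciph_set_key(ctx, AES_CBC, key, 16, NULL) < 0)
+ *       return -1;
+ */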
+
+static __rte_always_inline uint32_t
+fill_sg_comp(sg_comp_t *list,
+ uint32_t i,
+ phys_addr_t dma_addr,
+ uint32_t size)
+{
+ sg_comp_t *to = &list[i>>2];
+
+ to->u.s.len[i%4] = rte_cpu_to_be_16(size);
+ to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
+ i++;
+ return i;
+}
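+/*
+ * Each sg_comp_t packs four (length, pointer) slots, so entry "i" lands in
+ * component i/4, slot i%4, with both fields converted to big-endian for the
+ * microcode. E.g. the fifth entry (i == 4) goes to list[1].u.s.len[0] and
+ * list[1].ptr[0].
+ */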
+
+static __rte_always_inline uint32_t
+fill_sg_comp_from_buf(sg_comp_t *list,
+ uint32_t i,
+ buf_ptr_t *from)
+{
+ sg_comp_t *to = &list[i>>2];
+
+ to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
+ to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
+ i++;
+ return i;
+}
+
+static __rte_always_inline uint32_t
+fill_sg_comp_from_buf_min(sg_comp_t *list,
+ uint32_t i,
+ buf_ptr_t *from,
+ uint32_t *psize)
+{
+ sg_comp_t *to = &list[i >> 2];
+ uint32_t size = *psize;
+ uint32_t e_len;
+
+ e_len = (size > from->size) ? from->size : size;
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
+ *psize -= e_len;
+ i++;
+ return i;
+}
+
+/*
+ * This fills the SG I/O list expected by the microcode
+ * from the IOV given by the user.
+ */
+static __rte_always_inline uint32_t
+fill_sg_comp_from_iov(sg_comp_t *list,
+ uint32_t i,
+ iov_ptr_t *from, uint32_t from_offset,
+ uint32_t *psize, buf_ptr_t *extra_buf,
+ uint32_t extra_offset)
+{
+ int32_t j;
+ uint32_t extra_len = extra_buf ? extra_buf->size : 0;
+ uint32_t size = *psize - extra_len;
+ buf_ptr_t *bufs;
+
+ bufs = from->bufs;
+ for (j = 0; (j < from->buf_cnt) && size; j++) {
+ phys_addr_t e_dma_addr;
+ uint32_t e_len;
+ sg_comp_t *to = &list[i >> 2];
+
+ if (!bufs[j].size)
+ continue;
+
+ if (unlikely(from_offset)) {
+ if (from_offset >= bufs[j].size) {
+ from_offset -= bufs[j].size;
+ continue;
+ }
+ e_dma_addr = bufs[j].dma_addr + from_offset;
+ e_len = (size > (bufs[j].size - from_offset)) ?
+ (bufs[j].size - from_offset) : size;
+ from_offset = 0;
+ } else {
+ e_dma_addr = bufs[j].dma_addr;
+ e_len = (size > bufs[j].size) ?
+ bufs[j].size : size;
+ }
+
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
+
+ if (extra_len && (e_len >= extra_offset)) {
+ /* Break the data at given offset */
+ uint32_t next_len = e_len - extra_offset;
+ phys_addr_t next_dma = e_dma_addr + extra_offset;
+
+ if (!extra_offset) {
+ i--;
+ } else {
+ e_len = extra_offset;
+ size -= e_len;
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ }
+
+ /* Insert extra data ptr */
+ if (extra_len) {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] =
+ rte_cpu_to_be_16(extra_buf->size);
+ to->ptr[i % 4] =
+ rte_cpu_to_be_64(extra_buf->dma_addr);
+
+ /* size already decremented by extra len */
+ }
+
+ /* insert the rest of the data */
+ if (next_len) {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
+ size -= next_len;
+ }
+ extra_len = 0;
+
+ } else {
+ size -= e_len;
+ }
+ if (extra_offset)
+ extra_offset -= size;
+ i++;
+ }
+
+ *psize = size;
+ return (uint32_t)i;
+}
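+/*
+ * Illustrative call (variable names hypothetical): to splice an AAD buffer
+ * into the gather stream right after "passthrough_len" bytes of the source
+ * IOV, the prep routines below pass it as the extra buffer:
+ *
+ *   i = fill_sg_comp_from_iov(gather_comp, i, src_iov, 0, &size,
+ *                             aad_buf, passthrough_len);
+ */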
+
+static __rte_always_inline int
+cpt_digest_gen_prep(uint32_t flags,
+ uint64_t d_lens,
+ digest_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ struct cpt_request_info *req;
+ uint32_t size, i;
+ int32_t m_size;
+ uint16_t data_len, mac_len, key_len;
+ auth_type_t hash_type;
+ buf_ptr_t *meta_p;
+ struct cpt_ctx *ctx;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr, *m_vaddr;
+ uint64_t c_dma, m_dma;
+ opcode_info_t opcode;
+
+ if (!params || !params->ctx_buf.vaddr)
+ return ERR_BAD_INPUT_ARG;
+
+ ctx = params->ctx_buf.vaddr;
+ meta_p = &params->meta_buf;
+
+ if (!meta_p->vaddr || !meta_p->dma_addr)
+ return ERR_BAD_INPUT_ARG;
+
+ if (meta_p->size < sizeof(struct cpt_request_info))
+ return ERR_BAD_INPUT_ARG;
+
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+ m_size = meta_p->size;
+
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ hash_type = ctx->hash_type;
+ mac_len = ctx->mac_len;
+ key_len = ctx->auth_key_len;
+ data_len = AUTH_DLEN(d_lens);
+
+ /*GP op header */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
+ if (ctx->hmac) {
+ opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
+ vq_cmd_w0.s.dlen =
+ rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
+ } else {
+ opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
+ vq_cmd_w0.s.param1 = 0;
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
+ }
+
+ opcode.s.minor = 0;
+
+ /* Only the NULL-auth case enters this branch */
+ if (unlikely(!hash_type && !ctx->enc_cipher)) {
+ opcode.s.major = CPT_MAJOR_OP_MISC;
+ /* Minor op is passthrough */
+ opcode.s.minor = 0x03;
+ /* Send out completion code only */
+ vq_cmd_w0.s.param2 = 0x1;
+ }
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input gather list
+ */
+
+ i = 0;
+
+ if (ctx->hmac) {
+ uint64_t k_dma = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, auth_key);
+ /* Key */
+ i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
+ }
+
+ /* input data */
+ size = data_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
+ 0, &size, NULL, 0);
+ if (size) {
+ CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
+ " by %dB", size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ } else {
+ /*
+ * Looks like we need to support zero data
+ * gather ptr in case of hash & hmac
+ */
+ i++;
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter list
+ */
+
+ i = 0;
+ scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags & VALID_MAC_BUF) {
+ if (params->mac_buf.size < mac_len)
+ return ERR_BAD_INPUT_ARG;
+
+ size = mac_len;
+ i = fill_sg_comp_from_buf_min(scatter_comp, i,
+ &params->mac_buf, &size);
+ } else {
+ size = mac_len;
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->src_iov, data_len,
+ &size, NULL, 0);
+ if (size) {
+ CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short by"
+ " %dB", size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
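+/*
+ * DPTR layout used above (and by the other SG-mode prep routines): an 8-byte
+ * header of four 16-bit words, where word[2] carries the gather entry count
+ * and word[3] the scatter entry count (both big-endian), followed by the
+ * gather components and then the scatter components.
+ */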
+
+static __rte_always_inline int
+cpt_enc_hmac_prep(uint32_t flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *fc_params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t iv_offset = 0;
+ int32_t inputlen, outputlen, enc_dlen, auth_dlen;
+ struct cpt_ctx *cpt_ctx;
+ uint32_t cipher_type, hash_type;
+ uint32_t mac_len, size;
+ uint8_t iv_len = 16;
+ struct cpt_request_info *req;
+ buf_ptr_t *meta_p, *aad_buf = NULL;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len, aad_len = 0;
+ uint32_t passthrough_len = 0;
+ void *m_vaddr, *offset_vaddr;
+ uint64_t m_dma, offset_dma, ctx_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr;
+ uint64_t c_dma;
+ int32_t m_size;
+ opcode_info_t opcode;
+
+ meta_p = &fc_params->meta_buf;
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+ m_size = meta_p->size;
+
+ encr_offset = ENCR_OFFSET(d_offs);
+ auth_offset = AUTH_OFFSET(d_offs);
+ encr_data_len = ENCR_DLEN(d_lens);
+ auth_data_len = AUTH_DLEN(d_lens);
+ if (unlikely(flags & VALID_AAD_BUF)) {
+ /*
+ * We don't support both AAD
+ * and auth data given separately
+ */
+ auth_data_len = 0;
+ auth_offset = 0;
+ aad_len = fc_params->aad_buf.size;
+ aad_buf = &fc_params->aad_buf;
+ }
+ cpt_ctx = fc_params->ctx_buf.vaddr;
+ cipher_type = cpt_ctx->enc_cipher;
+ hash_type = cpt_ctx->hash_type;
+ mac_len = cpt_ctx->mac_len;
+
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* start cpt request info struct at 8 byte boundary */
+ size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
+ (uint8_t *)m_vaddr;
+
+ req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
+
+ size += sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ if (hash_type == GMAC_TYPE)
+ encr_data_len = 0;
+
+ if (unlikely(!(flags & VALID_IV_BUF))) {
+ iv_len = 0;
+ iv_offset = ENCR_IV_OFFSET(d_offs);
+ }
+
+ if (unlikely(flags & VALID_AAD_BUF)) {
+ /*
+ * When AAD is given, the data before encr_offset is passed through.
+ * Since AAD is given as a separate pointer and not as an offset,
+ * this is a special case: we need to fragment the input data
+ * into passthrough + encr_data and then insert the AAD in between.
+ */
+ if (hash_type != GMAC_TYPE) {
+ passthrough_len = encr_offset;
+ auth_offset = passthrough_len + iv_len;
+ encr_offset = passthrough_len + aad_len + iv_len;
+ auth_data_len = aad_len + encr_data_len;
+ } else {
+ passthrough_len = 16 + aad_len;
+ auth_offset = passthrough_len + iv_len;
+ auth_data_len = aad_len;
+ }
+ } else {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /* Encryption */
+ opcode.s.major = CPT_MAJOR_OP_FC;
+ opcode.s.minor = 0;
+
+ auth_dlen = auth_offset + auth_data_len;
+ enc_dlen = encr_data_len + encr_offset;
+ if (unlikely(encr_data_len & 0xf)) {
+ if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
+ enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
+ else if (likely((cipher_type == AES_CBC) ||
+ (cipher_type == AES_ECB)))
+ enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
+ }
+
+ if (unlikely(hash_type == GMAC_TYPE)) {
+ encr_offset = auth_dlen;
+ enc_dlen = 0;
+ }
+
+ if (unlikely(auth_dlen > enc_dlen)) {
+ inputlen = auth_dlen;
+ outputlen = auth_dlen + mac_len;
+ } else {
+ inputlen = enc_dlen;
+ outputlen = enc_dlen + mac_len;
+ }
+
+ /* GP op header */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+ vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ /*
+ * On 83XX the IV & offset control word cannot be part of
+ * the instruction and must be placed in the data buffer,
+ * so we do the direct mode processing only when
+ * head room is available
+ */
+ if (likely((flags & SINGLE_BUF_INPLACE) &&
+ (flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = fc_params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
+ /*
+ * This flag indicates that 24 bytes of head room and
+ * 8 bytes of tail room are available, so we can do
+ * DIRECT MODE within this limitation
+ */
+
+ offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+
+ /* DPTR */
+ req->ist.ei1 = offset_dma;
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ + outputlen - iv_len);
+
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
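+ /*
+ * The 8-byte offset control word written below packs auth_offset
+ * in bits [7:0], iv_offset in bits [15:8] and encr_offset from
+ * bit 16 upwards, and is stored big-endian.
+ */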
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ } else {
+ uint32_t i, g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+
+ /* This falls under strict SG mode */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+ size = OFF_CTRL_LEN + iv_len;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+
+ i = 0;
+
+ /* Offset control word that includes iv */
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (likely(size)) {
+ uint32_t aad_offset = aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(gather_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ fc_params->src_iov,
+ 0, &size,
+ aad_buf, aad_offset);
+ }
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter list
+ */
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* Add IV */
+ if (likely(iv_len)) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* output data or output data + digest*/
+ if (unlikely(flags & VALID_MAC_BUF)) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ scatter_comp,
+ i,
+ fc_params->bufs,
+ &size);
+ } else {
+ i = fill_sg_comp_from_iov(scatter_comp,
+ i,
+ fc_params->dst_iov,
+ 0,
+ &size,
+ aad_buf,
+ aad_offset);
+ }
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ /* mac_data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &fc_params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (likely(size)) {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ scatter_comp,
+ i,
+ fc_params->bufs,
+ &size);
+ } else {
+ i = fill_sg_comp_from_iov(scatter_comp,
+ i,
+ fc_params->dst_iov,
+ 0,
+ &size,
+ aad_buf,
+ aad_offset);
+ }
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ ctx_dma = fc_params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, fctx);
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = ctx_dma;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
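+/*
+ * Note: the direct (non-SG) path above is taken only for a single in-place
+ * buffer with enough head room for the offset control word plus IV and tail
+ * room for the alternate completion code; every other request shape falls
+ * back to the scatter/gather path.
+ */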
+
+static __rte_always_inline int
+cpt_dec_hmac_prep(uint32_t flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *fc_params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t iv_offset = 0, size;
+ int32_t inputlen, outputlen, enc_dlen, auth_dlen;
+ struct cpt_ctx *cpt_ctx;
+ int32_t hash_type, mac_len, m_size;
+ uint8_t iv_len = 16;
+ struct cpt_request_info *req;
+ buf_ptr_t *meta_p, *aad_buf = NULL;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len, aad_len = 0;
+ uint32_t passthrough_len = 0;
+ void *m_vaddr, *offset_vaddr;
+ uint64_t m_dma, offset_dma, ctx_dma;
+ opcode_info_t opcode;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ void *c_vaddr;
+ uint64_t c_dma;
+
+ meta_p = &fc_params->meta_buf;
+ m_vaddr = meta_p->vaddr;
+ m_dma = meta_p->dma_addr;
+ m_size = meta_p->size;
+
+ encr_offset = ENCR_OFFSET(d_offs);
+ auth_offset = AUTH_OFFSET(d_offs);
+ encr_data_len = ENCR_DLEN(d_lens);
+ auth_data_len = AUTH_DLEN(d_lens);
+
+ if (unlikely(flags & VALID_AAD_BUF)) {
+ /*
+ * We don't support both AAD
+ * and auth data given separately
+ */
+ auth_data_len = 0;
+ auth_offset = 0;
+ aad_len = fc_params->aad_buf.size;
+ aad_buf = &fc_params->aad_buf;
+ }
+
+ cpt_ctx = fc_params->ctx_buf.vaddr;
+ hash_type = cpt_ctx->hash_type;
+ mac_len = cpt_ctx->mac_len;
+
+ if (hash_type == GMAC_TYPE)
+ encr_data_len = 0;
+
+ if (unlikely(!(flags & VALID_IV_BUF))) {
+ iv_len = 0;
+ iv_offset = ENCR_IV_OFFSET(d_offs);
+ }
+
+ if (unlikely(flags & VALID_AAD_BUF)) {
+ /*
+ * When AAD is given, the data before encr_offset is passed through.
+ * Since AAD is given as a separate pointer and not as an offset,
+ * this is a special case: we need to fragment the input data
+ * into passthrough + encr_data and then insert the AAD in between.
+ */
+ if (hash_type != GMAC_TYPE) {
+ passthrough_len = encr_offset;
+ auth_offset = passthrough_len + iv_len;
+ encr_offset = passthrough_len + aad_len + iv_len;
+ auth_data_len = aad_len + encr_data_len;
+ } else {
+ passthrough_len = 16 + aad_len;
+ auth_offset = passthrough_len + iv_len;
+ auth_data_len = aad_len;
+ }
+ } else {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* start cpt request info structure at 8 byte alignment */
+ size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
+ (uint8_t *)m_vaddr;
+
+ req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
+
+ size += sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* Decryption */
+ opcode.s.major = CPT_MAJOR_OP_FC;
+ opcode.s.minor = 1;
+
+ enc_dlen = encr_offset + encr_data_len;
+ auth_dlen = auth_offset + auth_data_len;
+
+ if (auth_dlen > enc_dlen) {
+ inputlen = auth_dlen + mac_len;
+ outputlen = auth_dlen;
+ } else {
+ inputlen = enc_dlen + mac_len;
+ outputlen = enc_dlen;
+ }
+
+ if (hash_type == GMAC_TYPE)
+ encr_offset = inputlen;
+
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+ vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+
+ /*
+ * On 83XX the IV & offset control word cannot be part of
+ * the instruction and must be placed in the data buffer,
+ * so we do the direct mode processing only when
+ * head room is available
+ */
+ if (likely((flags & SINGLE_BUF_INPLACE) &&
+ (flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = fc_params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
+ /*
+ * This flag indicates that 24 bytes of head room and
+ * 8 bytes of tail room are available, so we can do
+ * DIRECT MODE within this limitation
+ */
+
+ offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+ req->ist.ei1 = offset_dma;
+
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
+ outputlen - iv_len);
+ /* Since this is decryption,
+ * don't touch the contents of the
+ * alternate completion code space, as it
+ * contains the HMAC.
+ */
+
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ } else {
+ uint64_t dptr_dma, rptr_dma;
+ uint32_t g_size_bytes, s_size_bytes;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint8_t i = 0;
+
+ /* This falls under strict SG mode */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+ size = OFF_CTRL_LEN + iv_len;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ *(uint64_t *)offset_vaddr =
+ rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
+ ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word that includes iv */
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ if (flags & VALID_MAC_BUF) {
+ size = inputlen - iv_len - mac_len;
+ if (size) {
+ /* input data only */
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ gather_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset = aad_len ?
+ passthrough_len : 0;
+
+ i = fill_sg_comp_from_iov(gather_comp,
+ i,
+ fc_params->src_iov,
+ 0, &size,
+ aad_buf,
+ aad_offset);
+ }
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(gather_comp, i,
+ &fc_params->mac_buf);
+ }
+ } else {
+ /* input data + mac */
+ size = inputlen - iv_len;
+ if (size) {
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ gather_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset = aad_len ?
+ passthrough_len : 0;
+
+ if (!fc_params->src_iov)
+ return ERR_BAD_INPUT_ARG;
+
+ i = fill_sg_comp_from_iov(
+ gather_comp, i,
+ fc_params->src_iov,
+ 0, &size,
+ aad_buf,
+ aad_offset);
+ }
+
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* Add iv */
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ if (unlikely(flags & SINGLE_BUF_INPLACE)) {
+ /* handle single buffer here */
+ i = fill_sg_comp_from_buf_min(scatter_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset = aad_len ?
+ passthrough_len : 0;
+
+ if (!fc_params->dst_iov)
+ return ERR_BAD_INPUT_ARG;
+
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ fc_params->dst_iov, 0,
+ &size, aad_buf,
+ aad_offset);
+ }
+
+ if (unlikely(size))
+ return ERR_BAD_INPUT_ARG;
+ }
+
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+ size += COMPLETION_CODE_SIZE;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ ctx_dma = fc_params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, fctx);
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = ctx_dma;
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen, outputlen;
+ struct cpt_ctx *cpt_ctx;
+ uint32_t mac_len = 0;
+ uint8_t snow3g, j;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset = 0, auth_offset = 0;
+ uint32_t encr_data_len = 0, auth_data_len = 0;
+ int flags, iv_len = 16, m_size;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma, offset_ctrl;
+ uint64_t *offset_vaddr, offset_dma;
+ uint32_t *iv_s, iv[4];
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+ m_size = buf_p->size;
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ mac_len = cpt_ctx->mac_len;
+ snow3g = cpt_ctx->snow3g;
+
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+
+ /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
+ opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
+ (0 << 3) | (flags & 0x7));
+
+ if (flags == 0x1) {
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ auth_data_len = AUTH_DLEN(d_lens);
+
+ /* EIA3 or UIA2 */
+ auth_offset = AUTH_OFFSET(d_offs);
+ auth_offset = auth_offset / 8;
+
+ /* consider iv len */
+ auth_offset += iv_len;
+
+ inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
+ outputlen = mac_len;
+
+ offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
+
+ } else {
+ /* EEA3 or UEA2 */
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ encr_data_len = ENCR_DLEN(d_lens);
+
+ encr_offset = ENCR_OFFSET(d_offs);
+ encr_offset = encr_offset / 8;
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* iv offset is 0 */
+ offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ }
+
+ /* IV */
+ iv_s = (flags == 0x1) ? params->auth_iv_buf :
+ params->iv_buf;
+
+ if (snow3g) {
+ /*
+ * DPDK seems to provide it as IV3 IV2 IV1 IV0
+ * in big-endian; MC needs it as IV0 IV1 IV2 IV3
+ */
+
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[3 - j];
+ } else {
+ /* ZUC doesn't need a swap */
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[j];
+ }
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+ vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+
+ /*
+ * On 83XX the IV & offset control word cannot be part of
+ * the instruction and must be placed in the data buffer,
+ * so we do the direct mode processing only when
+ * head room is available
+ */
+ if (likely((req_flags & SINGLE_BUF_INPLACE) &&
+ (req_flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = params->bufs[0].dma_addr;
+ /*
+ * This flag indicates that 24 bytes of head room and
+ * 8 bytes of tail room are available, so we can do
+ * DIRECT MODE within this limitation
+ */
+
+ offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
+ OFF_CTRL_LEN - iv_len);
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+
+ /* DPTR */
+ req->ist.ei1 = offset_dma;
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ + outputlen - iv_len);
+
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ if (likely(iv_len)) {
+ uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+ }
+
+ *offset_vaddr = offset_ctrl;
+ } else {
+ uint32_t i, g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t *iv_d;
+
+ /* save space for offset control word and iv */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+ m_size -= OFF_CTRL_LEN + iv_len;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ /* iv offset is 0 */
+ *offset_vaddr = offset_ctrl;
+
+ iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+
+ /* input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov,
+ 0, &size, NULL, 0);
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags == 0x1) {
+ /* IV in SLIST only for EEA3 & UEA2 */
+ iv_len = 0;
+ }
+
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN, iv_len);
+ }
+
+ /* Add output data */
+ if (req_flags & VALID_MAC_BUF) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, zs_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen = 0, outputlen;
+ struct cpt_ctx *cpt_ctx;
+ uint8_t snow3g, iv_len = 16;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset;
+ uint32_t encr_data_len;
+ int flags, m_size;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma;
+ uint64_t *offset_vaddr, offset_dma;
+ uint32_t *iv_s, iv[4], j;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+ m_size = buf_p->size;
+
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ encr_offset = ENCR_OFFSET(d_offs) / 8;
+ encr_data_len = ENCR_DLEN(d_lens);
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ snow3g = cpt_ctx->snow3g;
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+
+ /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
+ opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
+ (0 << 3) | (flags & 0x7));
+
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = encr_offset +
+ (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* IV */
+ iv_s = params->iv_buf;
+ if (snow3g) {
+ /*
+ * DPDK seems to provide it as IV3 IV2 IV1 IV0
+ * in big-endian; MC needs it as IV0 IV1 IV2 IV3
+ */
+
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[3 - j];
+ } else {
+ /* ZUC doesn't need a swap */
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[j];
+ }
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+
+ /*
+ * On 83XX the IV & offset control word cannot be part of
+ * the instruction and must be placed in the data buffer,
+ * so we do the direct mode processing only when
+ * head room is available
+ */
+ if (likely((req_flags & SINGLE_BUF_INPLACE) &&
+ (req_flags & SINGLE_BUF_HEADTAILROOM))) {
+ void *dm_vaddr = params->bufs[0].vaddr;
+ uint64_t dm_dma_addr = params->bufs[0].dma_addr;
+ /*
+ * This flag indicates that 24 bytes of head room and
+ * 8 bytes of tail room are available, so we can do
+ * DIRECT MODE within this limitation
+ */
+
+ offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
+ OFF_CTRL_LEN - iv_len);
+ offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
+
+ /* DPTR */
+ req->ist.ei1 = offset_dma;
+ /* RPTR should just exclude offset control word */
+ req->ist.ei2 = dm_dma_addr - iv_len;
+ req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ + outputlen - iv_len);
+
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ if (likely(iv_len)) {
+ uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
+ + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+ }
+
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ } else {
+ uint32_t i, g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t *iv_d;
+
+ /* save space for offset and iv... */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+ m_size -= OFF_CTRL_LEN + iv_len;
+
+ opcode.s.major |= CPT_DMA_MODE;
+
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word */
+
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+
+ i = fill_sg_comp(gather_comp, i, offset_dma,
+ OFF_CTRL_LEN + iv_len);
+
+ iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov,
+ 0, &size, NULL, 0);
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* IV */
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+ }
+
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, zs_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_kasumi_enc_prep(uint32_t req_flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen = 0, outputlen = 0;
+ struct cpt_ctx *cpt_ctx;
+ uint32_t mac_len = 0;
+ uint8_t i = 0;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len;
+ int flags, m_size;
+ uint8_t *iv_s, *iv_d, iv_len = 8;
+ uint8_t dir = 0;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma;
+ uint64_t *offset_vaddr, offset_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+ m_size = buf_p->size;
+
+ encr_offset = ENCR_OFFSET(d_offs) / 8;
+ auth_offset = AUTH_OFFSET(d_offs) / 8;
+ encr_data_len = ENCR_DLEN(d_lens);
+ auth_data_len = AUTH_DLEN(d_lens);
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ mac_len = cpt_ctx->mac_len;
+
+ if (flags == 0x0)
+ iv_s = params->iv_buf;
+ else
+ iv_s = params->auth_iv_buf;
+
+ dir = iv_s[8] & 0x1;
+
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+
+ /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
+ opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ (dir << 4) | (0 << 3) | (flags & 0x7));
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+ vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ /* consider iv len */
+ if (flags == 0x0) {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /* save space for offset ctrl and iv */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+ m_size -= OFF_CTRL_LEN + iv_len;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+
+ if (flags == 0x0) {
+ inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ } else {
+ inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
+ outputlen = mac_len;
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
+ }
+
+ i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
+
+ /* IV */
+ iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
+ memcpy(iv_d, iv_s, iv_len);
+
+ /* input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov, 0,
+ &size, NULL, 0);
+
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ if (flags == 0x1) {
+ /* IV in SLIST only for F8 */
+ iv_len = 0;
+ }
+
+ /* IV */
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* Add output data */
+ if (req_flags & VALID_MAC_BUF) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, k_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
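+/*
+ * Opcode note: the KASUMI minor opcode above is composed the same way as the
+ * ZUC/SNOW3G one: (1 << 6) | (k_ecb << 5) | (dir << 4) | (flags & 0x7), with
+ * the direction bit taken from the IV and zsk_flags selecting cipher (F8)
+ * versus authentication (F9).
+ */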
+
+static __rte_always_inline int
+cpt_kasumi_dec_prep(uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *params,
+ void *op,
+ void **prep_req)
+{
+ uint32_t size;
+ int32_t inputlen = 0, outputlen;
+ struct cpt_ctx *cpt_ctx;
+ uint8_t i = 0, iv_len = 8;
+ struct cpt_request_info *req;
+ buf_ptr_t *buf_p;
+ uint32_t encr_offset;
+ uint32_t encr_data_len;
+ int flags, m_size;
+ uint8_t dir = 0;
+ void *m_vaddr, *c_vaddr;
+ uint64_t m_dma, c_dma;
+ uint64_t *offset_vaddr, offset_dma;
+ vq_cmd_word0_t vq_cmd_w0;
+ vq_cmd_word3_t vq_cmd_w3;
+ opcode_info_t opcode;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ uint64_t dptr_dma, rptr_dma;
+ sg_comp_t *gather_comp;
+ sg_comp_t *scatter_comp;
+
+ buf_p = &params->meta_buf;
+ m_vaddr = buf_p->vaddr;
+ m_dma = buf_p->dma_addr;
+ m_size = buf_p->size;
+
+ encr_offset = ENCR_OFFSET(d_offs) / 8;
+ encr_data_len = ENCR_DLEN(d_lens);
+
+ cpt_ctx = params->ctx_buf.vaddr;
+ flags = cpt_ctx->zsk_flags;
+ /*
+ * Save initial space that followed app data for completion code &
+ * alternate completion code to fall in same cache line as app data
+ */
+ m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
+ m_dma += COMPLETION_CODE_SIZE;
+ size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
+ (uint8_t *)m_vaddr;
+
+ c_vaddr = (uint8_t *)m_vaddr + size;
+ c_dma = m_dma + size;
+ size += sizeof(cpt_res_s_t);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* Reserve memory for cpt request info */
+ req = m_vaddr;
+
+ size = sizeof(struct cpt_request_info);
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+
+ /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
+ opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ (dir << 4) | (0 << 3) | (flags & 0x7));
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+ vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* save space for offset ctrl & iv */
+ offset_vaddr = m_vaddr;
+ offset_dma = m_dma;
+
+ m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
+ m_dma += OFF_CTRL_LEN + iv_len;
+ m_size -= OFF_CTRL_LEN + iv_len;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+ dptr_dma = m_dma;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+
+ i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
+
+ /* IV */
+ memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
+ params->iv_buf, iv_len);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov,
+ 0, &size, NULL, 0);
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
+
+ /* IV */
+ i = fill_sg_comp(scatter_comp, i,
+ offset_dma + OFF_CTRL_LEN,
+ iv_len);
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+ if (size)
+ return ERR_BAD_INPUT_ARG;
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
+
+ size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+ m_dma += size;
+ m_size -= size;
+
+ /* cpt alternate completion address saved earlier */
+ req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
+ *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
+ rptr_dma = c_dma - 8;
+
+ req->ist.ei1 = dptr_dma;
+ req->ist.ei2 = rptr_dma;
+
+ /* First 16-bit swap then 64-bit swap */
+ /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
+ * to eliminate all the swapping
+ */
+ vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
+
+ /* vq command w3 */
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
+ offsetof(struct cpt_ctx, k_ctx);
+
+ /* 16 byte aligned cpt res address */
+ req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
+ *req->completion_addr = COMPLETION_CODE_INIT;
+ req->comp_baddr = c_dma;
+
+ /* Fill microcode part of instruction */
+ req->ist.ei0 = vq_cmd_w0.u64;
+ req->ist.ei3 = vq_cmd_w3.u64;
+
+ req->op = op;
+
+ *prep_req = req;
+ return 0;
+}
+
+static __rte_always_inline void *
+cpt_fc_dec_hmac_prep(uint32_t flags,
+ uint64_t d_offs,
+ uint64_t d_lens,
+ fc_params_t *fc_params,
+ void *op, int *ret_val)
+{
+ struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
+ uint8_t fc_type;
+ void *prep_req = NULL;
+ int ret;
+
+ fc_type = ctx->fc_type;
+
+ if (likely(fc_type == FC_GEN)) {
+ ret = cpt_dec_hmac_prep(flags, d_offs, d_lens,
+ fc_params, op, &prep_req);
+ } else if (fc_type == ZUC_SNOW3G) {
+ ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens,
+ fc_params, op, &prep_req);
+ } else if (fc_type == KASUMI) {
+ ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op,
+ &prep_req);
+ } else {
+ /*
+ * For the AUTH_ONLY case, the microcode only supports digest
+ * generation; verification must be done in software using memcmp().
+ */
+
+ ret = ERR_EIO;
+ }
+
+ if (unlikely(!prep_req))
+ *ret_val = ret;
+ return prep_req;
+}
+
+static __rte_always_inline void *__hot
+cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
+ fc_params_t *fc_params, void *op, int *ret_val)
+{
+ struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
+ uint8_t fc_type;
+ void *prep_req = NULL;
+ int ret;
+
+ fc_type = ctx->fc_type;
+
+ /* Common api for rest of the ops */
+ if (likely(fc_type == FC_GEN)) {
+ ret = cpt_enc_hmac_prep(flags, d_offs, d_lens,
+ fc_params, op, &prep_req);
+ } else if (fc_type == ZUC_SNOW3G) {
+ ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens,
+ fc_params, op, &prep_req);
+ } else if (fc_type == KASUMI) {
+ ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens,
+ fc_params, op, &prep_req);
+ } else if (fc_type == HASH_HMAC) {
+ ret = cpt_digest_gen_prep(flags, d_lens, fc_params, op,
+ &prep_req);
+ } else {
+ ret = ERR_EIO;
+ }
+
+ if (unlikely(!prep_req))
+ *ret_val = ret;
+ return prep_req;
+}
+
+static __rte_always_inline int
+cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
+ uint16_t key_len, uint16_t mac_len)
+{
+ struct cpt_ctx *cpt_ctx = ctx;
+ mc_fc_context_t *fctx = &cpt_ctx->fctx;
+ uint64_t *ctrl_flags = NULL;
+
+ if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
+ uint32_t keyx[4];
+
+ if (key_len != 16)
+ return -1;
+ /* No support for AEAD yet */
+ if (cpt_ctx->enc_cipher)
+ return -1;
+ /* For ZUC/SNOW3G/Kasumi */
+ switch (type) {
+ case SNOW3G_UIA2:
+ cpt_ctx->snow3g = 1;
+ gen_key_snow3g(key, keyx);
+ memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ cpt_ctx->fc_type = ZUC_SNOW3G;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ case ZUC_EIA3:
+ cpt_ctx->snow3g = 0;
+ memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
+ memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ cpt_ctx->fc_type = ZUC_SNOW3G;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ case KASUMI_F9_ECB:
+ /* Kasumi ECB mode */
+ cpt_ctx->k_ecb = 1;
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->fc_type = KASUMI;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ case KASUMI_F9_CBC:
+ memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ cpt_ctx->fc_type = KASUMI;
+ cpt_ctx->zsk_flags = 0x1;
+ break;
+ default:
+ return -1;
+ }
+ cpt_ctx->mac_len = 4;
+ cpt_ctx->hash_type = type;
+ return 0;
+ }
+
+ if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
+ if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
+ cpt_ctx->fc_type = HASH_HMAC;
+ }
+
+ ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
+ *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
+
+ /* For GMAC auth, cipher must be NULL */
+ if (type == GMAC_TYPE)
+ CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
+
+ CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
+ CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
+
+ if (key_len) {
+ cpt_ctx->hmac = 1;
+ memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
+ memcpy(cpt_ctx->auth_key, key, key_len);
+ cpt_ctx->auth_key_len = key_len;
+ memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
+ memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
+ memcpy(fctx->hmac.opad, key, key_len);
+ CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
+ }
+ *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_aead(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_aead_xform *aead_form;
+ cipher_type_t enc_type = 0; /* NULL Cipher type */
+ auth_type_t auth_type = 0; /* NULL Auth type */
+ uint32_t cipher_key_len = 0;
+ uint8_t zsk_flag = 0, aes_gcm = 0;
+ void *ctx;
+
+ aead_form = &xform->aead;
+
+ if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
+ sess->cpt_op |= CPT_OP_AUTH_GENERATE;
+ } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
+ sess->cpt_op |= CPT_OP_AUTH_VERIFY;
+ } else {
+ CPT_LOG_DP_ERR("Unknown cipher operation\n");
+ return -1;
+ }
+ switch (aead_form->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ enc_type = AES_GCM;
+ cipher_key_len = 16;
+ aes_gcm = 1;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
+ aead_form->algo);
+ return -1;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ aead_form->algo);
+ return -1;
+ }
+ if (aead_form->key.length < cipher_key_len) {
+ CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
+ (unsigned long)aead_form->key.length);
+ return -1;
+ }
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = aes_gcm;
+ sess->mac_len = aead_form->digest_length;
+ sess->iv_offset = aead_form->iv.offset;
+ sess->iv_length = aead_form->iv.length;
+ sess->aad_length = aead_form->aad_length;
+ ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
+
+ cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
+ aead_form->key.length, NULL);
+
+ cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
+
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_cipher(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_cipher_xform *c_form;
+ cipher_type_t enc_type = 0; /* NULL Cipher type */
+ uint32_t cipher_key_len = 0;
+ uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return -1;
+
+ c_form = &xform->cipher;
+
+ if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
+ else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
+ sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
+ else {
+ CPT_LOG_DP_ERR("Unknown cipher operation\n");
+ return -1;
+ }
+
+ switch (c_form->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ enc_type = AES_CBC;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ enc_type = DES3_CBC;
+ cipher_key_len = 24;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ /* DES is implemented using 3DES in hardware */
+ enc_type = DES3_CBC;
+ cipher_key_len = 8;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ enc_type = AES_CTR;
+ cipher_key_len = 16;
+ aes_ctr = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ enc_type = 0;
+ is_null = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ enc_type = KASUMI_F8_ECB;
+ cipher_key_len = 16;
+ zsk_flag = K_F8;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ enc_type = SNOW3G_UEA2;
+ cipher_key_len = 16;
+ zsk_flag = ZS_EA;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ enc_type = ZUC_EEA3;
+ cipher_key_len = 16;
+ zsk_flag = ZS_EA;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ enc_type = AES_XTS;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ enc_type = DES3_ECB;
+ cipher_key_len = 24;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ enc_type = AES_ECB;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
+ c_form->algo);
+ return -1;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ c_form->algo);
+ return -1;
+ }
+
+ if (c_form->key.length < cipher_key_len) {
+ CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
+ (unsigned long) c_form->key.length);
+ return -1;
+ }
+
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = aes_gcm;
+ sess->aes_ctr = aes_ctr;
+ sess->iv_offset = c_form->iv.offset;
+ sess->iv_length = c_form->iv.length;
+ sess->is_null = is_null;
+
+ cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
+ c_form->key.length, NULL);
+
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_auth(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_auth_xform *a_form;
+ auth_type_t auth_type = 0; /* NULL Auth type */
+ uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ goto error_out;
+
+ a_form = &xform->auth;
+
+ if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->cpt_op |= CPT_OP_AUTH_VERIFY;
+ else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cpt_op |= CPT_OP_AUTH_GENERATE;
+ else {
+ CPT_LOG_DP_ERR("Unknown auth operation");
+ return -1;
+ }
+
+ if (a_form->key.length > 64) {
+ CPT_LOG_DP_ERR("Auth key length is big");
+ return -1;
+ }
+
+ switch (a_form->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ /* Fall through */
+ case RTE_CRYPTO_AUTH_SHA1:
+ auth_type = SHA1_TYPE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256:
+ auth_type = SHA2_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512:
+ auth_type = SHA2_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ auth_type = GMAC_TYPE;
+ aes_gcm = 1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224:
+ auth_type = SHA2_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384:
+ auth_type = SHA2_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_MD5:
+ auth_type = MD5_TYPE;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ auth_type = KASUMI_F9_ECB;
+ /*
+ * Indicate that the direction bit must be extracted
+ * from the end of the source buffer
+ */
+ zsk_flag = K_F9;
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ auth_type = SNOW3G_UIA2;
+ zsk_flag = ZS_IA;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ auth_type = ZUC_EIA3;
+ zsk_flag = ZS_IA;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ auth_type = 0;
+ is_null = 1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
+ a_form->algo);
+ goto error_out;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
+ a_form->algo);
+ goto error_out;
+ }
+
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = aes_gcm;
+ sess->mac_len = a_form->digest_length;
+ sess->is_null = is_null;
+ if (zsk_flag) {
+ sess->auth_iv_offset = a_form->iv.offset;
+ sess->auth_iv_length = a_form->iv.length;
+ }
+ cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
+ a_form->key.length, a_form->digest_length);
+
+ return 0;
+
+error_out:
+ return -1;
+}
+
+static __rte_always_inline int
+fill_sess_gmac(struct rte_crypto_sym_xform *xform,
+ struct cpt_sess_misc *sess)
+{
+ struct rte_crypto_auth_xform *a_form;
+ cipher_type_t enc_type = 0; /* NULL Cipher type */
+ auth_type_t auth_type = 0; /* NULL Auth type */
+ uint8_t zsk_flag = 0, aes_gcm = 0;
+ void *ctx;
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -1;
+
+ a_form = &xform->auth;
+
+ if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cpt_op |= CPT_OP_ENCODE;
+ else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->cpt_op |= CPT_OP_DECODE;
+ else {
+ CPT_LOG_DP_ERR("Unknown auth operation");
+ return -1;
+ }
+
+ switch (a_form->algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ enc_type = AES_GCM;
+ auth_type = GMAC_TYPE;
+ break;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ a_form->algo);
+ return -1;
+ }
+
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = aes_gcm;
+ sess->is_gmac = 1;
+ sess->iv_offset = a_form->iv.offset;
+ sess->iv_length = a_form->iv.length;
+ sess->mac_len = a_form->digest_length;
+ ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
+
+ cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
+ a_form->key.length, NULL);
+ cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);
+
+ return 0;
+}
+
+static __rte_always_inline void *
+alloc_op_meta(struct rte_mbuf *m_src,
+ buf_ptr_t *buf,
+ int32_t len,
+ struct rte_mempool *cpt_meta_pool)
+{
+ uint8_t *mdata;
+
+#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
+ if (likely(m_src && (m_src->nb_segs == 1))) {
+ int32_t tailroom;
+ phys_addr_t mphys;
+
+ /* Check if tailroom is sufficient to hold meta data */
+ tailroom = rte_pktmbuf_tailroom(m_src);
+ if (likely(tailroom > len + 8)) {
+ mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
+ mphys = m_src->buf_physaddr + m_src->buf_len;
+ mdata -= len;
+ mphys -= len;
+ buf->vaddr = mdata;
+ buf->dma_addr = mphys;
+ buf->size = len;
+ /* Indicate that this is a mbuf allocated mdata */
+ mdata = (uint8_t *)((uint64_t)mdata | 1ull);
+ return mdata;
+ }
+ }
+#else
+ RTE_SET_USED(m_src);
+#endif
+
+ if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+ return NULL;
+
+ buf->vaddr = mdata;
+ buf->dma_addr = rte_mempool_virt2iova(mdata);
+ buf->size = len;
+
+ return mdata;
+}
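+
+/*
+ * Note (descriptive, not part of the original patch text): bit 0 of the
+ * pointer returned by alloc_op_meta() doubles as an ownership flag - it is
+ * set when the meta data lives in the mbuf tailroom (and so must not be
+ * returned to the mempool) and clear when it came from cpt_meta_pool.
+ * free_op_meta() below tests exactly this bit, and callers strip it with
+ * "(uintptr_t)mdata & ~1ull" before using the pointer.
+ */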
+
+/**
+ * free_op_meta - free meta buffer back to the mempool.
+ * @param mdata: pointer to the meta buffer.
+ * @param cpt_meta_pool: mempool from which the buffer was allocated.
+ */
+static __rte_always_inline void
+free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
+{
+ bool nofree = ((uintptr_t)mdata & 1ull);
+
+ if (likely(nofree))
+ return;
+ rte_mempool_put(cpt_meta_pool, mdata);
+}
+
+static __rte_always_inline uint32_t
+prepare_iov_from_pkt(struct rte_mbuf *pkt,
+ iov_ptr_t *iovec, uint32_t start_offset)
+{
+ uint16_t index = 0;
+ void *seg_data = NULL;
+ phys_addr_t seg_phys;
+ int32_t seg_size = 0;
+
+ if (!pkt) {
+ iovec->buf_cnt = 0;
+ return 0;
+ }
+
+ if (!start_offset) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+ } else {
+ while (start_offset >= pkt->data_len) {
+ start_offset -= pkt->data_len;
+ pkt = pkt->next;
+ }
+
+ seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
+ seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
+ seg_size = pkt->data_len - start_offset;
+ if (!seg_size)
+ return 1;
+ }
+
+ /* first seg */
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+ index++;
+ pkt = pkt->next;
+
+ while (unlikely(pkt != NULL)) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+ if (!seg_size)
+ break;
+
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+
+ index++;
+
+ pkt = pkt->next;
+ }
+
+ iovec->buf_cnt = index;
+ return 0;
+}
+
+static __rte_always_inline uint32_t
+prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
+ fc_params_t *param,
+ uint32_t *flags)
+{
+ uint16_t index = 0;
+ void *seg_data = NULL;
+ phys_addr_t seg_phys;
+ uint32_t seg_size = 0;
+ iov_ptr_t *iovec;
+
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+
+ /* first seg */
+ if (likely(!pkt->next)) {
+ uint32_t headroom, tailroom;
+
+ *flags |= SINGLE_BUF_INPLACE;
+ headroom = rte_pktmbuf_headroom(pkt);
+ tailroom = rte_pktmbuf_tailroom(pkt);
+ if (likely((headroom >= 24) &&
+ (tailroom >= 8))) {
+ /* On 83XX this is a prerequisite for Direct mode */
+ *flags |= SINGLE_BUF_HEADTAILROOM;
+ }
+ param->bufs[0].vaddr = seg_data;
+ param->bufs[0].dma_addr = seg_phys;
+ param->bufs[0].size = seg_size;
+ return 0;
+ }
+ iovec = param->src_iov;
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+ index++;
+ pkt = pkt->next;
+
+ while (unlikely(pkt != NULL)) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_size = pkt->data_len;
+
+ if (!seg_size)
+ break;
+
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].dma_addr = seg_phys;
+ iovec->bufs[index].size = seg_size;
+
+ index++;
+
+ pkt = pkt->next;
+ }
+
+ iovec->buf_cnt = index;
+ return 0;
+}
+
+static __rte_always_inline void *
+fill_fc_params(struct rte_crypto_op *cop,
+ struct cpt_sess_misc *sess_misc,
+ void **mdata_ptr,
+ int *op_ret)
+{
+ uint32_t space = 0;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ void *mdata;
+ uintptr_t *op;
+ uint32_t mc_hash_off;
+ uint32_t flags = 0;
+ uint64_t d_offs, d_lens;
+ void *prep_req = NULL;
+ struct rte_mbuf *m_src, *m_dst;
+ uint8_t cpt_op = sess_misc->cpt_op;
+ uint8_t zsk_flag = sess_misc->zsk_flag;
+ uint8_t aes_gcm = sess_misc->aes_gcm;
+ uint16_t mac_len = sess_misc->mac_len;
+#ifdef CPT_ALWAYS_USE_SG_MODE
+ uint8_t inplace = 0;
+#else
+ uint8_t inplace = 1;
+#endif
+ fc_params_t fc_params;
+ char src[SRC_IOV_SIZE];
+ char dst[SRC_IOV_SIZE];
+ uint32_t iv_buf[4];
+ struct cptvf_meta_info *cpt_m_info =
+ (struct cptvf_meta_info *)(*mdata_ptr);
+
+ if (likely(sess_misc->iv_length)) {
+ flags |= VALID_IV_BUF;
+ fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
+ uint8_t *, sess_misc->iv_offset);
+ if (sess_misc->aes_ctr &&
+ unlikely(sess_misc->iv_length != 16)) {
+ memcpy((uint8_t *)iv_buf,
+ rte_crypto_op_ctod_offset(cop,
+ uint8_t *, sess_misc->iv_offset), 12);
+ iv_buf[3] = rte_cpu_to_be_32(0x1);
+ fc_params.iv_buf = iv_buf;
+ }
+ }
+
+ if (zsk_flag) {
+ fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
+ uint8_t *,
+ sess_misc->auth_iv_offset);
+ if (zsk_flag == K_F9) {
+ CPT_LOG_DP_ERR("Should not reach here for "
+ "kasumi F9\n");
+ }
+ if (zsk_flag != ZS_EA)
+ inplace = 0;
+ }
+ m_src = sym_op->m_src;
+ m_dst = sym_op->m_dst;
+
+ if (aes_gcm) {
+ uint8_t *salt;
+ uint8_t *aad_data;
+ uint16_t aad_len;
+
+ d_offs = sym_op->aead.data.offset;
+ d_lens = sym_op->aead.data.length;
+ mc_hash_off = sym_op->aead.data.offset +
+ sym_op->aead.data.length;
+
+ aad_data = sym_op->aead.aad.data;
+ aad_len = sess_misc->aad_length;
+ if (likely((aad_data + aad_len) ==
+ rte_pktmbuf_mtod_offset(m_src,
+ uint8_t *,
+ sym_op->aead.data.offset))) {
+ d_offs = (d_offs - aad_len) | (d_offs << 16);
+ d_lens = (d_lens + aad_len) | (d_lens << 32);
+ } else {
+ fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
+ fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
+ fc_params.aad_buf.size = aad_len;
+ flags |= VALID_AAD_BUF;
+ inplace = 0;
+ d_offs = d_offs << 16;
+ d_lens = d_lens << 32;
+ }
+
+ salt = fc_params.iv_buf;
+ if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
+ cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
+ sess_misc->salt = *(uint32_t *)salt;
+ }
+ fc_params.iv_buf = salt + 4;
+ if (likely(mac_len)) {
+ struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
+ m_src;
+
+ if (!m)
+ m = m_src;
+
+ /* hmac immediately following data is best case */
+ if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+ mc_hash_off !=
+ (uint8_t *)sym_op->aead.digest.data)) {
+ flags |= VALID_MAC_BUF;
+ fc_params.mac_buf.size = sess_misc->mac_len;
+ fc_params.mac_buf.vaddr =
+ sym_op->aead.digest.data;
+ fc_params.mac_buf.dma_addr =
+ sym_op->aead.digest.phys_addr;
+ inplace = 0;
+ }
+ }
+ } else {
+ d_offs = sym_op->cipher.data.offset;
+ d_lens = sym_op->cipher.data.length;
+ mc_hash_off = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length;
+ d_offs = (d_offs << 16) | sym_op->auth.data.offset;
+ d_lens = (d_lens << 32) | sym_op->auth.data.length;
+
+ if (mc_hash_off < (sym_op->auth.data.offset +
+ sym_op->auth.data.length)){
+ mc_hash_off = (sym_op->auth.data.offset +
+ sym_op->auth.data.length);
+ }
+ /* for gmac, salt should be updated like in gcm */
+ if (unlikely(sess_misc->is_gmac)) {
+ uint8_t *salt;
+ salt = fc_params.iv_buf;
+ if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
+ cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
+ sess_misc->salt = *(uint32_t *)salt;
+ }
+ fc_params.iv_buf = salt + 4;
+ }
+ if (likely(mac_len)) {
+ struct rte_mbuf *m;
+
+ m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
+ if (!m)
+ m = m_src;
+
+ /* hmac immediately following data is best case */
+ if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+ mc_hash_off !=
+ (uint8_t *)sym_op->auth.digest.data)) {
+ flags |= VALID_MAC_BUF;
+ fc_params.mac_buf.size =
+ sess_misc->mac_len;
+ fc_params.mac_buf.vaddr =
+ sym_op->auth.digest.data;
+ fc_params.mac_buf.dma_addr =
+ sym_op->auth.digest.phys_addr;
+ inplace = 0;
+ }
+ }
+ }
+ fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
+ fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
+
+ if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
+ inplace = 0;
+
+ if (likely(!m_dst && inplace)) {
+ /* In-place case: a single buffer with no separate AAD or
+ * MAC buffer and no wireless (air) algorithm
+ */
+ fc_params.dst_iov = fc_params.src_iov = (void *)src;
+
+ if (unlikely(prepare_iov_from_pkt_inplace(m_src,
+ &fc_params,
+ &flags))) {
+ CPT_LOG_DP_ERR("Prepare inplace src iov failed");
+ *op_ret = -1;
+ return NULL;
+ }
+
+ } else {
+ /* Out of place processing */
+ fc_params.src_iov = (void *)src;
+ fc_params.dst_iov = (void *)dst;
+
+ /* Store SG I/O in the api for reuse */
+ if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
+ CPT_LOG_DP_ERR("Prepare src iov failed");
+ *op_ret = -1;
+ return NULL;
+ }
+
+ if (unlikely(m_dst != NULL)) {
+ uint32_t pkt_len;
+
+ /* Try to make room as much as src has */
+ m_dst = sym_op->m_dst;
+ pkt_len = rte_pktmbuf_pkt_len(m_dst);
+
+ if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
+ pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
+ if (!rte_pktmbuf_append(m_dst, pkt_len)) {
+ CPT_LOG_DP_ERR("Not enough space in "
+ "m_dst %p, need %u"
+ " more",
+ m_dst, pkt_len);
+ return NULL;
+ }
+ }
+
+ if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
+ CPT_LOG_DP_ERR("Prepare dst iov failed for "
+ "m_dst %p", m_dst);
+ return NULL;
+ }
+ } else {
+ fc_params.dst_iov = (void *)src;
+ }
+ }
+
+ if (likely(flags & SINGLE_BUF_HEADTAILROOM))
+ mdata = alloc_op_meta(m_src,
+ &fc_params.meta_buf,
+ cpt_m_info->cptvf_op_sb_mlen,
+ cpt_m_info->cptvf_meta_pool);
+ else
+ mdata = alloc_op_meta(NULL,
+ &fc_params.meta_buf,
+ cpt_m_info->cptvf_op_mlen,
+ cpt_m_info->cptvf_meta_pool);
+
+ if (unlikely(mdata == NULL)) {
+ CPT_LOG_DP_ERR("Error allocating meta buffer for request");
+ return NULL;
+ }
+
+ op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
+ op[0] = (uintptr_t)mdata;
+ op[1] = (uintptr_t)cop;
+ op[2] = op[3] = 0; /* Used to indicate auth verify */
+ space += 4 * sizeof(uint64_t);
+
+ fc_params.meta_buf.vaddr = (uint8_t *)op + space;
+ fc_params.meta_buf.dma_addr += space;
+ fc_params.meta_buf.size -= space;
+
+ /* Finally prepare the instruction */
+ if (cpt_op & CPT_OP_ENCODE)
+ prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op, op_ret);
+ else
+ prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op, op_ret);
+
+ if (unlikely(!prep_req))
+ free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
+ *mdata_ptr = mdata;
+ return prep_req;
+}
+
+static __rte_always_inline void
+compl_auth_verify(struct rte_crypto_op *op,
+ uint8_t *gen_mac,
+ uint64_t mac_len)
+{
+ uint8_t *mac;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (sym_op->auth.digest.data)
+ mac = sym_op->auth.digest.data;
+ else
+ mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *,
+ sym_op->auth.data.length +
+ sym_op->auth.data.offset);
+ if (!mac) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (memcmp(mac, gen_mac, mac_len))
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
+static __rte_always_inline int
+instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct rte_crypto_sym_xform *chain;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (cpt_is_algo_supported(xform))
+ goto err;
+
+ chain = xform;
+ while (chain) {
+ switch (chain->type) {
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ if (fill_sess_aead(chain, sess))
+ goto err;
+ break;
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ if (fill_sess_cipher(chain, sess))
+ goto err;
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (fill_sess_gmac(chain, sess))
+ goto err;
+ } else {
+ if (fill_sess_auth(chain, sess))
+ goto err;
+ }
+ break;
+ default:
+ CPT_LOG_DP_ERR("Invalid crypto xform type");
+ break;
+ }
+ chain = chain->next;
+ }
+
+ return 0;
+
+err:
+ return -1;
+}
+
+static __rte_always_inline void
+find_kasumif9_direction_and_length(uint8_t *src,
+ uint32_t counter_num_bytes,
+ uint32_t *addr_length_in_bits,
+ uint8_t *addr_direction)
+{
+ uint8_t found = 0;
+ while (!found && counter_num_bytes > 0) {
+ counter_num_bytes--;
+ if (src[counter_num_bytes] == 0x00)
+ continue;
+ if (src[counter_num_bytes] == 0x80) {
+ *addr_direction = src[counter_num_bytes - 1] & 0x1;
+ *addr_length_in_bits = counter_num_bytes * 8 - 1;
+ found = 1;
+ } else {
+ int i = 0;
+ uint8_t last_byte = src[counter_num_bytes];
+ for (i = 0; i < 8 && found == 0; i++) {
+ if (last_byte & (1 << i)) {
+ *addr_direction = (last_byte >> (i+1))
+ & 0x1;
+ if (i != 6)
+ *addr_length_in_bits =
+ counter_num_bytes * 8
+ + (8 - (i + 2));
+ else
+ *addr_length_in_bits =
+ counter_num_bytes * 8;
+ found = 1;
+ }
+ }
+ }
+ }
+}
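+
+/*
+ * Worked example (illustrative): the scan above walks back from the end of
+ * the padded KASUMI F9 input, which ends with DIRECTION(1 bit) || '1' ||
+ * '0'* padding. If the last non-zero byte is 0x40 (0100 0000b) at byte
+ * index k, the first set bit seen from the LSB side is i == 6, so the
+ * direction is bit 7 of that byte and the reported length is k * 8 bits
+ * (the message ends on a byte boundary). If that byte is exactly 0x80, the
+ * direction bit is the LSB of the previous byte and the length is
+ * k * 8 - 1 bits.
+ */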
+
+/*
+ * This handles all auth only except AES_GMAC
+ */
+static __rte_always_inline void *
+fill_digest_params(struct rte_crypto_op *cop,
+ struct cpt_sess_misc *sess,
+ void **mdata_ptr,
+ int *op_ret)
+{
+ uint32_t space = 0;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ void *mdata;
+ phys_addr_t mphys;
+ uint64_t *op;
+ uint32_t auth_range_off;
+ uint32_t flags = 0;
+ uint64_t d_offs = 0, d_lens;
+ void *prep_req = NULL;
+ struct rte_mbuf *m_src, *m_dst;
+ uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
+ uint8_t zsk_flag = sess->zsk_flag;
+ uint16_t mac_len = sess->mac_len;
+ fc_params_t params;
+ char src[SRC_IOV_SIZE];
+ uint8_t iv_buf[16];
+ struct cptvf_meta_info *cpt_m_info =
+ (struct cptvf_meta_info *)(*mdata_ptr);
+
+ memset(&params, 0, sizeof(fc_params_t));
+
+ m_src = sym_op->m_src;
+
+ /* For just digest, let's force mempool alloc */
+ mdata = alloc_op_meta(NULL, &params.meta_buf, cpt_m_info->cptvf_op_mlen,
+ cpt_m_info->cptvf_meta_pool);
+ if (mdata == NULL) {
+ CPT_LOG_DP_ERR("Error allocating meta buffer for request");
+ *op_ret = -ENOMEM;
+ return NULL;
+ }
+
+ mphys = params.meta_buf.dma_addr;
+
+ op = mdata;
+ op[0] = (uintptr_t)mdata;
+ op[1] = (uintptr_t)cop;
+ op[2] = op[3] = 0; /* Used to indicate auth verify */
+ space += 4 * sizeof(uint64_t);
+
+ auth_range_off = sym_op->auth.data.offset;
+
+ flags = VALID_MAC_BUF;
+ params.src_iov = (void *)src;
+ if (unlikely(zsk_flag)) {
+ /*
+ * For ZUC, KASUMI and SNOW3G the offsets are in bits, so send
+ * pass-through even for the auth-only case and let the
+ * microcode handle it
+ */
+ d_offs = auth_range_off;
+ auth_range_off = 0;
+ params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
+ uint8_t *, sess->auth_iv_offset);
+ if (zsk_flag == K_F9) {
+ uint32_t length_in_bits, num_bytes;
+ uint8_t *src, direction = 0;
+ uint32_t counter_num_bytes;
+
+ memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
+ uint8_t *), 8);
+ /*
+ * This is kasumi f9, take direction from
+ * source buffer
+ */
+ length_in_bits = cop->sym->auth.data.length;
+ num_bytes = (length_in_bits >> 3);
+ counter_num_bytes = num_bytes;
+ src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
+ find_kasumif9_direction_and_length(src,
+ counter_num_bytes,
+ &length_in_bits,
+ &direction);
+ length_in_bits -= 64;
+ cop->sym->auth.data.offset += 64;
+ d_offs = cop->sym->auth.data.offset;
+ auth_range_off = d_offs / 8;
+ cop->sym->auth.data.length = length_in_bits;
+
+ /* Store it at end of auth iv */
+ iv_buf[8] = direction;
+ params.auth_iv_buf = iv_buf;
+ }
+ }
+
+ d_lens = sym_op->auth.data.length;
+
+ params.ctx_buf.vaddr = SESS_PRIV(sess);
+ params.ctx_buf.dma_addr = sess->ctx_dma_addr;
+
+ if (auth_op == CPT_OP_AUTH_GENERATE) {
+ if (sym_op->auth.digest.data) {
+ /*
+ * Digest to be generated
+ * in separate buffer
+ */
+ params.mac_buf.size =
+ sess->mac_len;
+ params.mac_buf.vaddr =
+ sym_op->auth.digest.data;
+ params.mac_buf.dma_addr =
+ sym_op->auth.digest.phys_addr;
+ } else {
+ uint32_t off = sym_op->auth.data.offset +
+ sym_op->auth.data.length;
+ int32_t dlen, space;
+
+ m_dst = sym_op->m_dst ?
+ sym_op->m_dst : sym_op->m_src;
+ dlen = rte_pktmbuf_pkt_len(m_dst);
+
+ space = off + mac_len - dlen;
+ if (space > 0)
+ if (!rte_pktmbuf_append(m_dst, space)) {
+ CPT_LOG_DP_ERR("Failed to extend "
+ "mbuf by %uB", space);
+ goto err;
+ }
+
+ params.mac_buf.vaddr =
+ rte_pktmbuf_mtod_offset(m_dst, void *, off);
+ params.mac_buf.dma_addr =
+ rte_pktmbuf_mtophys_offset(m_dst, off);
+ params.mac_buf.size = mac_len;
+ }
+ } else {
+ /* Need space for storing generated mac */
+ params.mac_buf.vaddr = (uint8_t *)mdata + space;
+ params.mac_buf.dma_addr = mphys + space;
+ params.mac_buf.size = mac_len;
+ space += RTE_ALIGN_CEIL(mac_len, 8);
+ op[2] = (uintptr_t)params.mac_buf.vaddr;
+ op[3] = mac_len;
+ }
+
+ params.meta_buf.vaddr = (uint8_t *)mdata + space;
+ params.meta_buf.dma_addr = mphys + space;
+ params.meta_buf.size -= space;
+
+ /* Out of place processing */
+ params.src_iov = (void *)src;
+
+ /* Store SG I/O in the API for reuse */
+ if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
+ CPT_LOG_DP_ERR("Prepare src iov failed");
+ *op_ret = -1;
+ goto err;
+ }
+
+ prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+ &params, op, op_ret);
+ *mdata_ptr = mdata;
+ return prep_req;
+err:
+ if (unlikely(!prep_req))
+ free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
+ return NULL;
+}
+
+#endif /* _CPT_UCODE_H_ */
diff --git a/drivers/common/cpt/meson.build b/drivers/common/cpt/meson.build
new file mode 100644
index 00000000..0a905aa4
--- /dev/null
+++ b/drivers/common/cpt/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+sources = files('cpt_pmd_ops_helper.c')
+
+deps = ['kvargs', 'pci', 'cryptodev']
+includes += include_directories('../../crypto/octeontx')
+allow_experimental_apis = true
diff --git a/drivers/common/cpt/rte_common_cpt_version.map b/drivers/common/cpt/rte_common_cpt_version.map
new file mode 100644
index 00000000..dec614f0
--- /dev/null
+++ b/drivers/common/cpt/rte_common_cpt_version.map
@@ -0,0 +1,6 @@
+DPDK_18.11 {
+ global:
+
+ cpt_pmd_ops_helper_get_mlen_direct_mode;
+ cpt_pmd_ops_helper_get_mlen_sg_mode;
+};
diff --git a/drivers/common/dpaax/Makefile b/drivers/common/dpaax/Makefile
new file mode 100644
index 00000000..94d2cf0c
--- /dev/null
+++ b/drivers/common/dpaax/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_dpaax.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# versioning export map
+EXPORT_MAP := rte_common_dpaax_version.map
+
+# library version
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += dpaax_iova_table.c
+
+LDLIBS += -lrte_eal
+
+SYMLINK-y-include += dpaax_iova_table.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
\ No newline at end of file
diff --git a/drivers/common/dpaax/dpaax_iova_table.c b/drivers/common/dpaax/dpaax_iova_table.c
new file mode 100644
index 00000000..2dd38a92
--- /dev/null
+++ b/drivers/common/dpaax/dpaax_iova_table.c
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <rte_memory.h>
+
+#include "dpaax_iova_table.h"
+#include "dpaax_logs.h"
+
+/* Global dpaax logger identifier */
+int dpaax_logger;
+
+/* Global table reference */
+struct dpaax_iova_table *dpaax_iova_table_p;
+
+static int dpaax_handle_memevents(void);
+
+/* A structure representing the device-tree node available in /proc/device-tree.
+ */
+struct reg_node {
+ phys_addr_t addr;
+ size_t len;
+};
+
+/* An ntohll-equivalent routine
+ * XXX: This is only applicable in a 64-bit environment.
+ */
+static void
+rotate_8(unsigned char *arr)
+{
+ uint32_t temp;
+ uint32_t *first_half;
+ uint32_t *second_half;
+
+ first_half = (uint32_t *)(arr);
+ second_half = (uint32_t *)(arr + 4);
+
+ temp = *first_half;
+ *first_half = *second_half;
+ *second_half = temp;
+
+ *first_half = ntohl(*first_half);
+ *second_half = ntohl(*second_half);
+}
+
+/* read_memory_node
+ * Memory layout for DPAAx platforms (LS1043, LS1046, LS1088, LS2088, LX2160)
+ * are populated by Uboot and available in device tree:
+ * /proc/device-tree/memory@<address>/reg <= register.
+ * Entries are of the form:
+ * (<8 byte start addr><8 byte length>)(..more similar blocks of start,len>)..
+ *
+ * @param count
+ * OUT populate number of entries found in memory node
+ * @return
+ * Pointer to array of reg_node elements, count size
+ */
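+/*
+ * For example (illustrative values), a "reg" blob of
+ *   00 00 00 00 80 00 00 00  00 00 00 00 80 00 00 00
+ * is a single entry decoding to addr = 0x80000000 and len = 0x80000000
+ * (2 GB), once each big-endian 8-byte field is converted to host order by
+ * rotate_8().
+ */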
+static struct reg_node *
+read_memory_node(unsigned int *count)
+{
+ int fd, ret, i;
+ unsigned int j;
+ glob_t result = {0};
+ struct stat statbuf = {0};
+ char file_data[MEM_NODE_FILE_LEN];
+ struct reg_node *nodes = NULL;
+
+ *count = 0;
+
+ ret = glob(MEM_NODE_PATH_GLOB, 0, NULL, &result);
+ if (ret != 0) {
+ DPAAX_DEBUG("Unable to glob device-tree memory node: (%s)(%d)",
+ MEM_NODE_PATH_GLOB, ret);
+ goto out;
+ }
+
+ if (result.gl_pathc != 1) {
+ /* Either more than one memory@<addr> node found, or none.
+ * In either case, cannot work ahead.
+ */
+ DPAAX_DEBUG("Found (%zu) entries in device-tree. Not supported!",
+ result.gl_pathc);
+ goto out;
+ }
+
+ DPAAX_DEBUG("Opening and parsing device-tree node: (%s)",
+ result.gl_pathv[0]);
+ fd = open(result.gl_pathv[0], O_RDONLY);
+ if (fd < 0) {
+ DPAAX_DEBUG("Unable to open the device-tree node: (%s)(fd=%d)",
+ MEM_NODE_PATH_GLOB, fd);
+ goto cleanup;
+ }
+
+ /* Stat to get the file size */
+ ret = fstat(fd, &statbuf);
+ if (ret != 0) {
+ DPAAX_DEBUG("Unable to get device-tree memory node size.");
+ goto cleanup;
+ }
+
+ DPAAX_DEBUG("Size of device-tree mem node: %lu", statbuf.st_size);
+ if (statbuf.st_size > MEM_NODE_FILE_LEN) {
+ DPAAX_DEBUG("More memory nodes available than assumed.");
+ DPAAX_DEBUG("System may not work properly!");
+ }
+
+ ret = read(fd, file_data, statbuf.st_size > MEM_NODE_FILE_LEN ?
+ MEM_NODE_FILE_LEN : statbuf.st_size);
+ if (ret <= 0) {
+ DPAAX_DEBUG("Unable to read device-tree memory node: (%d)",
+ ret);
+ goto cleanup;
+ }
+
+ /* The reg node should be multiple of 16 bytes, 8 bytes each for addr
+ * and len.
+ */
+ *count = (statbuf.st_size / 16);
+ if ((*count) <= 0 || (statbuf.st_size % 16 != 0)) {
+ DPAAX_DEBUG("Invalid memory node values or count. (size=%lu)",
+ statbuf.st_size);
+ goto cleanup;
+ }
+
+ /* each entry is of 16 bytes, and size/16 is total count of entries */
+ nodes = malloc(sizeof(struct reg_node) * (*count));
+ if (!nodes) {
+ DPAAX_DEBUG("Failure in allocating working memory.");
+ goto cleanup;
+ }
+ memset(nodes, 0, sizeof(struct reg_node) * (*count));
+
+ for (i = 0, j = 0; i < (statbuf.st_size) && j < (*count); i += 16, j++) {
+ memcpy(&nodes[j], file_data + i, 16);
+ /* Rotate (ntohl) each 8 byte entry */
+ rotate_8((unsigned char *)(&(nodes[j].addr)));
+ rotate_8((unsigned char *)(&(nodes[j].len)));
+ }
+
+ DPAAX_DEBUG("Device-tree memory node data:");
+ for (j = 0; j < *count; j++)
+ DPAAX_DEBUG("\n %08" PRIx64 " %08zu", nodes[j].addr, nodes[j].len);
+
+cleanup:
+ close(fd);
+ globfree(&result);
+out:
+ return nodes;
+}
+
+int
+dpaax_iova_table_populate(void)
+{
+ int ret;
+ unsigned int i, node_count;
+ size_t tot_memory_size, total_table_size;
+ struct reg_node *nodes;
+ struct dpaax_iovat_element *entry;
+
+ /* dpaax_iova_table_p is a singleton - only one instance should be
+ * created.
+ */
+ if (dpaax_iova_table_p) {
+ DPAAX_DEBUG("Multiple allocation attempt for IOVA Table (%p)",
+ dpaax_iova_table_p);
+ /* This can be an error case as well - some path not cleaning
+ * up table - but, for now, it is assumed that if IOVA Table
+ * pointer is valid, table is allocated.
+ */
+ return 0;
+ }
+
+ nodes = read_memory_node(&node_count);
+ if (nodes == NULL) {
+ DPAAX_WARN("PA->VA translation not available;");
+ DPAAX_WARN("Expect performance impact.");
+ return -1;
+ }
+
+ tot_memory_size = 0;
+ for (i = 0; i < node_count; i++)
+ tot_memory_size += nodes[i].len;
+
+ DPAAX_DEBUG("Total available PA memory size: %zu", tot_memory_size);
+
+ /* Total table size = meta data + tot_memory_size/8 */
+ total_table_size = sizeof(struct dpaax_iova_table) +
+ (sizeof(struct dpaax_iovat_element) * node_count) +
+ ((tot_memory_size / DPAAX_MEM_SPLIT) * sizeof(uint64_t));
+
+ /* TODO: This memory doesn't need to be shared, but it must always be
+ * pinned in RAM (no swap out) - use hugepages rather than malloc
+ */
+ dpaax_iova_table_p = rte_zmalloc(NULL, total_table_size, 0);
+ if (dpaax_iova_table_p == NULL) {
+ DPAAX_WARN("Unable to allocate memory for PA->VA Table;");
+ DPAAX_WARN("PA->VA translation not available;");
+ DPAAX_WARN("Expect performance impact.");
+ free(nodes);
+ return -1;
+ }
+
+ /* Initialize table */
+ dpaax_iova_table_p->count = node_count;
+ entry = dpaax_iova_table_p->entries;
+
+ DPAAX_DEBUG("IOVA Table entries: (entry start = %p)", (void *)entry);
+ DPAAX_DEBUG("\t(entry),(start),(len),(next)");
+
+ for (i = 0; i < node_count; i++) {
+ /* dpaax_iova_table_p
+ * | dpaax_iova_table_p->entries
+ * | |
+ * | |
+ * V V
+ * +------+------+-------+---+----------+---------+---
+ * |iova_ |entry | entry | | pages | pages |
+ * |table | 1 | 2 |...| entry 1 | entry2 |
+ * +-----'+.-----+-------+---+;---------+;--------+---
+ * \ \ / /
+ * `~~~~~~|~~~~~>pages /
+ * \ /
+ * `~~~~~~~~~~~>pages
+ */
+ entry[i].start = nodes[i].addr;
+ entry[i].len = nodes[i].len;
+ if (i > 0)
+ entry[i].pages = entry[i-1].pages +
+ ((entry[i-1].len/DPAAX_MEM_SPLIT));
+ else
+ entry[i].pages = (uint64_t *)((unsigned char *)entry +
+ (sizeof(struct dpaax_iovat_element) *
+ node_count));
+
+ DPAAX_DEBUG("\t(%u),(%8"PRIx64"),(%8zu),(%8p)",
+ i, entry[i].start, entry[i].len, entry[i].pages);
+ }
+
+ /* Release memory associated with nodes array - not required now */
+ free(nodes);
+
+ DPAAX_DEBUG("Adding mem-event handler\n");
+ ret = dpaax_handle_memevents();
+ if (ret) {
+ DPAAX_ERR("Unable to add mem-event handler");
+ DPAAX_WARN("Cases with non-buffer pool mem won't work!");
+ }
+
+ return 0;
+}
+
+void
+dpaax_iova_table_depopulate(void)
+{
+ if (dpaax_iova_table_p == NULL)
+ return;
+
+ rte_free(dpaax_iova_table_p);
+ dpaax_iova_table_p = NULL;
+
+ DPAAX_DEBUG("IOVA Table cleaned up");
+}
+
+int
+dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length)
+{
+ int found = 0;
+ unsigned int i;
+ size_t req_length = length, e_offset;
+ struct dpaax_iovat_element *entry;
+ uintptr_t align_vaddr;
+ phys_addr_t align_paddr;
+
+ if (unlikely(dpaax_iova_table_p == NULL))
+ return -1;
+
+ align_paddr = paddr & DPAAX_MEM_SPLIT_MASK;
+ align_vaddr = ((uintptr_t)vaddr & DPAAX_MEM_SPLIT_MASK);
+
+ /* Check if paddr is available in table */
+ entry = dpaax_iova_table_p->entries;
+ for (i = 0; i < dpaax_iova_table_p->count; i++) {
+ if (align_paddr < entry[i].start) {
+ /* An address lower than this entry's start that was not
+ * matched in a previous iteration shouldn't exist.
+ */
+ DPAAX_ERR("Add: Incorrect entry for PA->VA Table "
+ "(%"PRIu64")", paddr);
+ DPAAX_ERR("Add: Lowest address: %"PRIu64"",
+ entry[i].start);
+ return -1;
+ }
+
+ if (align_paddr > (entry[i].start + entry[i].len))
+ continue;
+
+ /* align_paddr >= start && align_paddr < (start + len) */
+ found = 1;
+
+ do {
+ e_offset = ((align_paddr - entry[i].start) / DPAAX_MEM_SPLIT);
+ /* TODO: What if something already exists at this
+ * location - is that an error? For now, ignoring the
+ * case.
+ */
+ entry[i].pages[e_offset] = align_vaddr;
+ DPAAX_DEBUG("Added: vaddr=%zu for Phy:%"PRIu64" at %zu"
+ " remaining len %zu", align_vaddr,
+ align_paddr, e_offset, req_length);
+
+ /* Incoming request can be larger than the
+ * DPAAX_MEM_SPLIT size - in which case, multiple
+ * entries in entry->pages[] are filled up.
+ */
+ if (req_length <= DPAAX_MEM_SPLIT)
+ break;
+ align_paddr += DPAAX_MEM_SPLIT;
+ align_vaddr += DPAAX_MEM_SPLIT;
+ req_length -= DPAAX_MEM_SPLIT;
+ } while (1);
+
+ break;
+ }
+
+ if (!found) {
+ /* There might be a case where the incoming physical address is
+ * beyond the addresses discovered in the device-tree memory node,
+ * especially if some malloc'd area is used by the EAL and the
+ * mem-event handler passes it across. This is not necessarily an
+ * error.
+ */
+ DPAAX_DEBUG("Add: Unable to find slot for vaddr:(%p),"
+ " phy(%"PRIu64")",
+ vaddr, paddr);
+ return -1;
+ }
+
+ DPAAX_DEBUG("Add: Found slot at (%"PRIu64")[(%zu)] for vaddr:(%p),"
+ " phy(%"PRIu64"), len(%zu)", entry[i].start, e_offset,
+ vaddr, paddr, length);
+ return 0;
+}
+
+/* dpaax_iova_table_dump
+ * Dump the table, with its entries, on screen. Only works in debug mode.
+ * Not for the faint-hearted - the tables can get quite large.
+ */
+void
+dpaax_iova_table_dump(void)
+{
+ unsigned int i, j;
+ struct dpaax_iovat_element *entry;
+
+ /* If DEBUG is not enabled, some 'if' conditions might misbehave,
+ * as they contain nothing but a DPAAX_DEBUG() which, if compiled
+ * out, would leave the 'if' body empty.
+ */
+ if (rte_log_get_global_level() < RTE_LOG_DEBUG) {
+ DPAAX_ERR("Set log level to Debug for PA->Table dump!");
+ return;
+ }
+
+ DPAAX_DEBUG(" === Start of PA->VA Translation Table ===");
+ if (dpaax_iova_table_p == NULL) {
+ DPAAX_DEBUG("\tNULL");
+ return;
+ }
+
+ entry = dpaax_iova_table_p->entries;
+ for (i = 0; i < dpaax_iova_table_p->count; i++) {
+ DPAAX_DEBUG("\t(%16i),(%16"PRIu64"),(%16zu),(%16p)",
+ i, entry[i].start, entry[i].len, entry[i].pages);
+ DPAAX_DEBUG("\t\t (PA), (VA)");
+ for (j = 0; j < (entry[i].len / DPAAX_MEM_SPLIT); j++) {
+ if (entry[i].pages[j] == 0)
+ continue;
+ DPAAX_DEBUG("\t\t(%16"PRIx64"),(%16"PRIx64")",
+ (entry[i].start + (j * DPAAX_MEM_SPLIT)),
+ entry[i].pages[j]);
+ }
+ }
+ DPAAX_DEBUG(" === End of PA->VA Translation Table ===");
+}
+
+static void
+dpaax_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
+{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ phys_addr_t phys_addr;
+ void *virt_addr;
+ int ret;
+
+ DPAAX_DEBUG("Called with addr=%p, len=%zu", addr, len);
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ phys_addr = rte_mem_virt2phy(ms->addr);
+ virt_addr = ms->addr;
+ map_len = ms->len;
+
+ DPAAX_DEBUG("Request for %s, va=%p, virt_addr=%p,"
+ "iova=%"PRIu64", map_len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "alloc" : "dealloc",
+ va, virt_addr, phys_addr, map_len);
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = dpaax_iova_table_update(phys_addr, virt_addr,
+ map_len);
+ else
+ /* In case of mem_events for MEM_EVENT_FREE, complete
+ * hugepage is released and its PA entry is set to 0.
+ */
+ ret = dpaax_iova_table_update(phys_addr, 0, map_len);
+
+ if (ret != 0) {
+ DPAAX_DEBUG("PA-Table entry update failed. "
+ "Map=%d, addr=%p, len=%zu, err:(%d)",
+ type, va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+}
+
+static int
+dpaax_memevent_walk_memsegs(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, size_t len,
+ void *arg __rte_unused)
+{
+ DPAAX_DEBUG("Walking for %p (pa=%"PRIu64") and len %zu",
+ ms->addr, ms->phys_addr, len);
+ dpaax_iova_table_update(rte_mem_virt2phy(ms->addr), ms->addr, len);
+ return 0;
+}
+
+static int
+dpaax_handle_memevents(void)
+{
+ /* First, walk through all memsegs and pin them, before installing the
+ * handler. This ensures that all memsegs which have already been
+ * identified/allocated by the EAL are already part of the PA->VA table.
+ * This is especially relevant when the application allocates memory
+ * before the EAL, or when externally allocated memory is passed to the EAL.
+ */
+ rte_memseg_contig_walk_thread_unsafe(dpaax_memevent_walk_memsegs, NULL);
+
+ return rte_mem_event_callback_register("dpaax_memevents_cb",
+ dpaax_memevent_cb, NULL);
+}
+
+RTE_INIT(dpaax_log)
+{
+ dpaax_logger = rte_log_register("pmd.common.dpaax");
+ if (dpaax_logger >= 0)
+ rte_log_set_level(dpaax_logger, RTE_LOG_ERR);
+}
diff --git a/drivers/common/dpaax/dpaax_iova_table.h b/drivers/common/dpaax/dpaax_iova_table.h
new file mode 100644
index 00000000..138827e7
--- /dev/null
+++ b/drivers/common/dpaax/dpaax_iova_table.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _DPAAX_IOVA_TABLE_H_
+#define _DPAAX_IOVA_TABLE_H_
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <glob.h>
+#include <errno.h>
+#include <arpa/inet.h>
+
+#include <rte_eal.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+
+struct dpaax_iovat_element {
+ phys_addr_t start; /**< Start address of block of physical pages */
+ size_t len; /**< Difference of end-start for quick access */
+ uint64_t *pages; /**< VA for each physical page in this block */
+};
+
+struct dpaax_iova_table {
+ unsigned int count; /**< No. of blocks of contiguous physical pages */
+ struct dpaax_iovat_element entries[0];
+};
+
+/* Pointer to the table, which is common for DPAA/DPAA2 and only a single
+ * instance is required across net/crypto/event drivers. This table is
+ * populated iff devices are found on the bus.
+ */
+extern struct dpaax_iova_table *dpaax_iova_table_p;
+
+/* Device tree file for memory layout is named 'memory@<addr>' where the 'addr'
+ * is SoC dependent, or even Uboot fixup dependent.
+ */
+#define MEM_NODE_PATH_GLOB "/proc/device-tree/memory[@0-9]*/reg"
+/* The device file size should be a multiple of 16 bytes, each entry containing
+ * an 8-byte address and an 8-byte length. Assuming a maximum of 5 entries.
+ */
+#define MEM_NODE_FILE_LEN ((16 * 5) + 1)
+
+/* The table is split into DPAAX_MEM_SPLIT-sized chunks for each contiguous
+ * zone. This helps avoid separate handling for cases where more than one
+ * hugepage size is supported.
+ */
+#define DPAAX_MEM_SPLIT (1<<21)
+#define DPAAX_MEM_SPLIT_MASK ~(DPAAX_MEM_SPLIT - 1) /**< Floor aligned */
+#define DPAAX_MEM_SPLIT_MASK_OFF (DPAAX_MEM_SPLIT - 1) /**< Offset */
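+
+/*
+ * Illustrative example (values assumed): with DPAAX_MEM_SPLIT = 2 MB, a
+ * physical address 0x80601234 inside a block whose entry starts at
+ * 0x80000000 aligns down to 0x80600000, so it maps to pages[3]
+ * ((0x80600000 - 0x80000000) / 0x200000) with a byte offset of 0x1234
+ * added to the stored virtual address.
+ */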
+
+/* APIs exposed */
+int dpaax_iova_table_populate(void);
+void dpaax_iova_table_depopulate(void);
+int dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length);
+void dpaax_iova_table_dump(void);
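+
+/*
+ * Typical call sequence (illustrative only; the exact call sites live in the
+ * DPAA/DPAA2 bus and PMD code):
+ *
+ *   if (dpaax_iova_table_populate() == 0) {
+ *           ...
+ *           va = dpaax_iova_table_get_va(paddr);
+ *           ...
+ *           dpaax_iova_table_depopulate();
+ *   }
+ */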
+
+static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __attribute__((hot));
+
+static inline void *
+dpaax_iova_table_get_va(phys_addr_t paddr) {
+ unsigned int i = 0, index;
+ void *vaddr = 0;
+ phys_addr_t paddr_align = paddr & DPAAX_MEM_SPLIT_MASK;
+ size_t offset = paddr & DPAAX_MEM_SPLIT_MASK_OFF;
+ struct dpaax_iovat_element *entry;
+
+ if (unlikely(dpaax_iova_table_p == NULL))
+ return NULL;
+
+ entry = dpaax_iova_table_p->entries;
+
+ do {
+ if (unlikely(i >= dpaax_iova_table_p->count))
+ break;
+
+ if (paddr_align < entry[i].start) {
+ /* Incorrect paddr; Not in memory range */
+ return NULL;
+ }
+
+ if (paddr_align > (entry[i].start + entry[i].len)) {
+ i++;
+ continue;
+ }
+
+ /* paddr > entry->start && paddr <= entry->(start+len) */
+ index = (paddr_align - entry[i].start)/DPAAX_MEM_SPLIT;
+ vaddr = (void *)((uintptr_t)entry[i].pages[index] + offset);
+ break;
+ } while (1);
+
+ return vaddr;
+}
+
+#endif /* _DPAAX_IOVA_TABLE_H_ */
diff --git a/drivers/common/dpaax/dpaax_logs.h b/drivers/common/dpaax/dpaax_logs.h
new file mode 100644
index 00000000..bf1b27cc
--- /dev/null
+++ b/drivers/common/dpaax/dpaax_logs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _DPAAX_LOGS_H_
+#define _DPAAX_LOGS_H_
+
+#include <rte_log.h>
+
+extern int dpaax_logger;
+
+#define DPAAX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaax_logger, "dpaax: " fmt "\n", \
+ ##args)
+
+/* Debug logs are with Function names */
+#define DPAAX_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaax_logger, "dpaax: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define DPAAX_INFO(fmt, args...) \
+ DPAAX_LOG(INFO, fmt, ## args)
+#define DPAAX_ERR(fmt, args...) \
+ DPAAX_LOG(ERR, fmt, ## args)
+#define DPAAX_WARN(fmt, args...) \
+ DPAAX_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAAX_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAAX_DP_DEBUG(fmt, args...) \
+ DPAAX_DP_LOG(DEBUG, fmt, ## args)
+#define DPAAX_DP_INFO(fmt, args...) \
+ DPAAX_DP_LOG(INFO, fmt, ## args)
+#define DPAAX_DP_WARN(fmt, args...) \
+ DPAAX_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAAX_LOGS_H_ */
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
new file mode 100644
index 00000000..98a1bdd4
--- /dev/null
+++ b/drivers/common/dpaax/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 NXP
+
+allow_experimental_apis = true
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+sources = files('dpaax_iova_table.c')
+
+cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/common/dpaax/rte_common_dpaax_version.map b/drivers/common/dpaax/rte_common_dpaax_version.map
new file mode 100644
index 00000000..8131c9e3
--- /dev/null
+++ b/drivers/common/dpaax/rte_common_dpaax_version.map
@@ -0,0 +1,11 @@
+DPDK_18.11 {
+ global:
+
+ dpaax_iova_table_update;
+ dpaax_iova_table_depopulate;
+ dpaax_iova_table_dump;
+ dpaax_iova_table_p;
+ dpaax_iova_table_populate;
+
+ local: *;
+};
diff --git a/drivers/common/meson.build b/drivers/common/meson.build
index d7b7d8cf..a5093410 100644
--- a/drivers/common/meson.build
+++ b/drivers/common/meson.build
@@ -2,6 +2,6 @@
# Copyright(c) 2018 Cavium, Inc
std_deps = ['eal']
-drivers = ['octeontx', 'qat']
+drivers = ['cpt', 'dpaax', 'mvep', 'octeontx', 'qat']
config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
driver_name_fmt = 'rte_common_@0@'
diff --git a/drivers/common/mvep/Makefile b/drivers/common/mvep/Makefile
new file mode 100644
index 00000000..1f5f005d
--- /dev/null
+++ b/drivers/common/mvep/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_common_mvep.a
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_common_mvep_version.map
+
+# external library dependencies
+CFLAGS += -I$(RTE_SDK)/drivers/common/mvep
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_kvargs
+
+# library source files
+SRCS-y += mvep_common.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/mvep/meson.build b/drivers/common/mvep/meson.build
new file mode 100644
index 00000000..8ccfacb3
--- /dev/null
+++ b/drivers/common/mvep/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+#
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files('mvep_common.c')
diff --git a/drivers/common/mvep/mvep_common.c b/drivers/common/mvep/mvep_common.c
new file mode 100644
index 00000000..67fa65b5
--- /dev/null
+++ b/drivers/common/mvep/mvep_common.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+
+#include <env/mv_autogen_comp_flags.h>
+#include <env/mv_sys_dma.h>
+
+#include "rte_mvep_common.h"
+
+/* Memory size (in bytes) for MUSDK dma buffers */
+#define MRVL_MUSDK_DMA_MEMSIZE (40 * 1024 * 1024)
+
+struct mvep {
+ uint32_t ref_count;
+};
+
+static struct mvep mvep;
+
+int rte_mvep_init(enum mvep_module_type module __rte_unused,
+ struct rte_kvargs *kvlist __rte_unused)
+{
+ int ret;
+
+ if (!mvep.ref_count) {
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret)
+ return ret;
+ }
+
+ mvep.ref_count++;
+
+ return 0;
+}
+
+int rte_mvep_deinit(enum mvep_module_type module __rte_unused)
+{
+ mvep.ref_count--;
+
+ if (!mvep.ref_count)
+ mv_sys_dma_mem_destroy();
+
+ return 0;
+}
diff --git a/drivers/common/mvep/rte_common_mvep_version.map b/drivers/common/mvep/rte_common_mvep_version.map
new file mode 100644
index 00000000..c71722d7
--- /dev/null
+++ b/drivers/common/mvep/rte_common_mvep_version.map
@@ -0,0 +1,6 @@
+DPDK_18.11 {
+ global:
+
+ rte_mvep_init;
+ rte_mvep_deinit;
+};
diff --git a/drivers/common/mvep/rte_mvep_common.h b/drivers/common/mvep/rte_mvep_common.h
new file mode 100644
index 00000000..0593cefc
--- /dev/null
+++ b/drivers/common/mvep/rte_mvep_common.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ */
+
+#ifndef __RTE_MVEP_COMMON_H__
+#define __RTE_MVEP_COMMON_H__
+
+#include <rte_kvargs.h>
+
+enum mvep_module_type {
+ MVEP_MOD_T_NONE = 0,
+ MVEP_MOD_T_PP2,
+ MVEP_MOD_T_SAM,
+ MVEP_MOD_T_NETA,
+ MVEP_MOD_T_LAST
+};
+
+int rte_mvep_init(enum mvep_module_type module, struct rte_kvargs *kvlist);
+int rte_mvep_deinit(enum mvep_module_type module);
+
+#endif /* __RTE_MVEP_COMMON_H__ */
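For context, a brief usage sketch (not part of the patch; the probe/remove function names are hypothetical) of how a Marvell PMD built on this common code would take and release the shared MUSDK DMA region:

#include <rte_kvargs.h>
#include "rte_mvep_common.h"

/* Hypothetical PMD probe path: the first rte_mvep_init() call initialises
 * the MUSDK DMA memory; later callers only bump the reference count.
 */
static int
example_pp2_probe(struct rte_kvargs *kvlist)
{
	int ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);

	if (ret)
		return ret;
	/* ... create the device-specific resources here ... */
	return 0;
}

/* Hypothetical PMD remove path: the last rte_mvep_deinit() call releases
 * the DMA region via mv_sys_dma_mem_destroy().
 */
static int
example_pp2_remove(void)
{
	return rte_mvep_deinit(MVEP_MOD_T_PP2);
}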
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index d4bef539..de9a3ba5 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -17,7 +17,8 @@
*/
enum qat_device_gen {
QAT_GEN1 = 1,
- QAT_GEN2
+ QAT_GEN2,
+ QAT_GEN3
};
enum qat_service_type {
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index f32d7235..2a1cf3e1 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -7,6 +7,7 @@
#include "qat_device.h"
#include "adf_transport_access_macros.h"
#include "qat_sym_pmd.h"
+#include "qat_comp_pmd.h"
/* Hardware device information per generation */
__extension__
@@ -14,11 +15,18 @@ struct qat_gen_hw_data qat_gen_config[] = {
[QAT_GEN1] = {
.dev_gen = QAT_GEN1,
.qp_hw_data = qat_gen1_qps,
+ .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN1
},
[QAT_GEN2] = {
.dev_gen = QAT_GEN2,
.qp_hw_data = qat_gen1_qps,
/* gen2 has same ring layout as gen1 */
+ .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN2
+ },
+ [QAT_GEN3] = {
+ .dev_gen = QAT_GEN3,
+ .qp_hw_data = qat_gen3_qps,
+ .comp_num_im_bufs_required = QAT_NUM_INTERM_BUFS_GEN3
},
};
@@ -43,10 +51,12 @@ static const struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x6f55),
},
+ {
+ RTE_PCI_DEVICE(0x8086, 0x18a1),
+ },
{.device_id = 0},
};
-
static struct qat_pci_device *
qat_pci_get_dev(uint8_t dev_id)
{
@@ -130,6 +140,9 @@ qat_pci_device_allocate(struct rte_pci_device *pci_dev)
case 0x6f55:
qat_dev->qat_dev_gen = QAT_GEN2;
break;
+ case 0x18a1:
+ qat_dev->qat_dev_gen = QAT_GEN3;
+ break;
default:
QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
return NULL;
@@ -187,6 +200,7 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
int ret = 0;
+ int num_pmds_created = 0;
struct qat_pci_device *qat_pci_dev;
QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
@@ -199,23 +213,33 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
return -ENODEV;
ret = qat_sym_dev_create(qat_pci_dev);
- if (ret != 0)
- goto error_out;
+ if (ret == 0)
+ num_pmds_created++;
+ else
+ QAT_LOG(WARNING,
+ "Failed to create QAT SYM PMD on device %s",
+ qat_pci_dev->name);
ret = qat_comp_dev_create(qat_pci_dev);
- if (ret != 0)
- goto error_out;
+ if (ret == 0)
+ num_pmds_created++;
+ else
+ QAT_LOG(WARNING,
+ "Failed to create QAT COMP PMD on device %s",
+ qat_pci_dev->name);
ret = qat_asym_dev_create(qat_pci_dev);
- if (ret != 0)
- goto error_out;
-
- return 0;
+ if (ret == 0)
+ num_pmds_created++;
+ else
+ QAT_LOG(WARNING,
+ "Failed to create QAT ASYM PMD on device %s",
+ qat_pci_dev->name);
-error_out:
- qat_pci_dev_destroy(qat_pci_dev, pci_dev);
- return ret;
+ if (num_pmds_created == 0)
+ qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+ return 0;
}
static int qat_pci_remove(struct rte_pci_device *pci_dev)
@@ -239,37 +263,37 @@ static struct rte_pci_driver rte_qat_pmd = {
.remove = qat_pci_remove
};
-__attribute__((weak)) int
+__rte_weak int
qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
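As a side note on the __attribute__((weak)) to __rte_weak conversion above: __rte_weak is the EAL wrapper for the same GCC attribute, so a stub like the hedged illustration below (function name made up) is overridden whenever an object file with a strong definition of the same symbol is linked in:

#include <rte_common.h>

/* Default stub: used only when no other object file provides a strong
 * definition of the same symbol (e.g. when a service PMD is not built).
 */
__rte_weak int
example_optional_service_create(void *ctx __rte_unused)
{
	return 0;
}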
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
index 9599fc59..eb81c78f 100644
--- a/drivers/common/qat/qat_device.h
+++ b/drivers/common/qat/qat_device.h
@@ -16,6 +16,12 @@
#define QAT_DEV_NAME_MAX_LEN 64
+enum qat_comp_num_im_buffers {
+ QAT_NUM_INTERM_BUFS_GEN1 = 12,
+ QAT_NUM_INTERM_BUFS_GEN2 = 20,
+ QAT_NUM_INTERM_BUFS_GEN3 = 20
+};
+
/*
* This struct holds all the data about a QAT pci device
* including data about all services it supports.
@@ -59,6 +65,11 @@ struct qat_pci_device {
/* Data relating to compression service */
struct qat_comp_dev_private *comp_dev;
/**< link back to compressdev private data */
+ struct rte_device comp_rte_dev;
+ /**< This represents the compression subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a compression-specific name
+ */
/* Data relating to asymmetric crypto service */
@@ -67,6 +78,7 @@ struct qat_pci_device {
struct qat_gen_hw_data {
enum qat_device_gen dev_gen;
const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
+ enum qat_comp_num_im_buffers comp_num_im_bufs_required;
};
extern struct qat_gen_hw_data qat_gen_config[];
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 7ca7a45e..79f6a01b 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -90,6 +90,44 @@ const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
}
};
+__extension__
+const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+ /* queue pairs which provide an asymmetric crypto service */
+ [QAT_SERVICE_ASYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 0,
+ .rx_ring_num = 4,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a symmetric crypto service */
+ [QAT_SERVICE_SYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 1,
+ .rx_ring_num = 5,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a compression service */
+ [QAT_SERVICE_COMPRESSION] = {
+ {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 3,
+ .rx_ring_num = 7,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ }
+};
+
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
@@ -596,15 +634,23 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
uint32_t head;
uint32_t resp_counter = 0;
uint8_t *resp_msg;
+ uint8_t hdr_flags;
rx_queue = &(tmp_qp->rx_q);
tx_queue = &(tmp_qp->tx_q);
head = rx_queue->head;
resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+ hdr_flags = ((struct icp_qat_fw_comn_resp_hdr *)resp_msg)->hdr_flags;
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
resp_counter != nb_ops) {
+ if (unlikely(!ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags))) {
+ /* Fatal firmware error */
+ QAT_LOG(ERR, "QAT Firmware returned invalid response");
+ return 0;
+ }
+
if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
qat_sym_process_response(ops, resp_msg);
else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
@@ -635,7 +681,7 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
return resp_counter;
}
-__attribute__((weak)) int
+__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
{
return 0;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 69f8a613..6f1525e1 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -85,6 +85,7 @@ struct qat_qp {
} __rte_cache_aligned;
extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+extern const struct qat_qp_hw_data qat_gen3_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
diff --git a/drivers/compress/octeontx/include/zip_regs.h b/drivers/compress/octeontx/include/zip_regs.h
index 1e74db43..04c3d75e 100644
--- a/drivers/compress/octeontx/include/zip_regs.h
+++ b/drivers/compress/octeontx/include/zip_regs.h
@@ -12,12 +12,12 @@
* ZIP compression coding Enumeration
* Enumerates ZIP_INST_S[CC].
*/
-enum {
+enum zip_cc {
ZIP_CC_DEFAULT = 0,
ZIP_CC_DYN_HUFF,
ZIP_CC_FIXED_HUFF,
ZIP_CC_LZS
-} zip_cc;
+};
/**
* Register (NCB) zip_vq#_ena
diff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h
index 99a38d00..3abefd1d 100644
--- a/drivers/compress/octeontx/otx_zip.h
+++ b/drivers/compress/octeontx/otx_zip.h
@@ -79,7 +79,7 @@ int octtx_zip_logtype_driver;
ZIP_PMD_LOG(ERR, fmt, ## args)
/* resources required to process stream */
-enum {
+enum NUM_BUFS_PER_STREAM {
RES_BUF = 0,
CMD_BUF,
HASH_CTX_BUF,
@@ -88,7 +88,7 @@ enum {
OUT_DATA_BUF,
HISTORY_DATA_BUF,
MAX_BUFS_PER_STREAM
-} NUM_BUFS_PER_STREAM;
+};
struct zip_stream;
struct zipvf_qp;
@@ -106,7 +106,7 @@ struct zip_stream {
comp_func_t func;
/* function to process comp operation */
void *bufs[MAX_BUFS_PER_STREAM];
-} _rte_cache_aligned;
+} __rte_cache_aligned;
/**
diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c
index 9d13f933..67ff5066 100644
--- a/drivers/compress/octeontx/otx_zip_pmd.c
+++ b/drivers/compress/octeontx/otx_zip_pmd.c
@@ -533,7 +533,7 @@ zip_pmd_dequeue_burst_sync(void *queue_pair,
return nb_dequeued;
}
-struct rte_compressdev_ops octtx_zip_pmd_ops = {
+static struct rte_compressdev_ops octtx_zip_pmd_ops = {
.dev_configure = zip_pmd_config,
.dev_start = zip_pmd_start,
.dev_stop = zip_pmd_stop,
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index 38c8a5b8..d70c5949 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -145,7 +145,6 @@ qat_comp_process_response(void **op, uint8_t *resp)
rx_op->debug_status =
*((uint16_t *)(&resp_msg->comn_resp.comn_error));
} else {
- struct qat_comp_xform *qat_xform = rx_op->private_xform;
struct icp_qat_fw_resp_comp_pars *comp_resp =
(struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
@@ -193,7 +192,7 @@ static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
}
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
- const struct rte_memzone *interm_buff_mz __rte_unused,
+ const struct rte_memzone *interm_buff_mz,
const struct rte_comp_xform *xform)
{
struct icp_qat_fw_comp_req *comp_req;
@@ -281,10 +280,20 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
ICP_QAT_FW_SLICE_COMP);
} else if (qat_xform->qat_comp_request_type ==
- QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
- QAT_LOG(ERR, "Dynamic huffman encoding not supported");
- return -EINVAL;
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_XLAT);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_COMP);
+
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_XLAT);
+
+ comp_req->u1.xlt_pars.inter_buff_ptr =
+ interm_buff_mz->phys_addr;
}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
@@ -335,27 +344,35 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
(struct qat_comp_xform *)*private_xform;
if (xform->type == RTE_COMP_COMPRESS) {
- if (xform->compress.deflate.huffman ==
- RTE_COMP_HUFFMAN_DYNAMIC) {
- QAT_LOG(ERR,
- "QAT device doesn't support dynamic compression");
- return -ENOTSUP;
- }
if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
&& qat->interm_buff_mz == NULL))
-
qat_xform->qat_comp_request_type =
QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
+ else if ((xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC ||
+ xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DEFAULT) &&
+ qat->interm_buff_mz != NULL)
+
+ qat_xform->qat_comp_request_type =
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;
+
+ else {
+ QAT_LOG(ERR,
+ "IM buffers needed for dynamic deflate. Set size in config file");
+ return -EINVAL;
+ }
+
+ qat_xform->checksum_type = xform->compress.chksum;
} else {
qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+ qat_xform->checksum_type = xform->decompress.chksum;
}
- qat_xform->checksum_type = xform->compress.chksum;
-
if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
QAT_LOG(ERR, "QAT: Problem with setting compression");
return -EINVAL;
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 8d315efb..99a4462e 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -15,6 +15,10 @@
#include "icp_qat_fw_comp.h"
#include "icp_qat_fw_la.h"
+#define QAT_64_BYTE_ALIGN_MASK (~0x3f)
+#define QAT_64_BYTE_ALIGN (64)
+#define QAT_NUM_BUFS_IN_IM_SGL 1
+
#define ERR_CODE_QAT_COMP_WRONG_FW -99
enum qat_comp_request_type {
@@ -24,6 +28,15 @@ enum qat_comp_request_type {
REQ_COMP_END
};
+struct array_of_ptrs {
+ phys_addr_t pointer[0];
+};
+
+struct qat_inter_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_NUM_BUFS_IN_IM_SGL];
+} __rte_packed __rte_cache_aligned;
+
struct qat_comp_sgl {
qat_sgl_hdr;
struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index b89975fc..01dd7361 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -14,6 +14,7 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC |
RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
@@ -112,7 +113,7 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
/* store a link to the qp in the qat_pci_device */
qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
- = *qp_addr;
+ = *qp_addr;
qp = (struct qat_qp *)*qp_addr;
@@ -135,6 +136,103 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
return ret;
}
+
+#define QAT_IM_BUFFER_DEBUG 0
+static const struct rte_memzone *
+qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
+ uint32_t buff_size)
+{
+ char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *memzone;
+ uint8_t *mz_start = NULL;
+ rte_iova_t mz_start_phys = 0;
+ struct array_of_ptrs *array_of_pointers;
+ int size_of_ptr_array;
+ uint32_t full_size;
+ uint32_t offset_of_sgls, offset_of_flat_buffs = 0;
+ int i;
+ int num_im_sgls = qat_gen_config[
+ comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;
+
+ QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
+ comp_dev->qat_dev->name, num_im_sgls);
+ snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
+ "%s_inter_buff", comp_dev->qat_dev->name);
+ memzone = rte_memzone_lookup(inter_buff_mz_name);
+ if (memzone != NULL) {
+ QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already");
+ return memzone;
+ }
+
+ /* Create a memzone to hold intermediate buffers and associated
+ * meta-data needed by the firmware. The memzone contains:
+ * - a list of num_im_sgls physical pointers to sgls
+ * - the num_im_sgl sgl structures, each pointing to 2 flat buffers
+ * - the flat buffers: num_im_sgl * 2
+ * where num_im_sgls depends on the hardware generation of the device
+ */
+
+ size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t);
+ offset_of_sgls = (size_of_ptr_array + (~QAT_64_BYTE_ALIGN_MASK))
+ & QAT_64_BYTE_ALIGN_MASK;
+ offset_of_flat_buffs =
+ offset_of_sgls + num_im_sgls * sizeof(struct qat_inter_sgl);
+ full_size = offset_of_flat_buffs +
+ num_im_sgls * buff_size * QAT_NUM_BUFS_IN_IM_SGL;
+
+ memzone = rte_memzone_reserve_aligned(inter_buff_mz_name, full_size,
+ comp_dev->compressdev->data->socket_id,
+ RTE_MEMZONE_2MB, QAT_64_BYTE_ALIGN);
+ if (memzone == NULL) {
+ QAT_LOG(ERR, "Can't allocate intermediate buffers"
+ " for device %s", comp_dev->qat_dev->name);
+ return NULL;
+ }
+
+ mz_start = (uint8_t *)memzone->addr;
+ mz_start_phys = memzone->phys_addr;
+ QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
+ ", size required %d, size created %zu",
+ inter_buff_mz_name, mz_start, mz_start_phys,
+ full_size, memzone->len);
+
+ array_of_pointers = (struct array_of_ptrs *)mz_start;
+ for (i = 0; i < num_im_sgls; i++) {
+ uint32_t curr_sgl_offset =
+ offset_of_sgls + i * sizeof(struct qat_inter_sgl);
+ struct qat_inter_sgl *sgl =
+ (struct qat_inter_sgl *)(mz_start + curr_sgl_offset);
+ array_of_pointers->pointer[i] = mz_start_phys + curr_sgl_offset;
+
+ sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL;
+ sgl->num_mapped_bufs = 0;
+ sgl->resrvd = 0;
+ sgl->buffers[0].addr = mz_start_phys + offset_of_flat_buffs +
+ ((i * QAT_NUM_BUFS_IN_IM_SGL) * buff_size);
+ sgl->buffers[0].len = buff_size;
+ sgl->buffers[0].resrvd = 0;
+ sgl->buffers[1].addr = mz_start_phys + offset_of_flat_buffs +
+ (((i * QAT_NUM_BUFS_IN_IM_SGL) + 1) * buff_size);
+ sgl->buffers[1].len = buff_size;
+ sgl->buffers[1].resrvd = 0;
+
+#if QAT_IM_BUFFER_DEBUG
+ QAT_LOG(DEBUG, " : phys addr of sgl[%i] in array_of_pointers"
+ "= 0x%"PRIx64, i, array_of_pointers->pointer[i]);
+ QAT_LOG(DEBUG, " : virt address of sgl[%i] = %p", i, sgl);
+ QAT_LOG(DEBUG, " : sgl->buffers[0].addr = 0x%"PRIx64", len=%d",
+ sgl->buffers[0].addr, sgl->buffers[0].len);
+ QAT_LOG(DEBUG, " : sgl->buffers[1].addr = 0x%"PRIx64", len=%d",
+ sgl->buffers[1].addr, sgl->buffers[1].len);
+#endif
+ }
+#if QAT_IM_BUFFER_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "IM buffer memzone start:",
+ mz_start, offset_of_flat_buffs + 32);
+#endif
+ return memzone;
+}
+
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
uint32_t num_elements)
@@ -176,6 +274,12 @@ qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
+ /* Free intermediate buffers */
+ if (comp_dev->interm_buff_mz) {
+ rte_memzone_free(comp_dev->interm_buff_mz);
+ comp_dev->interm_buff_mz = NULL;
+ }
+
/* Free private_xform pool */
if (comp_dev->xformpool) {
/* Free internal mempool for private xforms */
@@ -197,6 +301,21 @@ qat_comp_dev_config(struct rte_compressdev *dev,
return -EINVAL;
}
+ if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
+ QAT_LOG(WARNING,
+ "RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
+ " QAT device can't be used for Dynamic Deflate. "
+ "Did you really intend to do this?");
+ } else {
+ comp_dev->interm_buff_mz =
+ qat_comp_setup_inter_buffers(comp_dev,
+ RTE_PMD_QAT_COMP_IM_BUFFER_SIZE);
+ if (comp_dev->interm_buff_mz == NULL) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+ }
+
comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
config->max_nb_priv_xforms);
if (comp_dev->xformpool == NULL) {
@@ -348,6 +467,16 @@ static struct rte_compressdev_ops compress_qat_ops = {
.private_xform_free = qat_comp_private_xform_free
};
+/* An rte_driver is needed in the registration of the device with compressdev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the compression part of the pci device.
+ */
+static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD);
+static const struct rte_driver compdev_qat_driver = {
+ .name = qat_comp_drv_name,
+ .alias = qat_comp_drv_name
+};
int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
{
@@ -355,6 +484,10 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc");
return 0;
}
+ if (qat_pci_dev->qat_dev_gen == QAT_GEN3) {
+ QAT_LOG(ERR, "Compression PMD not supported on QAT c4xxx");
+ return 0;
+ }
struct rte_compressdev_pmd_init_params init_params = {
.name = "",
@@ -368,8 +501,14 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
qat_pci_dev->name, "comp");
QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
+ /* Populate subset device to use in compressdev device creation */
+ qat_pci_dev->comp_rte_dev.driver = &compdev_qat_driver;
+ qat_pci_dev->comp_rte_dev.numa_node =
+ qat_pci_dev->pci_dev->device.numa_node;
+ qat_pci_dev->comp_rte_dev.devargs = NULL;
+
compressdev = rte_compressdev_pmd_create(name,
- &qat_pci_dev->pci_dev->device,
+ &(qat_pci_dev->comp_rte_dev),
sizeof(struct qat_comp_dev_private),
&init_params);
@@ -391,6 +530,7 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
switch (qat_pci_dev->qat_dev_gen) {
case QAT_GEN1:
case QAT_GEN2:
+ case QAT_GEN3:
comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
break;
default:
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
index 9ad2a283..b8299d43 100644
--- a/drivers/compress/qat/qat_comp_pmd.h
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -12,6 +12,9 @@
#include "qat_device.h"
+/**< Intel(R) QAT Compression PMD driver name */
+#define COMPRESSDEV_NAME_QAT_PMD compress_qat
+
/** private data structure for a QAT compression device.
* This QAT device is a device offering only a compression service,
* there can be one of these on each qat_pci_device (VF).
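With RTE_COMP_FF_HUFFMAN_DYNAMIC now advertised and the intermediate buffers allocated at configure time, an application can request dynamic Huffman explicitly. A minimal sketch of such a transform, assuming standard compressdev API usage (not taken from the patch):

#include <rte_comp.h>

/* Dynamic-Huffman deflate compression transform; with the changes above the
 * QAT PMD accepts it as long as RTE_PMD_QAT_COMP_IM_BUFFER_SIZE is non-zero.
 */
static struct rte_comp_xform example_dyn_deflate_xform = {
	.type = RTE_COMP_COMPRESS,
	.compress = {
		.algo = RTE_COMP_ALGO_DEFLATE,
		.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
		.level = RTE_COMP_LEVEL_PMD_DEFAULT,
		.window_size = 15,
		.chksum = RTE_COMP_CHECKSUM_NONE,
		.hash_algo = RTE_COMP_HASH_ALGO_NONE,
	},
};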
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c480cbd3..009f8443 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -7,6 +7,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += octeontx
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
@@ -14,12 +15,15 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
-endif
+endif # CONFIG_RTE_LIBRTE_FSLMC_BUS
ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
-endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr
+endif # CONFIG_RTE_LIBRTE_DPAA_BUS
+endif # CONFIG_RTE_LIBRTE_SECURITY
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 752e0cd6..ebdf7c35 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -23,7 +23,6 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
{
const struct rte_crypto_sym_xform *auth_xform;
const struct rte_crypto_sym_xform *aead_xform;
- uint16_t digest_length;
uint8_t key_length;
uint8_t *key;
@@ -47,7 +46,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
key_length = auth_xform->auth.key.length;
key = auth_xform->auth.key.data;
- digest_length = auth_xform->auth.digest_length;
+ sess->req_digest_length = auth_xform->auth.digest_length;
/* AES-GCM */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
@@ -73,7 +72,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
key = aead_xform->aead.key.data;
sess->aad_length = aead_xform->aead.aad_length;
- digest_length = aead_xform->aead.digest_length;
+ sess->req_digest_length = aead_xform->aead.digest_length;
} else {
AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
return -ENOTSUP;
@@ -106,13 +105,28 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
gcm_ops[sess->key].precomp(key, &sess->gdata_key);
/* Digest check */
- if (digest_length != 16 &&
- digest_length != 12 &&
- digest_length != 8) {
+ if (sess->req_digest_length > 16) {
AESNI_GCM_LOG(ERR, "Invalid digest length");
return -EINVAL;
}
- sess->digest_length = digest_length;
+ /*
+ * Multi-buffer lib supports digest sizes from 4 to 16 bytes
+ * in version 0.50 and sizes of 8, 12 and 16 bytes,
+ * in version 0.49.
+ * If size requested is different, generate the full digest
+ * (16 bytes) in a temporary location and then memcpy
+ * the requested number of bytes.
+ */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (sess->req_digest_length < 4)
+#else
+ if (sess->req_digest_length != 16 &&
+ sess->req_digest_length != 12 &&
+ sess->req_digest_length != 8)
+#endif
+ sess->gen_digest_length = 16;
+ else
+ sess->gen_digest_length = sess->req_digest_length;
return 0;
}
@@ -180,6 +194,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
struct rte_mbuf *m_src = sym_op->m_src;
uint32_t offset, data_offset, data_length;
uint32_t part_len, total_len, data_len;
+ uint8_t *tag;
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
@@ -225,17 +240,8 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
session->iv.offset);
- /*
- * GCM working in 12B IV mode => 16B pre-counter block we need
- * to set BE LSB to 1, driver expects that 16B is allocated
- */
- if (session->iv.length == 12) {
- uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
- *iv_padd = rte_bswap32(1);
- }
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
-
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
@@ -263,13 +269,16 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
total_len -= part_len;
}
+ if (session->req_digest_length != session->gen_digest_length)
+ tag = qp->temp_digest;
+ else
+ tag = sym_op->aead.digest.data;
+
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- sym_op->aead.digest.data,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
- uint8_t *auth_tag = qp->temp_digest;
-
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
@@ -298,33 +307,41 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
total_len -= part_len;
}
+ tag = qp->temp_digest;
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- auth_tag,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
} else if (session->op == AESNI_GMAC_OP_GENERATE) {
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
src,
(uint64_t)data_length);
+ if (session->req_digest_length != session->gen_digest_length)
+ tag = qp->temp_digest;
+ else
+ tag = sym_op->auth.digest.data;
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- sym_op->auth.digest.data,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
} else { /* AESNI_GMAC_OP_VERIFY */
- uint8_t *auth_tag = qp->temp_digest;
-
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
src,
(uint64_t)data_length);
+ /*
+ * Generate always 16 bytes and later compare only
+ * the bytes passed.
+ */
+ tag = qp->temp_digest;
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- auth_tag,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
}
return 0;
@@ -361,13 +378,22 @@ post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
rte_hexdump(stdout, "auth tag (orig):",
- digest, session->digest_length);
+ digest, session->req_digest_length);
rte_hexdump(stdout, "auth tag (calc):",
- tag, session->digest_length);
+ tag, session->req_digest_length);
#endif
- if (memcmp(tag, digest, session->digest_length) != 0)
+ if (memcmp(tag, digest, session->req_digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ if (session->req_digest_length != session->gen_digest_length) {
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
+ memcpy(op->sym->aead.digest.data, qp->temp_digest,
+ session->req_digest_length);
+ else
+ memcpy(op->sym->auth.digest.data, qp->temp_digest,
+ session->req_digest_length);
+ }
}
}
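The session setup above records both the requested and the generated digest length; when they differ, the full 16-byte tag is produced into qp->temp_digest and only the requested prefix is copied out afterwards. A stripped-down sketch of that decision and copy step (hypothetical helpers, not the driver functions):

#include <string.h>
#include <stdint.h>

/* Pick where the library should write the tag: directly into the op's digest
 * buffer when the requested size can be generated as-is, otherwise into a
 * scratch buffer so the requested prefix can be copied out afterwards.
 */
static uint8_t *
example_select_tag_output(uint8_t *op_digest, uint8_t *scratch,
			  uint16_t req_len, uint16_t gen_len)
{
	return (req_len != gen_len) ? scratch : op_digest;
}

/* Copy the requested prefix of the full tag once the operation completes.
 * req_len has already been validated at session creation (at most 16 bytes).
 */
static void
example_finish_tag(uint8_t *op_digest, const uint8_t *scratch,
		   uint16_t req_len, uint16_t gen_len)
{
	if (req_len != gen_len)
		memcpy(op_digest, scratch, req_len);
}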
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index b6b4dd02..c343a393 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -24,9 +24,9 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.increment = 8
},
.digest_size = {
- .min = 8,
+ .min = 1,
.max = 16,
- .increment = 4
+ .increment = 1
},
.iv_size = {
.min = 12,
@@ -49,9 +49,9 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.increment = 8
},
.digest_size = {
- .min = 8,
+ .min = 1,
.max = 16,
- .increment = 4
+ .increment = 1
},
.aad_size = {
.min = 0,
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index c13a12a5..92b04135 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -76,8 +76,10 @@ struct aesni_gcm_session {
/**< IV parameters */
uint16_t aad_length;
/**< AAD length */
- uint16_t digest_length;
- /**< Digest length */
+ uint16_t req_digest_length;
+ /**< Requested digest length */
+ uint16_t gen_digest_length;
+ /**< Generated digest length */
enum aesni_gcm_operation op;
/**< GCM operation type */
enum aesni_gcm_key key;
diff --git a/drivers/crypto/aesni_gcm/meson.build b/drivers/crypto/aesni_gcm/meson.build
new file mode 100644
index 00000000..a02da1ef
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('aesni_gcm_pmd.c', 'aesni_gcm_pmd_ops.c')
+deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 5a1cba6c..575d6a5b 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -11,6 +11,15 @@
#include <intel-ipsec-mb.h>
+/*
+ * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
+ * so if macro is not defined, it means that the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
enum aesni_mb_vector_mode {
RTE_AESNI_MB_NOT_SUPPORTED = 0,
RTE_AESNI_MB_SSE,
@@ -39,6 +48,8 @@ typedef void (*aes_cmac_sub_key_gen_t)
(const void *exp_key, void *k2, void *k3);
typedef void (*aes_cmac_keyexp_t)
(const void *key, void *keyexp);
+typedef void (*aes_gcm_keyexp_t)
+ (const void *key, struct gcm_key_data *keyexp);
/** Multi-buffer library function pointer table */
struct aesni_mb_op_fns {
@@ -86,8 +97,24 @@ struct aesni_mb_op_fns {
/**< AES CMAC subkey expansions */
aes_cmac_keyexp_t aes_cmac_expkey;
/**< AES CMAC key expansions */
+ aes_gcm_keyexp_t aes_gcm_128;
+ /**< AES GCM 128 key expansions */
+ aes_gcm_keyexp_t aes_gcm_192;
+ /**< AES GCM 192 key expansions */
+ aes_gcm_keyexp_t aes_gcm_256;
+ /**< AES GCM 256 key expansions */
} keyexp;
/**< Key expansion functions */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ struct {
+ hash_fn_t sha1;
+ hash_fn_t sha224;
+ hash_fn_t sha256;
+ hash_fn_t sha384;
+ hash_fn_t sha512;
+ } multi_block;
+ /** multi block hash functions */
+#endif
} aux;
/**< Auxiliary functions */
};
@@ -104,7 +131,13 @@ static const struct aesni_mb_op_fns job_ops[] = {
},
.keyexp = {
NULL
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ NULL
}
+#endif
+
}
},
[RTE_AESNI_MB_SSE] = {
@@ -130,8 +163,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_sse,
aes_xcbc_expand_key_sse,
aes_cmac_subkey_gen_sse,
- aes_keyexp_128_enc_sse
+ aes_keyexp_128_enc_sse,
+ aes_gcm_pre_128_sse,
+ aes_gcm_pre_192_sse,
+ aes_gcm_pre_256_sse
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_sse,
+ sha224_sse,
+ sha256_sse,
+ sha384_sse,
+ sha512_sse
}
+#endif
}
},
[RTE_AESNI_MB_AVX] = {
@@ -157,8 +202,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_avx,
aes_xcbc_expand_key_avx,
aes_cmac_subkey_gen_avx,
- aes_keyexp_128_enc_avx
+ aes_keyexp_128_enc_avx,
+ aes_gcm_pre_128_avx_gen2,
+ aes_gcm_pre_192_avx_gen2,
+ aes_gcm_pre_256_avx_gen2
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_avx,
+ sha224_avx,
+ sha256_avx,
+ sha384_avx,
+ sha512_avx
}
+#endif
}
},
[RTE_AESNI_MB_AVX2] = {
@@ -184,8 +241,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_avx2,
aes_xcbc_expand_key_avx2,
aes_cmac_subkey_gen_avx2,
- aes_keyexp_128_enc_avx2
+ aes_keyexp_128_enc_avx2,
+ aes_gcm_pre_128_avx_gen4,
+ aes_gcm_pre_192_avx_gen4,
+ aes_gcm_pre_256_avx_gen4
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_avx2,
+ sha224_avx2,
+ sha256_avx2,
+ sha384_avx2,
+ sha512_avx2
}
+#endif
}
},
[RTE_AESNI_MB_AVX512] = {
@@ -211,8 +280,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_avx512,
aes_xcbc_expand_key_avx512,
aes_cmac_subkey_gen_avx512,
- aes_keyexp_128_enc_avx512
+ aes_keyexp_128_enc_avx512,
+ aes_gcm_pre_128_avx_gen4,
+ aes_gcm_pre_192_avx_gen4,
+ aes_gcm_pre_256_avx_gen4
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_avx512,
+ sha224_avx512,
+ sha256_avx512,
+ sha384_avx512,
+ sha512_avx512
}
+#endif
}
}
};
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
new file mode 100644
index 00000000..aae0995e
--- /dev/null
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
+deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 93dc7a44..83250e32 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -14,6 +14,9 @@
#include "rte_aesni_mb_pmd_private.h"
+#define AES_CCM_DIGEST_MIN_LEN 4
+#define AES_CCM_DIGEST_MAX_LEN 16
+#define HMAC_MAX_BLOCK_SIZE 128
static uint8_t cryptodev_driver_id;
typedef void (*hash_one_block_t)(const void *data, void *digest);
@@ -83,7 +86,8 @@ aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
}
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
+ xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
return AESNI_MB_OP_AEAD_CIPHER_HASH;
else
@@ -101,6 +105,8 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
+ unsigned int key_larger_block_size = 0;
+ uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
if (xform == NULL) {
sess->auth.algo = NULL_HASH;
@@ -112,12 +118,23 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return -1;
}
+ /* Set the request digest size */
+ sess->auth.req_digest_len = xform->auth.digest_length;
+
/* Select auth generate/verify */
sess->auth.operation = xform->auth.op;
/* Set Authentication Parameters */
if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
sess->auth.algo = AES_XCBC;
+
+ uint16_t xcbc_mac_digest_len =
+ get_truncated_digest_byte_length(AES_XCBC);
+ if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
+ return -EINVAL;
+ }
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
sess->auth.xcbc.k1_expanded,
sess->auth.xcbc.k2, sess->auth.xcbc.k3);
@@ -126,6 +143,32 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
sess->auth.algo = AES_CMAC;
+
+ uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
+
+ if (sess->auth.req_digest_len > cmac_digest_len) {
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
+ return -EINVAL;
+ }
+ /*
+ * Multi-buffer lib supports digest sizes from 4 to 16 bytes
+ * in version 0.50 and sizes of 12 and 16 bytes,
+ * in version 0.49.
+ * If size requested is different, generate the full digest
+ * (16 bytes) in a temporary location and then memcpy
+ * the requested number of bytes.
+ */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (sess->auth.req_digest_len < 4)
+#else
+ uint16_t cmac_trunc_digest_len =
+ get_truncated_digest_byte_length(AES_CMAC);
+ if (sess->auth.req_digest_len != cmac_digest_len &&
+ sess->auth.req_digest_len != cmac_trunc_digest_len)
+#endif
+ sess->auth.gen_digest_len = cmac_digest_len;
+ else
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
(*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
sess->auth.cmac.expkey);
@@ -134,7 +177,6 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return 0;
}
-
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->auth.algo = MD5;
@@ -143,34 +185,107 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
case RTE_CRYPTO_AUTH_SHA1_HMAC:
sess->auth.algo = SHA1;
hash_oneblock_fn = mb_ops->aux.one_block.sha1;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
+ mb_ops->aux.multi_block.sha1(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
sess->auth.algo = SHA_224;
hash_oneblock_fn = mb_ops->aux.one_block.sha224;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
+ mb_ops->aux.multi_block.sha224(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sess->auth.algo = SHA_256;
hash_oneblock_fn = mb_ops->aux.one_block.sha256;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
+ mb_ops->aux.multi_block.sha256(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
sess->auth.algo = SHA_384;
hash_oneblock_fn = mb_ops->aux.one_block.sha384;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
+ mb_ops->aux.multi_block.sha384(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
sess->auth.algo = SHA_512;
hash_oneblock_fn = mb_ops->aux.one_block.sha512;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
+ mb_ops->aux.multi_block.sha512(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
default:
AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
return -ENOTSUP;
}
+ uint16_t trunc_digest_size =
+ get_truncated_digest_byte_length(sess->auth.algo);
+ uint16_t full_digest_size =
+ get_digest_byte_length(sess->auth.algo);
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (sess->auth.req_digest_len > full_digest_size ||
+ sess->auth.req_digest_len == 0) {
+#else
+ if (sess->auth.req_digest_len != trunc_digest_size) {
+#endif
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
+ return -EINVAL;
+ }
+
+ if (sess->auth.req_digest_len != trunc_digest_size &&
+ sess->auth.req_digest_len != full_digest_size)
+ sess->auth.gen_digest_len = full_digest_size;
+ else
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
/* Calculate Authentication precomputes */
- calculate_auth_precomputes(hash_oneblock_fn,
+ if (key_larger_block_size) {
+ calculate_auth_precomputes(hash_oneblock_fn,
+ sess->auth.pads.inner, sess->auth.pads.outer,
+ hashed_key,
+ xform->auth.key.length,
+ get_auth_algo_blocksize(sess->auth.algo));
+ } else {
+ calculate_auth_precomputes(hash_oneblock_fn,
sess->auth.pads.inner, sess->auth.pads.outer,
xform->auth.key.data,
xform->auth.key.length,
get_auth_algo_blocksize(sess->auth.algo));
+ }
return 0;
}
@@ -330,7 +445,10 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- aes_keyexp_t aes_keyexp_fn;
+ union {
+ aes_keyexp_t aes_keyexp_fn;
+ aes_gcm_keyexp_t aes_gcm_keyexp_fn;
+ } keyexp;
switch (xform->aead.op) {
case RTE_CRYPTO_AEAD_OP_ENCRYPT:
@@ -350,7 +468,53 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
case RTE_CRYPTO_AEAD_AES_CCM:
sess->cipher.mode = CCM;
sess->auth.algo = AES_CCM;
+
+ /* Check key length and choose key expansion function for AES */
+ switch (xform->aead.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ keyexp.aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*keyexp.aes_keyexp_fn)(xform->aead.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+ break;
+
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.mode = GCM;
+ sess->auth.algo = AES_GMAC;
+
+ switch (xform->aead.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ keyexp.aes_gcm_keyexp_fn =
+ mb_ops->aux.keyexp.aes_gcm_128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ keyexp.aes_gcm_keyexp_fn =
+ mb_ops->aux.keyexp.aes_gcm_192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ keyexp.aes_gcm_keyexp_fn =
+ mb_ops->aux.keyexp.aes_gcm_256;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ (keyexp.aes_gcm_keyexp_fn)(xform->aead.key.data,
+ &sess->cipher.gcm_key);
break;
+
default:
AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
return -ENOTSUP;
@@ -360,22 +524,15 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->iv.offset = xform->aead.iv.offset;
sess->iv.length = xform->aead.iv.length;
- /* Check key length and choose key expansion function for AES */
-
- switch (xform->aead.key.length) {
- case AES_128_BYTES:
- sess->cipher.key_length_in_bytes = AES_128_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
- break;
- default:
- AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ sess->auth.req_digest_len = xform->aead.digest_length;
+ /* CCM digests must be between 4 and 16 and an even number */
+ if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
+ sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
+ (sess->auth.req_digest_len & 1) == 1) {
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
return -EINVAL;
}
-
- /* Expanded cipher keys */
- (*aes_keyexp_fn)(xform->aead.key.data,
- sess->cipher.expanded_aes_keys.encode,
- sess->cipher.expanded_aes_keys.decode);
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
return 0;
}
@@ -397,19 +554,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
- sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
- sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_HASH_ONLY:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = NULL;
- sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_ONLY:
/*
@@ -428,13 +582,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_AEAD_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
sess->aead.aad_len = xform->aead.aad_length;
- sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_AEAD_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
sess->aead.aad_len = xform->aead.aad_length;
- sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_NOT_SUPPORTED:
@@ -573,38 +725,62 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
- if (job->cipher_mode == DES3) {
- job->aes_enc_key_expanded =
- session->cipher.exp_3des_keys.ks_ptr;
- job->aes_dec_key_expanded =
- session->cipher.exp_3des_keys.ks_ptr;
- } else {
- job->aes_enc_key_expanded =
- session->cipher.expanded_aes_keys.encode;
- job->aes_dec_key_expanded =
- session->cipher.expanded_aes_keys.decode;
- }
-
-
-
-
/* Set authentication parameters */
job->hash_alg = session->auth.algo;
- if (job->hash_alg == AES_XCBC) {
+
+ switch (job->hash_alg) {
+ case AES_XCBC:
job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
job->u.XCBC._k2 = session->auth.xcbc.k2;
job->u.XCBC._k3 = session->auth.xcbc.k3;
- } else if (job->hash_alg == AES_CCM) {
+
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
+
+ case AES_CCM:
job->u.CCM.aad = op->sym->aead.aad.data + 18;
job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
- } else if (job->hash_alg == AES_CMAC) {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
+
+ case AES_CMAC:
job->u.CMAC._key_expanded = session->auth.cmac.expkey;
job->u.CMAC._skey1 = session->auth.cmac.skey1;
job->u.CMAC._skey2 = session->auth.cmac.skey2;
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
- } else {
+ case AES_GMAC:
+ job->u.GCM.aad = op->sym->aead.aad.data;
+ job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+ job->aes_enc_key_expanded = &session->cipher.gcm_key;
+ job->aes_dec_key_expanded = &session->cipher.gcm_key;
+ break;
+
+ default:
job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
}
/* Mutable crypto operation parameters */
@@ -625,7 +801,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
rte_pktmbuf_data_len(op->sym->m_src));
} else {
m_dst = m_src;
- if (job->hash_alg == AES_CCM)
+ if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
m_offset = op->sym->aead.data.offset;
else
m_offset = op->sym->cipher.data.offset;
@@ -637,32 +813,33 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->auth_tag_output = qp->temp_digests[*digest_idx];
*digest_idx = (*digest_idx + 1) % MAX_JOBS;
} else {
- if (job->hash_alg == AES_CCM)
+ if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
job->auth_tag_output = op->sym->aead.digest.data;
else
job->auth_tag_output = op->sym->auth.digest.data;
- }
+ if (session->auth.req_digest_len != session->auth.gen_digest_len) {
+ job->auth_tag_output = qp->temp_digests[*digest_idx];
+ *digest_idx = (*digest_idx + 1) % MAX_JOBS;
+ }
+ }
/*
	 * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
- if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
- job->auth_tag_output_len_in_bytes =
- get_truncated_digest_byte_length(job->hash_alg);
- else
- job->auth_tag_output_len_in_bytes = session->auth.digest_len;
+ /* Set digest length */
+ job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
/* Set IV parameters */
-
job->iv_len_in_bytes = session->iv.length;
/* Data Parameter */
job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- if (job->hash_alg == AES_CCM) {
+ switch (job->hash_alg) {
+ case AES_CCM:
job->cipher_start_src_offset_in_bytes =
op->sym->aead.data.offset;
job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
@@ -671,7 +848,19 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
session->iv.offset + 1);
- } else {
+ break;
+
+ case AES_GMAC:
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->aead.data.offset;
+ job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+ job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ break;
+
+ default:
job->cipher_start_src_offset_in_bytes =
op->sym->cipher.data.offset;
job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
@@ -690,20 +879,37 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
}
static inline void
-verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
- struct rte_crypto_op *op) {
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+ struct aesni_mb_session *sess)
+{
/* Verify digest if required */
- if (job->hash_alg == AES_CCM) {
+ if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC) {
if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
+ sess->auth.req_digest_len) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
+ sess->auth.req_digest_len) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
}
+static inline void
+generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+ struct aesni_mb_session *sess)
+{
+	/* No extra copy needed */
+ if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
+ return;
+
+ /*
+ * This can only happen for HMAC, so only digest
+ * for authentication algos is required
+ */
+ memcpy(op->sym->auth.digest.data, job->auth_tag_output,
+ sess->auth.req_digest_len);
+}
+
/**
* Process a completed job and return rte_mbuf which job processed
*
@@ -730,7 +936,9 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (job->hash_alg != NULL_HASH) {
if (sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
- verify_digest(qp, job, op);
+ verify_digest(job, op, sess);
+ else
+ generate_digest(job, op, sess);
}
break;
default:
@@ -833,22 +1041,30 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint8_t digest_idx = qp->digest_idx;
do {
- /* Get next operation to process from ingress queue */
- retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
- if (retval < 0)
- break;
-
/* Get next free mb job struct from mb manager */
job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
if (unlikely(job == NULL)) {
/* if no free mb job structs we need to flush mb_mgr */
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
- (nb_ops - processed_jobs) - 1);
+ nb_ops - processed_jobs);
+
+ if (nb_ops == processed_jobs)
+ break;
job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
}
+ /*
+ * Get next operation to process from ingress queue.
+ * There is no need to return the job to the MB_MGR
+ * if there are no more operations to process, since the MB_MGR
+ * can use that pointer again in next get_next calls.
+ */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
+
retval = set_mb_job_params(job, qp, op, &digest_idx);
if (unlikely(retval != 0)) {
qp->stats.dequeue_err_count++;
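The SHA*_HMAC cases above apply the usual RFC 2104 rule: if the authentication key is longer than the hash block size, it is first reduced with the hash itself before the ipad/opad precomputation. A hedged sketch of that rule in isolation (the helper and function-pointer type are illustrative only; the real driver uses the multi_block hash table shown earlier):

#include <stdint.h>

typedef void (*example_hash_fn_t)(const void *data, uint32_t len, void *digest);

/* Return the key material to feed into the HMAC pad precomputation:
 * the key itself when it fits in one block, or H(key) when it does not.
 * hashed_key must be at least one digest in size.
 */
static const uint8_t *
example_hmac_effective_key(const uint8_t *key, uint32_t key_len,
			   uint32_t block_size, example_hash_fn_t hash,
			   uint8_t *hashed_key)
{
	if (key_len <= block_size)
		return key;

	hash(key, key_len, hashed_key);
	return hashed_key;
}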
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index ab26e5ae..43f6c26e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -25,9 +25,15 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 16,
+ .increment = 1
+#else
.min = 12,
.max = 12,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -42,13 +48,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 64,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 64,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 20,
+ .increment = 1
+#else
.min = 12,
.max = 12,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -63,13 +79,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 64,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 64,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 28,
+ .increment = 1
+#else
.min = 14,
.max = 14,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -84,13 +110,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 64,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 64,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 32,
+ .increment = 1
+#else
.min = 16,
.max = 16,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -105,13 +141,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 128,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 128,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 48,
+ .increment = 1
+#else
.min = 24,
.max = 24,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -126,13 +172,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 128,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 128,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 64,
+ .increment = 1
+#else
.min = 32,
.max = 32,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -322,14 +378,44 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.increment = 0
},
.digest_size = {
- .min = 12,
+ .min = 1,
.max = 16,
- .increment = 4
+ .increment = 1
},
.iv_size = { 0 }
}, }
}, }
},
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 70e9d18e..d8021cda 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -7,15 +7,6 @@
#include "aesni_mb_ops.h"
-/*
- * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
- * so if macro is not defined, it means that the version is 0.49.
- */
-#if !defined(IMB_VERSION_NUM)
-#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
-#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
-#endif
-
#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< AES-NI Multi buffer PMD device name */
@@ -31,8 +22,8 @@ int aesni_mb_logtype_driver;
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
-/* Maximum length for digest (SHA-512 truncated needs 32 bytes) */
-#define DIGEST_LENGTH_MAX 32
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
static const unsigned auth_blocksize[] = {
[MD5] = 64,
[SHA1] = 64,
@@ -64,7 +55,7 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
- [AES_CMAC] = 16,
+ [AES_CMAC] = 12,
[AES_CCM] = 8,
[NULL_HASH] = 0
};
@@ -91,11 +82,13 @@ static const unsigned auth_digest_byte_lengths[] = {
[SHA_512] = 64,
[AES_XCBC] = 16,
[AES_CMAC] = 16,
+ [AES_GMAC] = 12,
[NULL_HASH] = 0
};
/**
- * Get the output digest size in bytes for a specified authentication algorithm
+ * Get the full digest size in bytes for a specified authentication algorithm
+ * (if available in the Multi-buffer library)
*
* @Note: this function will not return a valid value for a non-valid
* authentication algorithm
@@ -180,6 +173,8 @@ struct aesni_mb_session {
const void *ks_ptr[3];
uint64_t key[3][16];
} exp_3des_keys;
+
+ struct gcm_key_data gcm_key;
};
/**< Expanded AES keys - Allocating space to
* contain the maximum expanded key size which
@@ -226,8 +221,10 @@ struct aesni_mb_session {
} cmac;
/**< Expanded XCBC authentication keys */
};
- /** digest size */
- uint16_t digest_len;
+ /** Generated digest size by the Multi-buffer library */
+ uint16_t gen_digest_len;
+ /** Requested digest size from Cryptodev */
+ uint16_t req_digest_len;
} auth;
struct {
diff --git a/drivers/crypto/caam_jr/Makefile b/drivers/crypto/caam_jr/Makefile
new file mode 100644
index 00000000..88cdf741
--- /dev/null
+++ b/drivers/crypto/caam_jr/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_caam_jr.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+CFLAGS += -D _GNU_SOURCE
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/caam_jr
+# sharing the HW flib headers from the dpaa2_sec PMD
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_caam_jr_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr_capabilities.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr_uio.c
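+# Note: the sources above are built only when CONFIG_RTE_LIBRTE_PMD_CAAM_JR
+# is enabled in the DPDK build configuration.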
+# library dependencies
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/caam_jr/caam_jr.c b/drivers/crypto/caam_jr/caam_jr.c
new file mode 100644
index 00000000..f505adf6
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr.c
@@ -0,0 +1,2508 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sched.h>
+#include <net/if.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_security_driver.h>
+#include <rte_hexdump.h>
+
+#include <caam_jr_capabilities.h>
+#include <caam_jr_config.h>
+#include <caam_jr_hw_specific.h>
+#include <caam_jr_pvt.h>
+#include <caam_jr_desc.h>
+#include <caam_jr_log.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <of.h>
+
+#define CAAM_JR_DBG 0
+#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
+static uint8_t cryptodev_driver_id;
+int caam_jr_logtype;
+
+enum rta_sec_era rta_sec_era;
+
+/* Lists the states possible for the SEC user space driver. */
+enum sec_driver_state_e {
+ SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
+ SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used*/
+ SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */
+};
+
+/* Job rings used for communication with SEC HW */
+static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
+
+/* The current state of SEC user space driver */
+static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
+
+/* The number of job rings used by SEC user space driver */
+static int g_job_rings_no;
+static int g_job_rings_max;
+
+struct sec_outring_entry {
+ phys_addr_t desc; /* Pointer to completed descriptor */
+ uint32_t status; /* Status for completed descriptor */
+} __rte_packed;
+
+/* virtual to physical address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
+{
+ PMD_INIT_FUNC_TRACE();
+ return (size_t)vaddr - ctx->vtop_offset;
+}
+
+static inline void
+caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
+{
+ PMD_INIT_FUNC_TRACE();
+ /* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+}
+
+static inline struct caam_jr_op_ctx *
+caam_jr_alloc_ctx(struct caam_jr_session *ses)
+{
+ struct caam_jr_op_ctx *ctx;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+ if (!ctx || ret) {
+ CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
+ return NULL;
+ }
+ /*
+ * Clear the SG memory. There are 16 SG entries of 16 bytes each.
+ * One call to dcbz_64() clears 64 bytes, so it is called 4 times
+ * to clear all the SG entries. Since caam_jr_alloc_ctx() is called
+ * for each packet, memset() would be costlier than dcbz_64().
+ */
+ dcbz_64(&ctx->sg[SG_CACHELINE_0]);
+ dcbz_64(&ctx->sg[SG_CACHELINE_1]);
+ dcbz_64(&ctx->sg[SG_CACHELINE_2]);
+ dcbz_64(&ctx->sg[SG_CACHELINE_3]);
+
+ ctx->ctx_pool = ses->ctx_pool;
+ ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+ return ctx;
+}
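+
+/*
+ * Note on address translation: caam_jr_vtop_ctx() relies on the
+ * vtop_offset cached above. Since vtop_offset = (virtual address of ctx) -
+ * (IOVA of ctx), any virtual address inside the same ctx object can be
+ * translated without calling rte_mempool_virt2iova() again. A minimal
+ * illustration (assuming ctx was obtained from caam_jr_alloc_ctx()):
+ *
+ *	phys_addr_t sg_iova = caam_jr_vtop_ctx(ctx, &ctx->sg[0]);
+ */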
+
+static
+void caam_jr_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct caam_jr_qp **qp = (struct caam_jr_qp **)
+ dev->data->queue_pairs;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ CAAM_JR_ERR("Invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ CAAM_JR_WARN("Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->tx_pkts;
+ stats->dequeued_count += qp[i]->rx_pkts;
+ stats->enqueue_err_count += qp[i]->tx_errs;
+ stats->dequeue_err_count += qp[i]->rx_errs;
+ CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
+ "\n\tTX Ring Full = %" PRIu64,
+ qp[i]->rx_poll_err,
+ qp[i]->tx_ring_full);
+ }
+}
+
+static
+void caam_jr_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct caam_jr_qp **qp = (struct caam_jr_qp **)
+ (dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ CAAM_JR_WARN("Uninitialised queue pair");
+ continue;
+ }
+ qp[i]->rx_pkts = 0;
+ qp[i]->rx_errs = 0;
+ qp[i]->rx_poll_err = 0;
+ qp[i]->tx_pkts = 0;
+ qp[i]->tx_errs = 0;
+ qp[i]->tx_ring_full = 0;
+ }
+}
+
+static inline int
+is_cipher_only(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int
+is_auth_only(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int
+is_aead(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg == 0) &&
+ (ses->auth_alg == 0) &&
+ (ses->aead_alg != 0));
+}
+
+static inline int
+is_auth_cipher(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+ (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int
+is_proto_ipsec(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
+}
+
+static inline int
+is_encode(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ses->dir == DIR_ENC;
+}
+
+static inline int
+is_decode(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ses->dir == DIR_DEC;
+}
+
+static inline void
+caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
+{
+ PMD_INIT_FUNC_TRACE();
+ switch (ses->auth_alg) {
+ case RTE_CRYPTO_AUTH_NULL:
+ ses->digest_length = 0;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ default:
+ CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
+ }
+}
+
+static inline void
+caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
+{
+ PMD_INIT_FUNC_TRACE();
+ switch (ses->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CTR;
+ break;
+ default:
+ CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
+ }
+}
+
+static inline void
+caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
+{
+ PMD_INIT_FUNC_TRACE();
+ switch (ses->aead_alg) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ alginfo->algtype = OP_ALG_ALGSEL_AES;
+ alginfo->algmode = OP_ALG_AAI_GCM;
+ break;
+ default:
+ CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
+ }
+}
+
+/* prepare command block of the session */
+static int
+caam_jr_prep_cdb(struct caam_jr_session *ses)
+{
+ struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
+ int32_t shared_desc_len = 0;
+ struct sec_cdb *cdb;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ PMD_INIT_FUNC_TRACE();
+ if (ses->cdb)
+ caam_jr_dma_free(ses->cdb);
+
+ cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
+ if (!cdb) {
+ CAAM_JR_ERR("failed to allocate memory for cdb\n");
+ return -1;
+ }
+
+ ses->cdb = cdb;
+
+ memset(cdb, 0, sizeof(struct sec_cdb));
+
+ if (is_cipher_only(ses)) {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported cipher alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_blkcipher(
+ cdb->sh_desc, true,
+ swap, &alginfo_c,
+ NULL,
+ ses->iv.length,
+ ses->dir);
+ } else if (is_auth_only(ses)) {
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported auth alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
+ swap, &alginfo_a,
+ !ses->dir,
+ ses->digest_length);
+ } else if (is_aead(ses)) {
+ caam_aead_alg(ses, &alginfo);
+ if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported aead alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+ alginfo.key = (size_t)ses->aead_key.data;
+ alginfo.keylen = ses->aead_key.length;
+ alginfo.key_enc_flags = 0;
+ alginfo.key_type = RTA_DATA_IMM;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_gcm_encap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ else
+ shared_desc_len = cnstr_shdsc_gcm_decap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ } else {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported cipher alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported auth alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = alginfo_c.keylen;
+ cdb->sh_desc[1] = alginfo_a.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ CAAM_JR_ERR("Crypto: Incorrect key lengths");
+ rte_free(cdb);
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ alginfo_c.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_c.key = (size_t)caam_jr_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
+ alginfo_c.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ alginfo_a.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_a.key = (size_t)caam_jr_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
+ alginfo_a.key_type = RTA_DATA_PTR;
+ }
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+ if (is_proto_ipsec(ses)) {
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &alginfo_c, &alginfo_a);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->decap_pdb,
+ &alginfo_c, &alginfo_a);
+ }
+ } else {
+ /* Auth_only_len is set to 0 here; it is
+ * overwritten per packet in the job descriptor.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
+ }
+ }
+
+ if (shared_desc_len < 0) {
+ CAAM_JR_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
+#if CAAM_JR_DBG
+ SEC_DUMP_DESC(cdb->sh_desc);
+#endif
+
+ cdb->sh_hdr.hi.field.idlen = shared_desc_len;
+
+ return 0;
+}
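+
+/*
+ * The prepared CDB holds the shared descriptor and its length
+ * (sh_hdr.hi.field.idlen). It is (re)built lazily: caam_jr_enqueue_op()
+ * calls caam_jr_prep_cdb() the first time a session is used on a given
+ * queue pair, e.g. (sketch of the caller side, see caam_jr_enqueue_op()):
+ *
+ *	if (unlikely(!ses->qp || ses->qp != qp)) {
+ *		ses->qp = qp;
+ *		caam_jr_prep_cdb(ses);
+ *	}
+ */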
+
+/* @brief Poll the HW for already processed jobs in the JR
+ * and either silently discard the available jobs or notify them to the UA
+ * with the indicated error code.
+ *
+ * @param [in,out] job_ring The job ring to poll.
+ * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if
+ * descriptors are to be discarded
+ * or notified to the UA with the given error_code.
+ * @param [out] notified_descs Number of notified descriptors. Can be NULL
+ * if do_notify is #FALSE.
+ */
+static void
+hw_flush_job_ring(struct sec_job_ring_t *job_ring,
+ uint32_t do_notify,
+ uint32_t *notified_descs)
+{
+ int32_t jobs_no_to_discard = 0;
+ int32_t discarded_descs_no = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
+ job_ring, job_ring->pidx, job_ring->cidx, do_notify);
+
+ jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
+
+ /* Discard all jobs */
+ CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
+ job_ring, job_ring->pidx, job_ring->cidx,
+ jobs_no_to_discard);
+
+ while (jobs_no_to_discard > discarded_descs_no) {
+ discarded_descs_no++;
+ /* Increment the consumer index for the current job ring,
+ * AFTER saving the job in a temporary location.
+ */
+ job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
+ SEC_JOB_RING_SIZE);
+
+ hw_remove_entries(job_ring, 1);
+ }
+
+ if (do_notify == true) {
+ ASSERT(notified_descs != NULL);
+ *notified_descs = discarded_descs_no;
+ }
+}
+
+/* @brief Poll the HW for already processed jobs in the JR
+ * and notify the available jobs to UA.
+ *
+ * @param [in] job_ring The job ring to poll.
+ * @param [in] limit The maximum number of jobs to notify.
+ * If set to negative value, all available jobs are
+ * notified.
+ *
+ * @retval >=0 for No of jobs notified to UA.
+ * @retval -1 for error
+ */
+static int
+hw_poll_job_ring(struct sec_job_ring_t *job_ring,
+ struct rte_crypto_op **ops, int32_t limit,
+ struct caam_jr_qp *jr_qp)
+{
+ int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
+ int32_t number_of_jobs_available = 0;
+ int32_t notified_descs_no = 0;
+ uint32_t sec_error_code = 0;
+ struct job_descriptor *current_desc;
+ phys_addr_t current_desc_addr;
+ phys_addr_t *temp_addr;
+ struct caam_jr_op_ctx *ctx;
+
+ PMD_INIT_FUNC_TRACE();
+ /* TODO: check that ops have memory */
+ /* check here if any JR error that cannot be written
+ * in the output status word has occurred
+ */
+ if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
+ CAAM_JR_INFO("err received");
+ sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
+ GET_JR_REG(JRINT, job_ring));
+ if (unlikely(sec_error_code)) {
+ hw_job_ring_error_print(job_ring, sec_error_code);
+ return -1;
+ }
+ }
+ /* compute the number of jobs available in the job ring based on the
+ * producer and consumer index values.
+ */
+ number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
+ /* Compute the number of notifications that need to be raised to UA
+ * If limit > total number of done jobs -> notify all done jobs
+ * If limit = 0 -> error
+ * If limit < total number of done jobs -> notify a number
+ * of done jobs equal with limit
+ */
+ jobs_no_to_notify = (limit > number_of_jobs_available) ?
+ number_of_jobs_available : limit;
+ CAAM_JR_DP_DEBUG(
+ "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
+ job_ring, job_ring->pidx, job_ring->cidx,
+ limit, number_of_jobs_available, jobs_no_to_notify);
+
+ rte_smp_rmb();
+
+ while (jobs_no_to_notify > notified_descs_no) {
+ static uint64_t false_alarm;
+ static uint64_t real_poll;
+
+ /* Get job status here */
+ sec_error_code = job_ring->output_ring[job_ring->cidx].status;
+ /* Get completed descriptor */
+ temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
+ current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
+
+ real_poll++;
+ /* TODO: check if it is a false alarm - no desc present */
+ if (!current_desc_addr) {
+ false_alarm++;
+ printf("false alarm %" PRIu64 "real %" PRIu64
+ " sec_err =0x%x cidx Index =0%d\n",
+ false_alarm, real_poll,
+ sec_error_code, job_ring->cidx);
+ rte_panic("CAAM JR descriptor NULL");
+ return notified_descs_no;
+ }
+ current_desc = (struct job_descriptor *)
+ caam_jr_dma_ptov(current_desc_addr);
+ /* now increment the consumer index for the current job ring,
+ * AFTER saving job in temporary location!
+ */
+ job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
+ SEC_JOB_RING_SIZE);
+ /* Signal that the job has been processed and the slot is free*/
+ hw_remove_entries(job_ring, 1);
+ /*TODO for multiple ops, packets*/
+ ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
+ if (unlikely(sec_error_code)) {
+ CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
+ job_ring->cidx, sec_error_code);
+ hw_handle_job_ring_error(job_ring, sec_error_code);
+ /* TODO: improve with exact errors */
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ jr_qp->rx_errs++;
+ } else {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if CAAM_JR_DBG
+ if (ctx->op->sym->m_dst) {
+ rte_hexdump(stdout, "PROCESSED",
+ rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
+ rte_pktmbuf_data_len(ctx->op->sym->m_dst));
+ } else {
+ rte_hexdump(stdout, "PROCESSED",
+ rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
+ rte_pktmbuf_data_len(ctx->op->sym->m_src));
+ }
+#endif
+ }
+ if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct ip *ip4_hdr;
+
+ if (ctx->op->sym->m_dst) {
+ /*TODO check for ip header or other*/
+ ip4_hdr = (struct ip *)
+ rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
+ ctx->op->sym->m_dst->pkt_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ ctx->op->sym->m_dst->data_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ } else {
+ ip4_hdr = (struct ip *)
+ rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
+ ctx->op->sym->m_src->pkt_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ ctx->op->sym->m_src->data_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ }
+ }
+ *ops = ctx->op;
+ caam_jr_op_ending(ctx);
+ ops++;
+ notified_descs_no++;
+ }
+ return notified_descs_no;
+}
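+
+/*
+ * Each completed entry in job_ring->output_ring carries the descriptor
+ * IOVA and a status word; the ctx (and therefore the rte_crypto_op) is
+ * recovered with container_of() on the descriptor address, which is why
+ * the job descriptor is a member of struct caam_jr_op_ctx.
+ */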
+
+static uint16_t
+caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
+ struct sec_job_ring_t *ring = jr_qp->ring;
+ int num_rx;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
+
+ /* Poll the job ring until either nb_ops completed jobs have been
+ * dequeued or no more completed jobs are available.
+ */
+
+ /* Run hw poll job ring */
+ num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
+ if (num_rx < 0) {
+ CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
+ return 0;
+ }
+
+ CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
+
+ if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
+ if (num_rx < nb_ops) {
+ ret = caam_jr_enable_irqs(ring->irq_fd);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to enable irqs for job ring %p", ring);
+ }
+ } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
+
+ /* Always enable IRQ generation when in pure IRQ mode */
+ ret = caam_jr_enable_irqs(ring->irq_fd);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to enable irqs for job ring %p", ring);
+ }
+
+ jr_qp->rx_pkts += num_rx;
+
+ return num_rx;
+}
+
+/**
+ * packet looks like:
+ * |<----data_len------->|
+ * |ip_header|ah_header|icv|payload|
+ * ^
+ * |
+ * mbuf->pkt.data
+ */
+static inline struct caam_jr_op_ctx *
+build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ int length;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ struct sec_job_descriptor_t *jobdescr;
+ uint8_t extra_segs;
+
+ PMD_INIT_FUNC_TRACE();
+ if (is_decode(ses))
+ extra_segs = 2;
+ else
+ extra_segs = 1;
+
+ if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+ CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
+ 0, ses->digest_length);
+
+ /*input */
+ sg = &ctx->sg[0];
+ length = sym->auth.data.length;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ /* digest verification case */
+ sg++;
+ /* hash result or digest, save digest first */
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+#if CAAM_JR_DBG
+ rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
+#endif
+ sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ length += ses->digest_length;
+ } else {
+ length -= ses->digest_length;
+ }
+
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_IN_PTR(jobdescr,
+ (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
+ /* enabling sg list */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ rte_iova_t start_addr;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ struct sec_job_descriptor_t *jobdescr;
+
+ PMD_INIT_FUNC_TRACE();
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ start_addr = rte_pktmbuf_iova(sym->m_src);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
+ 0, ses->digest_length);
+
+ /*input */
+ if (is_decode(ses)) {
+ sg = &ctx->sg[0];
+ SEC_JD_SET_IN_PTR(jobdescr,
+ (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
+ (sym->auth.data.length + ses->digest_length));
+ /* enabling sg list */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ /* hash result or digest, save digest first */
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(sym->auth.data.length);
+
+#if CAAM_JR_DBG
+ rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
+#endif
+ /* let's check digest by hw */
+ sg++;
+ sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ } else {
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
+ sym->auth.data.offset, sym->auth.data.length);
+ }
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg, *in_sg;
+ int length;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+ uint8_t reg_segs;
+
+ PMD_INIT_FUNC_TRACE();
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
+ } else {
+ mbuf = sym->m_src;
+ reg_segs = mbuf->nb_segs * 2 + 2;
+ }
+
+ if (reg_segs > MAX_SG_ENTRIES) {
+ CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+#if CAAM_JR_DBG
+ CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
+ sym->m_src->data_off, sym->cipher.data.offset,
+ sym->cipher.data.length, ses->iv.length);
+#endif
+ /* output */
+ if (sym->m_dst)
+ mbuf = sym->m_dst;
+ else
+ mbuf = sym->m_src;
+
+ sg = &ctx->sg[0];
+ length = sym->cipher.data.length;
+
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_OUT_PTR(jobdescr,
+ (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
+ length);
+ /*enabling sg bit */
+ (jobdescr)->seq_out.command.word |= 0x01000000;
+
+ /*input */
+ sg++;
+ mbuf = sym->m_src;
+ in_sg = sg;
+
+ length = sym->cipher.data.length + ses->iv.length;
+
+ /* IV */
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+
+ /* 1st seg */
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
+ length);
+ /*enabling sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+
+ PMD_INIT_FUNC_TRACE();
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+#if CAAM_JR_DBG
+ CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
+ sym->m_src->data_off, sym->cipher.data.offset,
+ sym->cipher.data.length, ses->iv.length);
+#endif
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
+ sym->cipher.data.offset,
+ sym->cipher.data.length + ses->iv.length);
+
+ /*input */
+ sg = &ctx->sg[0];
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
+ sym->cipher.data.length + ses->iv.length);
+ /*enabling sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+
+ sg = &ctx->sg[1];
+ sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(sym->cipher.data.length);
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ return ctx;
+}
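+
+/*
+ * For the contiguous cipher-only case above, the input side uses a
+ * two-entry scatter-gather table built in ctx->sg[]:
+ *
+ *	sg[0]: IV       (ses->iv.length bytes, taken from the crypto op)
+ *	sg[1]: payload  (sym->cipher.data.length bytes, FIN bit set)
+ *
+ * while the output is written directly to the destination mbuf at
+ * cipher.data.offset.
+ */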
+
+/* For decapsulation:
+ * Input:
+ * +----+----------------+--------------------------------+-----+
+ * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
+ * +----+----------------+--------------------------------+-----+
+ * Output:
+ * +--------------------------------+
+ * | Decrypted & authenticated data |
+ * +--------------------------------+
+ */
+
+static inline struct caam_jr_op_ctx *
+build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint32_t length = 0;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+ uint32_t auth_only_len;
+
+ PMD_INIT_FUNC_TRACE();
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 3;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ if (sym->m_dst)
+ mbuf = sym->m_dst;
+ else
+ mbuf = sym->m_src;
+
+ out_sg = &ctx->sg[0];
+ if (is_encode(ses))
+ length = sym->auth.data.length + ses->digest_length;
+ else
+ length = sym->auth.data.length;
+
+ sg = &ctx->sg[0];
+
+ /* 1st seg */
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+
+ if (is_encode(ses)) {
+ /* set auth output */
+ sg++;
+ sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
+ sg->len = cpu_to_caam32(ses->digest_length);
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_OUT_PTR(jobdescr,
+ (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
+ /* set sg bit */
+ (jobdescr)->seq_out.command.word |= 0x01000000;
+
+ /* input */
+ sg++;
+ mbuf = sym->m_src;
+ in_sg = sg;
+ if (is_encode(ses))
+ length = ses->iv.length + sym->auth.data.length;
+ else
+ length = ses->iv.length + sym->auth.data.length
+ + ses->digest_length;
+
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+
+ sg++;
+ /* 1st seg */
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ sg++;
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
+ length);
+ /* set sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+ /* Auth_only_len is set to 0 in the shared descriptor and is
+ * overwritten here in the job descriptor, which updates
+ * the DPOVRD register.
+ */
+ if (auth_only_len)
+ /* enable DPOVRD and pass the auth-only length */
+ (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
+
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint32_t length = 0;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+ uint32_t auth_only_len;
+
+ PMD_INIT_FUNC_TRACE();
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* input */
+ sg = &ctx->sg[0];
+ if (is_encode(ses)) {
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+ length += ses->iv.length;
+
+ sg++;
+ sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(sym->auth.data.length);
+ length += sym->auth.data.length;
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ } else {
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+ length += ses->iv.length;
+
+ sg++;
+ sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(sym->auth.data.length);
+ length += sym->auth.data.length;
+
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg++;
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ length += ses->digest_length;
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ }
+
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
+ length);
+ /* set sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ /* output */
+ sg = &ctx->sg[6];
+
+ sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(sym->cipher.data.length);
+ length = sym->cipher.data.length;
+
+ if (is_encode(ses)) {
+ /* set auth output */
+ sg++;
+ sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
+ sg->len = cpu_to_caam32(ses->digest_length);
+ length += ses->digest_length;
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_OUT_PTR(jobdescr,
+ (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
+ /* set sg bit */
+ (jobdescr)->seq_out.command.word |= 0x01000000;
+
+ /* Auth_only_len is set to 0 in the shared descriptor and is
+ * overwritten here in the job descriptor, which updates
+ * the DPOVRD register.
+ */
+ if (auth_only_len)
+ /* enable DPOVRD and pass the auth-only length */
+ (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
+
+ return ctx;
+}
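+
+/*
+ * Scatter-gather layout used by build_cipher_auth() above (contiguous
+ * mbufs): input entries start at ctx->sg[0] (IV, auth range, plus the
+ * received digest when decrypting); output entries start at ctx->sg[6]
+ * (cipher range, plus the generated digest when encrypting). The last
+ * entry of each table carries the SEC4_SG_LEN_FIN marker.
+ */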
+
+static inline struct caam_jr_op_ctx *
+build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx = NULL;
+ phys_addr_t src_start_addr, dst_start_addr;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ struct sec_job_descriptor_t *jobdescr;
+
+ PMD_INIT_FUNC_TRACE();
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
+ sym->m_src->buf_len - sym->m_src->data_off);
+ /* input */
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
+ sym->m_src->pkt_len);
+ sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+
+ return ctx;
+}
+
+static int
+caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
+{
+ struct sec_job_ring_t *ring = qp->ring;
+ struct caam_jr_session *ses;
+ struct caam_jr_op_ctx *ctx = NULL;
+ struct sec_job_descriptor_t *jobdescr __rte_unused;
+
+ PMD_INIT_FUNC_TRACE();
+ switch (op->sess_type) {
+ case RTE_CRYPTO_OP_WITH_SESSION:
+ ses = (struct caam_jr_session *)
+ get_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id);
+ break;
+ case RTE_CRYPTO_OP_SECURITY_SESSION:
+ ses = (struct caam_jr_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+ break;
+ default:
+ CAAM_JR_DP_ERR("sessionless crypto op not supported");
+ qp->tx_errs++;
+ return -1;
+ }
+
+ if (unlikely(!ses->qp || ses->qp != qp)) {
+ CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
+ ses->qp = qp;
+ caam_jr_prep_cdb(ses);
+ }
+
+ if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ if (is_auth_cipher(ses))
+ ctx = build_cipher_auth(op, ses);
+ else if (is_aead(ses))
+ goto err1;
+ else if (is_auth_only(ses))
+ ctx = build_auth_only(op, ses);
+ else if (is_cipher_only(ses))
+ ctx = build_cipher_only(op, ses);
+ else if (is_proto_ipsec(ses))
+ ctx = build_proto(op, ses);
+ } else {
+ if (is_auth_cipher(ses))
+ ctx = build_cipher_auth_sg(op, ses);
+ else if (is_aead(ses))
+ goto err1;
+ else if (is_auth_only(ses))
+ ctx = build_auth_only_sg(op, ses);
+ else if (is_cipher_only(ses))
+ ctx = build_cipher_only_sg(op, ses);
+ }
+err1:
+ if (unlikely(!ctx)) {
+ qp->tx_errs++;
+ CAAM_JR_ERR("not supported sec op");
+ return -1;
+ }
+#if CAAM_JR_DBG
+ if (is_decode(ses))
+ rte_hexdump(stdout, "DECODE",
+ rte_pktmbuf_mtod(op->sym->m_src, void *),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ else
+ rte_hexdump(stdout, "ENCODE",
+ rte_pktmbuf_mtod(op->sym->m_src, void *),
+ rte_pktmbuf_data_len(op->sym->m_src));
+
+ printf("\n JD before conversion\n");
+ for (int i = 0; i < 12; i++)
+ printf("\n 0x%08x", ctx->jobdes.desc[i]);
+#endif
+
+ CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
+ ring, ring->pidx, ring->cidx);
+
+ /* todo - do we want to retry */
+ if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
+ SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
+ CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
+ ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
+ caam_jr_op_ending(ctx);
+ qp->tx_ring_full++;
+ return -EBUSY;
+ }
+
+#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ jobdescr->deschdr.command.word =
+ cpu_to_caam32(jobdescr->deschdr.command.word);
+ jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
+ jobdescr->seq_out.command.word =
+ cpu_to_caam32(jobdescr->seq_out.command.word);
+ jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
+ jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
+ jobdescr->seq_in.command.word =
+ cpu_to_caam32(jobdescr->seq_in.command.word);
+ jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
+ jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
+ jobdescr->load_dpovrd.command.word =
+ cpu_to_caam32(jobdescr->load_dpovrd.command.word);
+ jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
+#endif
+
+ /* Set ptr in input ring to current descriptor */
+ sec_write_addr(&ring->input_ring[ring->pidx],
+ (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
+ rte_smp_wmb();
+
+ /* Notify HW that a new job is enqueued */
+ hw_enqueue_desc_on_job_ring(ring);
+
+ /* increment the producer index for the current job ring */
+ ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
+
+ return 0;
+}
+
+static uint16_t
+caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Transmit the frames to the given device and queue pair */
+ uint32_t loop;
+ int32_t ret;
+ struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
+ uint16_t num_tx = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ /* Prepare each packet which is to be sent */
+ for (loop = 0; loop < nb_ops; loop++) {
+ ret = caam_jr_enqueue_op(ops[loop], jr_qp);
+ if (!ret)
+ num_tx++;
+ }
+
+ jr_qp->tx_pkts += num_tx;
+
+ return num_tx;
+}
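+
+/*
+ * From an application's point of view these burst functions are not
+ * called directly; they back the generic cryptodev API, e.g. (sketch,
+ * dev_id/qp_id/ops are application-provided):
+ *
+ *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
+ *	...
+ *	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
+ */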
+
+/* Release queue pair */
+static int
+caam_jr_queue_pair_release(struct rte_cryptodev *dev,
+ uint16_t qp_id)
+{
+ struct sec_job_ring_t *internals;
+ struct caam_jr_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ CAAM_JR_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->ring = NULL;
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/* Setup a queue pair */
+static int
+caam_jr_queue_pair_setup(
+ struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct sec_job_ring_t *internals;
+ struct caam_jr_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ CAAM_JR_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->ring = internals;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+/* Return the number of allocated queue pairs */
+static uint32_t
+caam_jr_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/* Returns the size of the caam_jr session structure */
+static unsigned int
+caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(struct caam_jr_session);
+}
+
+static int
+caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct caam_jr_session *session)
+{
+ PMD_INIT_FUNC_TRACE();
+ session->cipher_alg = xform->cipher.algo;
+ session->iv.length = xform->cipher.iv.length;
+ session->iv.offset = xform->cipher.iv.offset;
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
+ CAAM_JR_ERR("No Memory for cipher key\n");
+ return -ENOMEM;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct caam_jr_session *session)
+{
+ PMD_INIT_FUNC_TRACE();
+ session->auth_alg = xform->auth.algo;
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
+ CAAM_JR_ERR("No Memory for auth key\n");
+ return -ENOMEM;
+ }
+ session->auth_key.length = xform->auth.key.length;
+ session->digest_length = xform->auth.digest_length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct caam_jr_session *session)
+{
+ PMD_INIT_FUNC_TRACE();
+ session->aead_alg = xform->aead.algo;
+ session->iv.length = xform->aead.iv.length;
+ session->iv.offset = xform->aead.iv.offset;
+ session->auth_only_len = xform->aead.aad_length;
+ session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
+ CAAM_JR_ERR("No Memory for aead key\n");
+ return -ENOMEM;
+ }
+ session->aead_key.length = xform->aead.key.length;
+ session->digest_length = xform->aead.digest_length;
+
+ memcpy(session->aead_key.data, xform->aead.key.data,
+ xform->aead.key.length);
+ session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+caam_jr_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct sec_job_ring_t *internals = dev->data->dev_private;
+ struct caam_jr_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ CAAM_JR_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ caam_jr_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ caam_jr_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ caam_jr_cipher_init(dev, xform, session);
+ caam_jr_auth_init(dev, xform->next, session);
+ } else {
+ CAAM_JR_ERR("Not supported: Auth then Cipher");
+ goto err1;
+ }
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ caam_jr_auth_init(dev, xform, session);
+ caam_jr_cipher_init(dev, xform->next, session);
+ } else {
+ CAAM_JR_ERR("Not supported: Auth then Cipher");
+ goto err1;
+ }
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ caam_jr_aead_init(dev, xform, session);
+
+ } else {
+ CAAM_JR_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+ session->ctx_pool = internals->ctx_pool;
+
+ return 0;
+
+err1:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ memset(session, 0, sizeof(struct caam_jr_session));
+
+ return -EINVAL;
+}
+
+static int
+caam_jr_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CAAM_JR_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ memset(sess_private_data, 0, sizeof(struct caam_jr_session));
+ ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ CAAM_JR_ERR("failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+ return 0;
+}
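+
+/*
+ * Typical application-side flow that ends up here (sketch; the mempools,
+ * dev_id and xform are application-provided):
+ *
+ *	sess = rte_cryptodev_sym_session_create(sess_mp);
+ *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp);
+ *
+ * rte_cryptodev_sym_session_init() invokes this PMD callback to populate
+ * the per-driver private session data.
+ */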
+
+/* Clear the memory of session so it doesn't leave key material behind */
+static void
+caam_jr_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(struct caam_jr_session));
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct sec_job_ring_t *internals = dev->data->dev_private;
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ struct caam_jr_session *session = (struct caam_jr_session *)sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ session->proto_alg = conf->protocol;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ CAAM_JR_ERR("No Memory for cipher key\n");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ CAAM_JR_ERR("No Memory for auth key\n");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
+ auth_xform->algo);
+ goto out;
+ default:
+ CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto out;
+ }
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
+ cipher_xform->algo);
+ goto out;
+ default:
+ CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_v = IPVERSION;
+ session->ip4_hdr.ip_hl = 5;
+ session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ session->ip4_hdr.ip_id = 0;
+ session->ip4_hdr.ip_off = 0;
+ session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+ RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+ : IPPROTO_AH;
+ session->ip4_hdr.ip_sum = 0;
+ session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+ (void *)&session->ip4_hdr,
+ sizeof(struct ip));
+
+ session->encap_pdb.options =
+ (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ session->encap_pdb.spi = ipsec_xform->spi;
+ session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ session->decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ } else
+ goto out;
+ session->ctx_pool = internals->ctx_pool;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ memset(session, 0, sizeof(struct caam_jr_session));
+ return -1;
+}
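+
+/*
+ * Illustrative only: a minimal sketch of how an application reaches
+ * caam_jr_set_ipsec_session() through the rte_security API. The algorithm
+ * choices, SPI, dev_id and mempool below are hypothetical placeholders;
+ * key data and tunnel addresses would also have to be filled in.
+ *
+ *	struct rte_crypto_sym_xform cipher_xf = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ *	};
+ *	struct rte_crypto_sym_xform auth_xf = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *		.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ *	};
+ *	struct rte_security_session_conf conf = {
+ *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ *		.ipsec = {
+ *			.spi = 42,
+ *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ *		},
+ *		.crypto_xform = &cipher_xf,
+ *	};
+ *	struct rte_security_session *sess;
+ *
+ *	cipher_xf.next = &auth_xf;
+ *	sess = rte_security_session_create(rte_cryptodev_get_sec_ctx(dev_id),
+ *					   &conf, mempool);
+ */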
+
+static int
+caam_jr_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CAAM_JR_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = caam_jr_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ return -ENOTSUP;
+ default:
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ CAAM_JR_ERR("failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+/* Clear the memory of session so it doesn't leave key material behind */
+static int
+caam_jr_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(struct caam_jr_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+
+static int
+caam_jr_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ char str[20];
+ struct sec_job_ring_t *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ internals = dev->data->dev_private;
+ sprintf(str, "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ sizeof(struct caam_jr_op_ctx),
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ CAAM_JR_ERR("%s create failed\n", str);
+ return -ENOMEM;
+ }
+ } else
+ CAAM_JR_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
+ return 0;
+}
+
+static int
+caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+caam_jr_dev_close(struct rte_cryptodev *dev)
+{
+ struct sec_job_ring_t *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev == NULL)
+		return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
+ return 0;
+}
+
+static void
+caam_jr_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct sec_job_ring_t *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = caam_jr_get_cryptodev_capabilities();
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static struct rte_cryptodev_ops caam_jr_ops = {
+ .dev_configure = caam_jr_dev_configure,
+ .dev_start = caam_jr_dev_start,
+ .dev_stop = caam_jr_dev_stop,
+ .dev_close = caam_jr_dev_close,
+ .dev_infos_get = caam_jr_dev_infos_get,
+ .stats_get = caam_jr_stats_get,
+ .stats_reset = caam_jr_stats_reset,
+ .queue_pair_setup = caam_jr_queue_pair_setup,
+ .queue_pair_release = caam_jr_queue_pair_release,
+ .queue_pair_count = caam_jr_queue_pair_count,
+ .sym_session_get_size = caam_jr_sym_session_get_size,
+ .sym_session_configure = caam_jr_sym_session_configure,
+ .sym_session_clear = caam_jr_sym_session_clear
+};
+
+static struct rte_security_ops caam_jr_security_ops = {
+ .session_create = caam_jr_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = caam_jr_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = caam_jr_get_security_capabilities
+};
+
+/* @brief Flush job rings of any processed descs.
+ * The processed descs are silently dropped,
+ * without any notification to the UA.
+ */
+static void
+close_job_ring(struct sec_job_ring_t *job_ring)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (job_ring->irq_fd) {
+		/* Producer index is frozen. If the consumer index is not equal
+		 * to the producer index, then we have descs to flush.
+ */
+ while (job_ring->pidx != job_ring->cidx)
+ hw_flush_job_ring(job_ring, false, NULL);
+
+ /* free the uio job ring */
+ free_job_ring(job_ring->irq_fd);
+ job_ring->irq_fd = 0;
+ caam_jr_dma_free(job_ring->input_ring);
+ caam_jr_dma_free(job_ring->output_ring);
+ g_job_rings_no--;
+ }
+}
+
+/** @brief Release the software and hardware resources tied to a job ring.
+ * @param [in] job_ring The job ring
+ *
+ * @retval 0 for success
+ * @retval -1 for error
+ */
+static int
+shutdown_job_ring(struct sec_job_ring_t *job_ring)
+{
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ ASSERT(job_ring != NULL);
+ ret = hw_shutdown_job_ring(job_ring);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to shutdown hardware job ring %p",
+ job_ring);
+
+ if (job_ring->coalescing_en)
+ hw_job_ring_disable_coalescing(job_ring);
+
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
+ ret = caam_jr_disable_irqs(job_ring->irq_fd);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to disable irqs for job ring %p",
+ job_ring);
+ }
+
+ return ret;
+}
+
+/*
+ * @brief Release the resources used by the SEC user space driver.
+ *
+ * Reset and release SEC's job rings indicated by the User Application at
+ * init_job_ring() and free any memory allocated internally.
+ * Call once during application tear down.
+ *
+ * @note In case there are any descriptors in-flight (descriptors received by
+ * SEC driver for processing and for which no response was yet provided to UA),
+ * the descriptors are discarded without any notifications to User Application.
+ *
+ * @retval ::0 is returned for a successful execution
+ * @retval ::-1 is returned if SEC driver release is in progress
+ */
+static int
+caam_jr_dev_uninit(struct rte_cryptodev *dev)
+{
+ struct sec_job_ring_t *internals;
+
+ PMD_INIT_FUNC_TRACE();
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_free(dev->security_ctx);
+
+	/* If any descriptors are in flight, poll and wait
+ * until all descriptors are received and silently discarded.
+ */
+ if (internals) {
+ shutdown_job_ring(internals);
+ close_job_ring(internals);
+ rte_mempool_free(internals->ctx_pool);
+ }
+
+ CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
+
+	/* last caam jr instance */
+ if (g_job_rings_no == 0)
+ g_driver_state = SEC_DRIVER_STATE_IDLE;
+
+ return SEC_SUCCESS;
+}
+
+/* @brief Initialize the software and hardware resources tied to a job ring.
+ *
+ * The notification mode used by the SEC driver to receive notifications from
+ * SEC (#SEC_NOTIFICATION_TYPE_NAPI, #SEC_NOTIFICATION_TYPE_IRQ or
+ * #SEC_NOTIFICATION_TYPE_POLL), the NAPI work mode and the interrupt
+ * coalescing parameters (the maximum time after processing a descriptor and
+ * the number of completed descriptors before raising an interrupt) are
+ * currently fixed inside this function: polling mode, coalescing disabled.
+ *
+ * @param [in] reg_base_addr The job ring base address register
+ * @param [in] irq_id The job ring interrupt identification number
+ * @retval job_ring_handle for successful job ring configuration
+ * @retval NULL on error
+ */
+static void *
+init_job_ring(void *reg_base_addr, uint32_t irq_id)
+{
+ struct sec_job_ring_t *job_ring = NULL;
+ int i, ret = 0;
+ int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
+ int napi_mode = 0;
+ int irq_coalescing_timer = 0;
+ int irq_coalescing_count = 0;
+
+ for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
+ if (g_job_rings[i].irq_fd == 0) {
+ job_ring = &g_job_rings[i];
+ g_job_rings_no++;
+ break;
+ }
+ }
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("No free job ring\n");
+ return NULL;
+ }
+
+ job_ring->register_base_addr = reg_base_addr;
+ job_ring->jr_mode = jr_mode;
+ job_ring->napi_mode = 0;
+ job_ring->irq_fd = irq_id;
+
+ /* Allocate mem for input and output ring */
+
+ /* Allocate memory for input ring */
+ job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
+ SEC_DMA_MEM_INPUT_RING_SIZE);
+ memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
+
+ /* Allocate memory for output ring */
+ job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
+ SEC_DMA_MEM_OUTPUT_RING_SIZE);
+ memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
+
+ /* Reset job ring in SEC hw and configure job ring registers */
+ ret = hw_reset_job_ring(job_ring);
+ if (ret != 0) {
+ CAAM_JR_ERR("Failed to reset hardware job ring");
+ goto cleanup;
+ }
+
+ if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
+ /* When SEC US driver works in NAPI mode, the UA can select
+		 * whether the driver starts with IRQs on or off.
+ */
+ if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
+			CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
+ job_ring);
+ ret = caam_jr_enable_irqs(job_ring->irq_fd);
+ if (ret != 0) {
+ CAAM_JR_ERR("Failed to enable irqs for job ring");
+ goto cleanup;
+ }
+ }
+ } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
+ /* When SEC US driver works in pure interrupt mode,
+ * IRQ's are always enabled.
+ */
+ CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
+ job_ring);
+ ret = caam_jr_enable_irqs(job_ring->irq_fd);
+ if (ret != 0) {
+ CAAM_JR_ERR("Failed to enable irqs for job ring");
+ goto cleanup;
+ }
+ }
+ if (irq_coalescing_timer || irq_coalescing_count) {
+ hw_job_ring_set_coalescing_param(job_ring,
+ irq_coalescing_timer,
+ irq_coalescing_count);
+
+ hw_job_ring_enable_coalescing(job_ring);
+ job_ring->coalescing_en = 1;
+ }
+
+ job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
+ job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
+ job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
+
+ return job_ring;
+cleanup:
+ caam_jr_dma_free(job_ring->output_ring);
+ caam_jr_dma_free(job_ring->input_ring);
+ return NULL;
+}
+
+
+static int
+caam_jr_dev_init(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct rte_security_ctx *security_instance;
+ struct uio_job_ring *job_ring;
+ char str[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Validate driver state */
+ if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
+ g_job_rings_max = sec_configure();
+ if (!g_job_rings_max) {
+ CAAM_JR_ERR("No job ring detected on UIO !!!!");
+ return -1;
+ }
+ /* Update driver state */
+ g_driver_state = SEC_DRIVER_STATE_STARTED;
+ }
+
+ if (g_job_rings_no >= g_job_rings_max) {
+ CAAM_JR_ERR("No more job rings available max=%d!!!!",
+ g_job_rings_max);
+ return -1;
+ }
+
+ job_ring = config_job_ring();
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("failed to create job ring");
+ goto init_error;
+ }
+
+ snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ CAAM_JR_ERR("failed to create cryptodev vdev");
+ goto cleanup;
+ }
+	/* TODO: free it during teardown */
+ dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
+ job_ring->uio_fd);
+
+ if (!dev->data->dev_private) {
+ CAAM_JR_ERR("Ring memory allocation failed\n");
+ goto cleanup2;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = &caam_jr_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = caam_jr_dequeue_burst;
+ dev->enqueue_burst = caam_jr_enqueue_burst;
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ /* For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ CAAM_JR_WARN("Device already init by primary process");
+ return 0;
+ }
+
+	/* TODO: free it during teardown */
+ security_instance = rte_malloc("caam_jr",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL) {
+ CAAM_JR_ERR("memory allocation failed\n");
+		/* TODO: error handling */
+ goto cleanup2;
+ }
+
+ security_instance->device = (void *)dev;
+ security_instance->ops = &caam_jr_security_ops;
+ security_instance->sess_cnt = 0;
+ dev->security_ctx = security_instance;
+
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
+
+ return 0;
+
+cleanup2:
+ caam_jr_dev_uninit(dev);
+ rte_cryptodev_pmd_release_device(dev);
+cleanup:
+ free_job_ring(job_ring->uio_fd);
+init_error:
+ CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
+ init_params->name);
+
+ return -ENXIO;
+}
+
+/** Initialise CAAM JR crypto device */
+static int
+cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct sec_job_ring_t),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ /* if sec device version is not configured */
+ if (!rta_get_sec_era()) {
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ if (prop) {
+ rta_set_sec_era(
+ INTL_SEC_ERA(cpu_to_caam32(*prop)));
+ break;
+ }
+ }
+ }
+#ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
+ if (rta_get_sec_era() > RTA_SEC_ERA_8) {
+ RTE_LOG(ERR, PMD,
+			"CAAM is compiled in BE mode for a device with sec era > 8\n");
+ return -EINVAL;
+ }
+#endif
+
+ return caam_jr_dev_init(name, vdev, &init_params);
+}
+
+/** Uninitialise CAAM JR crypto device */
+static int
+cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ caam_jr_dev_uninit(cryptodev);
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_caam_jr_drv = {
+ .probe = cryptodev_caam_jr_probe,
+ .remove = cryptodev_caam_jr_remove
+};
+
+static struct cryptodev_driver caam_jr_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
+ "max_nb_queue_pairs=<int>"
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
+ cryptodev_driver_id);
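+
+/*
+ * Illustrative only: the PMD is instantiated as a virtual device. Assuming
+ * CRYPTODEV_NAME_CAAM_JR_PMD expands to "crypto_caam_jr", a hypothetical
+ * invocation would be:
+ *
+ *	./app --vdev "crypto_caam_jr,max_nb_queue_pairs=1,socket_id=0"
+ */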
+
+RTE_INIT(caam_jr_init_log)
+{
+ caam_jr_logtype = rte_log_register("pmd.crypto.caam");
+ if (caam_jr_logtype >= 0)
+ rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/caam_jr/caam_jr_capabilities.c b/drivers/crypto/caam_jr/caam_jr_capabilities.c
new file mode 100644
index 00000000..c51593c4
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_capabilities.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <caam_jr_capabilities.h>
+
+static const struct rte_cryptodev_capabilities caam_jr_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability caam_jr_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = caam_jr_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = caam_jr_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+const struct rte_cryptodev_capabilities *
+caam_jr_get_cryptodev_capabilities(void)
+{
+ return caam_jr_capabilities;
+}
+
+const struct rte_security_capability *
+caam_jr_get_security_capabilities(void *device __rte_unused)
+{
+ return caam_jr_security_cap;
+}
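+
+/*
+ * Illustrative only: applications normally reach these tables indirectly
+ * (dev_id and sec_ctx below are hypothetical), e.g.:
+ *
+ *	struct rte_cryptodev_info info;
+ *	const struct rte_security_capability *sec_caps;
+ *
+ *	rte_cryptodev_info_get(dev_id, &info);     (fills info.capabilities)
+ *	sec_caps = rte_security_capabilities_get(sec_ctx);
+ */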
diff --git a/drivers/crypto/caam_jr/caam_jr_capabilities.h b/drivers/crypto/caam_jr/caam_jr_capabilities.h
new file mode 100644
index 00000000..c1e3f305
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_capabilities.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_CAPABILITIES_H
+#define CAAM_JR_CAPABILITIES_H
+
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+
+/* Get cryptodev capabilities */
+const struct rte_cryptodev_capabilities *
+caam_jr_get_cryptodev_capabilities(void);
+/* Get security capabilities */
+const struct rte_security_capability *
+caam_jr_get_security_capabilities(void *device);
+
+#endif
diff --git a/drivers/crypto/caam_jr/caam_jr_config.h b/drivers/crypto/caam_jr/caam_jr_config.h
new file mode 100644
index 00000000..041187a8
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_config.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_CONFIG_H
+#define CAAM_JR_CONFIG_H
+
+#include <rte_byteorder.h>
+
+#include <compat.h>
+
+#ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
+#define CAAM_BYTE_ORDER __BIG_ENDIAN
+#else
+#define CAAM_BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define CORE_BYTE_ORDER __BIG_ENDIAN
+#else
+#define CORE_BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
+
+#define cpu_to_caam64 rte_cpu_to_be_64
+#define cpu_to_caam32 rte_cpu_to_be_32
+#else
+#define cpu_to_caam64
+#define cpu_to_caam32
+
+#endif
+
+/*
+ * SEC is configured to start work in polling mode,
+ * when configured for NAPI notification style.
+ */
+#define SEC_STARTUP_POLLING_MODE 0
+/*
+ * SEC is configured to start work in interrupt mode,
+ * when configured for NAPI notification style.
+ */
+#define SEC_STARTUP_INTERRUPT_MODE 1
+
+/*
+ * SEC driver will use NAPI model to receive notifications
+ * for processed packets from SEC engine hardware:
+ * - IRQ for low traffic
+ * - polling for high traffic.
+ */
+#define SEC_NOTIFICATION_TYPE_NAPI 0
+/*
+ * SEC driver will use ONLY interrupts to receive notifications
+ * for processed packets from SEC engine hardware.
+ */
+#define SEC_NOTIFICATION_TYPE_IRQ 1
+/*
+ * SEC driver will use ONLY polling to receive notifications
+ * for processed packets from SEC engine hardware.
+ */
+#define SEC_NOTIFICATION_TYPE_POLL 2
+
+/*
+ * SEC USER SPACE DRIVER related configuration.
+ */
+
+/*
+ * Determines how SEC user space driver will receive notifications
+ * for processed packets from SEC engine.
+ * Valid values are: #SEC_NOTIFICATION_TYPE_POLL, #SEC_NOTIFICATION_TYPE_IRQ
+ * and #SEC_NOTIFICATION_TYPE_NAPI.
+ */
+#define SEC_NOTIFICATION_TYPE SEC_NOTIFICATION_TYPE_POLL
+
+/* Maximum number of job rings supported by SEC hardware */
+#define MAX_SEC_JOB_RINGS 4
+
+/* Maximum number of QP per job ring */
+#define RTE_CAAM_MAX_NB_SEC_QPS 1
+
+/*
+ * Size of cryptographic context that is used directly in communicating
+ * with SEC device. SEC device works only with physical addresses. This
+ * is the maximum size for a SEC descriptor ( = 64 words).
+ */
+#define SEC_CRYPTO_DESCRIPTOR_SIZE 256
+
+/*
+ * Size of job descriptor submitted to SEC device for each packet to
+ * be processed.
+ * Job descriptor contains 3 DMA address pointers:
+ * - to shared descriptor, to input buffer and to output buffer.
+ * The job descriptor contains other SEC specific commands as well:
+ * - HEADER command, SEQ IN PTR command SEQ OUT PTR command and opaque data
+ * each measuring 4 bytes.
+ * Job descriptor size, depending on physical address representation:
+ * - 32 bit - size is 28 bytes - cacheline-aligned size is 64 bytes
+ * - 36 bit - size is 40 bytes - cacheline-aligned size is 64 bytes
+ * @note: Job descriptor must be cacheline-aligned to ensure efficient
+ * memory access.
+ * @note: If another format is used for the job descriptor, then the size must
+ * revised.
+ */
+#define SEC_JOB_DESCRIPTOR_SIZE 64
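+
+/*
+ * Worked example for the sizes quoted above: with a 32-bit physical address
+ * representation the job descriptor holds 3 pointers x 4 bytes plus
+ * 4 x 4 bytes of commands/opaque data, i.e. 28 bytes; with 8-byte pointers
+ * (36-bit physical addressing) this becomes 3 x 8 + 4 x 4 = 40 bytes.
+ * Both round up to a single 64-byte cacheline.
+ */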
+
+/*
+ * Size of one entry in the input ring of a job ring.
+ * Input ring contains pointers to job descriptors.
+ * The memory used for an input ring and output ring must be physically
+ * contiguous.
+ */
+#define SEC_JOB_INPUT_RING_ENTRY_SIZE sizeof(dma_addr_t)
+
+/*
+ * Size of one entry in the output ring of a job ring.
+ * Output ring entry is a pointer to a job descriptor followed by a 4 byte
+ * status word.
+ * The memory used for an input ring and output ring must be physically
+ * contiguous.
+ * @note If the optional SEQ OUT indication in output ring entries is also
+ * desired, then 4 more bytes must be added to the size.
+ */
+#define SEC_JOB_OUTPUT_RING_ENTRY_SIZE (SEC_JOB_INPUT_RING_ENTRY_SIZE + 4)
+
+/*
+ * DMA memory required for an input ring of a job ring.
+ */
+#define SEC_DMA_MEM_INPUT_RING_SIZE ((SEC_JOB_INPUT_RING_ENTRY_SIZE) * \
+ (SEC_JOB_RING_SIZE))
+
+/*
+ * DMA memory required for an output ring of a job ring.
+ * Required extra 4 byte for status word per each entry.
+ */
+#define SEC_DMA_MEM_OUTPUT_RING_SIZE ((SEC_JOB_OUTPUT_RING_ENTRY_SIZE) * \
+ (SEC_JOB_RING_SIZE))
+
+/* DMA memory required for a job ring, including both input and output rings. */
+#define SEC_DMA_MEM_JOB_RING_SIZE ((SEC_DMA_MEM_INPUT_RING_SIZE) + \
+ (SEC_DMA_MEM_OUTPUT_RING_SIZE))
+
+/*
+ * When calling sec_init() UA will provide an area of virtual memory
+ * of size #SEC_DMA_MEMORY_SIZE to be used internally by the driver
+ * to allocate data (like SEC descriptors) that needs to be passed to
+ * SEC device in physical addressing and later on retrieved from SEC device.
+ * At initialization the UA provides specialized ptov/vtop functions/macros to
+ * translate addresses allocated from this memory area.
+ */
+#define SEC_DMA_MEMORY_SIZE ((SEC_DMA_MEM_JOB_RING_SIZE) * \
+ (MAX_SEC_JOB_RINGS))
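+
+/*
+ * Worked example, assuming an 8-byte dma_addr_t and SEC_JOB_RING_SIZE = 512:
+ * input ring = 8 * 512 = 4096 bytes, output ring = (8 + 4) * 512 = 6144 bytes,
+ * i.e. 10240 bytes per job ring and 4 * 10240 = 40960 bytes in total for
+ * MAX_SEC_JOB_RINGS = 4.
+ */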
+
+#define L1_CACHE_BYTES 64
+
+/* SEC JOB RING related configuration. */
+
+/*
+ * Configure the size of the JOB RING.
+ * The maximum size of the ring in hardware limited to 1024.
+ * However the number of packets in flight in a time interval of 1ms can
+ * be calculated from the traffic rate (Mbps) and packet size.
+ * Here it was considered a packet size of 64 bytes.
+ *
+ * @note Round up to nearest power of 2 for optimized update
+ * of producer/consumer indexes of each job ring
+ */
+#define SEC_JOB_RING_SIZE 512
+
+/*
+ * Interrupt coalescing related configuration.
+ * NOTE: SEC hardware enabled interrupt
+ * coalescing is not supported on SEC version 3.1!
+ * SEC version 4.4 has support for interrupt
+ * coalescing.
+ */
+
+#if SEC_NOTIFICATION_TYPE != SEC_NOTIFICATION_TYPE_POLL
+
+#define SEC_INT_COALESCING_ENABLE 1
+/*
+ * Interrupt Coalescing Descriptor Count Threshold.
+ * While interrupt coalescing is enabled (ICEN=1), this value determines
+ * how many Descriptors are completed before raising an interrupt.
+ *
+ * Valid values for this field are from 0 to 255.
+ * Note that a value of 1 functionally defeats the advantages of interrupt
+ * coalescing since the threshold value is reached each time that a
+ * Job Descriptor is completed. A value of 0 is treated in the same
+ * manner as a value of 1.
+ */
+#define SEC_INTERRUPT_COALESCING_DESCRIPTOR_COUNT_THRESH 10
+
+/*
+ * Interrupt Coalescing Timer Threshold.
+ * While interrupt coalescing is enabled (ICEN=1), this value determines the
+ * maximum amount of time after processing a Descriptor before raising an
+ * interrupt.
+ * The threshold value is represented in units equal to 64 CAAM interface
+ * clocks. Valid values for this field are from 1 to 65535.
+ * A value of 0 results in behavior identical to that when interrupt
+ * coalescing is disabled.
+ */
+#define SEC_INTERRUPT_COALESCING_TIMER_THRESH 100
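+
+/*
+ * Worked example (the CAAM interface clock frequency below is an assumption):
+ * with an assumed 400 MHz interface clock, a timer threshold of 100 amounts to
+ * 100 * 64 / 400 MHz = 16 us between a descriptor completion and the resulting
+ * interrupt, unless the descriptor count threshold is reached first.
+ */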
+#endif /* SEC_NOTIFICATION_TYPE_POLL */
+
+#endif /* CAAM_JR_CONFIG_H */
diff --git a/drivers/crypto/caam_jr/caam_jr_desc.h b/drivers/crypto/caam_jr/caam_jr_desc.h
new file mode 100644
index 00000000..6683ea83
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_desc.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_DESC_H
+#define CAAM_JR_DESC_H
+
+#define CMD_HDR_CTYPE_SD 0x16
+#define CMD_HDR_CTYPE_JD 0x17
+
+/* The maximum size of a SEC descriptor, in WORDs (32 bits). */
+#define MAX_DESC_SIZE_WORDS 64
+
+/*
+ * Macros manipulating descriptors
+ */
+/* Macro for setting the SD pointer in a JD. Common for all protocols
+ * supported by the SEC driver.
+ */
+#define SEC_JD_SET_SD(descriptor, ptr, len) { \
+ (descriptor)->sd_ptr = (ptr); \
+ (descriptor)->deschdr.command.jd.shr_desc_len = (len); \
+}
+
+/* Macro for setting a pointer to the job which this descriptor processes.
+ * It eases the lookup procedure for identifying the descriptor that has
+ * completed.
+ */
+#define SEC_JD_SET_JOB_PTR(descriptor, ptr) \
+ ((descriptor)->job_ptr = (ptr))
+
+/* Macro for setting up a JD. The structure of the JD is common across all
+ * supported protocols.
+ */
+#define SEC_JD_INIT(descriptor) ({ \
+ /* CTYPE = job descriptor \
+ * RSMS, DNR = 0
+ * ONE = 1
+ * Start Index = 0
+ * ZRO,TD, MTD = 0
+ * SHR = 1 (there's a shared descriptor referenced
+ * by this job descriptor,pointer in next word)
+ * REO = 1 (execute job descr. first, shared descriptor
+ * after)
+ * SHARE = DEFER
+ * Descriptor Length = 0 ( to be completed @ runtime ) */ \
+ (descriptor)->deschdr.command.word = 0xB0801C0D; \
+	/*
+	 * CTYPE = SEQ OUT command
+	 * Scatter Gather Flag = 0 (can be updated @ runtime)
+	 * PRE = 0
+	 * EXT = 1 (data length is in next word, following the command)
+	 * RTO = 0 */ \
+	(descriptor)->seq_out.command.word = 0xF8400000; \
+ /*
+ * CTYPE = SEQ IN command
+	 * Scatter Gather Flag = 0 (can be updated @ runtime)
+ * PRE = 0
+ * EXT = 1 ( data length is in next word, following the
+ * command)
+ * RTO = 0 */ \
+	(descriptor)->seq_in.command.word = 0xF0400000; \
+ /*
+ * In order to be compatible with QI scenarios, the DPOVRD value
+ * loaded must be formated like this:
+	 * loaded must be formatted like this:
+	 * DPOVRD_EN (1b) | Res | DPOVRD Value (right aligned). */ \
+ /* By default, DPOVRD mechanism is disabled, thus the value to be
+ * LOAD-ed through the above descriptor command will be
+ * 0x0000_0000. */ \
+ (descriptor)->dpovrd = 0x00000000; \
+})
+
+/* Macro for setting the pointer to the input buffer in the JD, according to
+ * the parameters set by the user in the ::sec_packet_t structure.
+ */
+#define SEC_JD_SET_IN_PTR(descriptor, phys_addr, offset, length) { \
+ (descriptor)->seq_in_ptr = (phys_addr) + (offset); \
+ (descriptor)->in_ext_length = (length); \
+}
+
+/* Macro for setting the pointer to the output buffer in the JD, according to
+ * the parameters set by the user in the ::sec_packet_t structure.
+ */
+#define SEC_JD_SET_OUT_PTR(descriptor, phys_addr, offset, length) { \
+ (descriptor)->seq_out_ptr = (phys_addr) + (offset); \
+ (descriptor)->out_ext_length = (length); \
+}
+
+/* Macro for setting the Scatter-Gather flag in the SEQ IN command. Used in
+ * case the input buffer is split into multiple buffers, according to the user
+ * specification.
+ */
+#define SEC_JD_SET_SG_IN(descriptor) \
+ ((descriptor)->seq_in.command.field.sgf = 1)
+
+/* Macro for setting the Scatter-Gather flag in the SEQ OUT command. Used in
+ * case the output buffer is split into multiple buffers, according to the user
+ * specification.
+ */
+#define SEC_JD_SET_SG_OUT(descriptor) \
+ ((descriptor)->seq_out.command.field.sgf = 1)
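+
+/* Illustrative usage sketch for the macros above; jd, job_descriptor_mem,
+ * sd_phys, in_phys, out_phys and the lengths are hypothetical, and the real
+ * flow lives in the driver's enqueue path:
+ *
+ *	struct sec_job_descriptor_t *jd = job_descriptor_mem;
+ *
+ *	SEC_JD_INIT(jd);
+ *	SEC_JD_SET_SD(jd, sd_phys, sd_len_words);
+ *	SEC_JD_SET_IN_PTR(jd, in_phys, 0, in_len);
+ *	SEC_JD_SET_OUT_PTR(jd, out_phys, 0, out_len);
+ *	SEC_JD_SET_SG_IN(jd);	(only when the input is scatter-gather)
+ */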
+
+#define SEC_JD_SET_DPOVRD(descriptor) \
+
+/* Macro for retrieving a descriptor's length. Works for both SD and JD. */
+#define SEC_GET_DESC_LEN(descriptor) \
+ (((struct descriptor_header_s *)(descriptor))->command.sd.ctype == \
+ CMD_HDR_CTYPE_SD ? ((struct descriptor_header_s *) \
+ (descriptor))->command.sd.desclen : \
+ ((struct descriptor_header_s *)(descriptor))->command.jd.desclen)
+
+/* Helper macro for dumping the hex representation of a descriptor */
+#define SEC_DUMP_DESC(descriptor) { \
+ int __i; \
+ CAAM_JR_INFO("Des@ 0x%08x\n", (uint32_t)((uint32_t *)(descriptor)));\
+ for (__i = 0; \
+ __i < SEC_GET_DESC_LEN(descriptor); \
+ __i++) { \
+ printf("0x%08x: 0x%08x\n", \
+ (uint32_t)(((uint32_t *)(descriptor)) + __i), \
+ *(((uint32_t *)(descriptor)) + __i)); \
+ } \
+}
+/* Union describing a descriptor header.
+ */
+struct descriptor_header_s {
+ union {
+ uint32_t word;
+ struct {
+ /* 4 */ unsigned int ctype:5;
+ /* 5 */ unsigned int res1:2;
+ /* 7 */ unsigned int dnr:1;
+ /* 8 */ unsigned int one:1;
+ /* 9 */ unsigned int res2:1;
+ /* 10 */ unsigned int start_idx:6;
+ /* 16 */ unsigned int res3:2;
+ /* 18 */ unsigned int cif:1;
+ /* 19 */ unsigned int sc:1;
+ /* 20 */ unsigned int pd:1;
+ /* 21 */ unsigned int res4:1;
+ /* 22 */ unsigned int share:2;
+ /* 24 */ unsigned int res5:2;
+ /* 26 */ unsigned int desclen:6;
+ } sd;
+ struct {
+			/* TODO: only the struct members below have been
+			 * corrected; all the others also need to be reversed.
+			 * Please verify.
+ */
+ /* 0 */ unsigned int desclen:7;
+ /* 7 */ unsigned int res4:1;
+ /* 8 */ unsigned int share:3;
+ /* 11 */ unsigned int reo:1;
+ /* 12 */ unsigned int shr:1;
+ /* 13 */ unsigned int mtd:1;
+ /* 14 */ unsigned int td:1;
+ /* 15 */ unsigned int zero:1;
+ /* 16 */ unsigned int shr_desc_len:6;
+ /* 22 */ unsigned int res2:1;
+ /* 23 */ unsigned int one:1;
+ /* 24 */ unsigned int dnr:1;
+ /* 25 */ unsigned int rsms:1;
+ /* 26 */ unsigned int res1:1;
+ /* 27 */ unsigned int ctype:5;
+ } jd;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a KEY command in a descriptor.
+ */
+struct key_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int cls:2;
+ unsigned int sgf:1;
+ unsigned int imm:1;
+ unsigned int enc:1;
+ unsigned int nwb:1;
+ unsigned int ekt:1;
+ unsigned int kdest:4;
+ unsigned int tk:1;
+ unsigned int rsvd1:5;
+ unsigned int length:10;
+ } __rte_packed field;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a PROTOCOL command
+ * in a descriptor.
+ */
+struct protocol_operation_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int optype:3;
+ unsigned char protid;
+ unsigned short protinfo;
+ } __rte_packed field;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a SEQIN command in a
+ * descriptor.
+ */
+struct seq_in_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int res1:1;
+ unsigned int inl:1;
+ unsigned int sgf:1;
+ unsigned int pre:1;
+ unsigned int ext:1;
+ unsigned int rto:1;
+ unsigned int rjd:1;
+ unsigned int res2:4;
+ unsigned int length:16;
+ } field;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a SEQOUT command in a
+ * descriptor.
+ */
+struct seq_out_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int res1:2;
+ unsigned int sgf:1;
+ unsigned int pre:1;
+ unsigned int ext:1;
+ unsigned int rto:1;
+ unsigned int res2:5;
+ unsigned int length:16;
+ } field;
+ } __rte_packed command;
+} __rte_packed;
+
+struct load_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int class:2;
+ unsigned int sgf:1;
+ unsigned int imm:1;
+ unsigned int dst:7;
+ unsigned char offset;
+ unsigned char length;
+ } fields;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Structure encompassing a general shared descriptor of maximum
+ * size (64 WORDs). Usually, other specific shared descriptor structures
+ * will be type-cast to this one.
+ */
+struct sec_sd_t {
+ uint32_t rsvd[MAX_DESC_SIZE_WORDS];
+} __attribute__((packed, aligned(64)));
+
+/* Structure encompassing a job descriptor which processes
+ * a single packet from a context. The job descriptor references
+ * a shared descriptor from a SEC context.
+ */
+struct sec_job_descriptor_t {
+ struct descriptor_header_s deschdr;
+ dma_addr_t sd_ptr;
+ struct seq_out_command_s seq_out;
+ dma_addr_t seq_out_ptr;
+ uint32_t out_ext_length;
+ struct seq_in_command_s seq_in;
+ dma_addr_t seq_in_ptr;
+ uint32_t in_ext_length;
+ struct load_command_s load_dpovrd;
+ uint32_t dpovrd;
+} __attribute__((packed, aligned(64)));
+
+#endif
diff --git a/drivers/crypto/caam_jr/caam_jr_hw.c b/drivers/crypto/caam_jr/caam_jr_hw.c
new file mode 100644
index 00000000..4a2b0899
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_hw.c
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_crypto.h>
+#include <rte_security.h>
+
+#include <caam_jr_config.h>
+#include <caam_jr_hw_specific.h>
+#include <caam_jr_pvt.h>
+#include <caam_jr_log.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+/* Used to retry resetting a job ring in SEC hardware. */
+#define SEC_TIMEOUT 100000
+
+/* @brief Process Jump Halt Condition related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_jmp_halt_cond_err(union hw_error_code error_code)
+{
+ CAAM_JR_DEBUG("JMP: %d, Descriptor Index: 0x%x, Condition: 0x%x",
+ error_code.error_desc.jmp_halt_cond_src.jmp,
+ error_code.error_desc.jmp_halt_cond_src.desc_idx,
+ error_code.error_desc.jmp_halt_cond_src.cond);
+ (void)error_code;
+}
+
+/* @brief Process DECO related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_deco_err(union hw_error_code error_code)
+{
+ CAAM_JR_DEBUG("JMP: %d, Descriptor Index: 0x%x",
+ error_code.error_desc.deco_src.jmp,
+ error_code.error_desc.deco_src.desc_idx);
+
+ switch (error_code.error_desc.deco_src.desc_err) {
+ case SEC_HW_ERR_DECO_HFN_THRESHOLD:
+		CAAM_JR_DEBUG(" Warning: Descriptor completed normally,"
+			" but 3GPP HFN matches or exceeds the Threshold");
+ break;
+ default:
+ CAAM_JR_DEBUG("Error 0x%04x not implemented",
+ error_code.error_desc.deco_src.desc_err);
+ break;
+ }
+}
+
+/* @brief Process Jump Halt User Status related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_jmp_halt_user_err(union hw_error_code error_code __rte_unused)
+{
+ CAAM_JR_DEBUG(" Not implemented");
+}
+
+/* @brief Process CCB related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_ccb_err(union hw_error_code hw_error_code __rte_unused)
+{
+ CAAM_JR_DEBUG(" Not implemented");
+}
+
+/* @brief Process Job Ring related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_jr_err(union hw_error_code hw_error_code __rte_unused)
+{
+ CAAM_JR_DEBUG(" Not implemented");
+}
+
+int
+hw_reset_job_ring(struct sec_job_ring_t *job_ring)
+{
+ int ret = 0;
+
+ ASSERT(job_ring->register_base_addr != NULL);
+
+ /* First reset the job ring in hw */
+ ret = hw_shutdown_job_ring(job_ring);
+ SEC_ASSERT(ret == 0, ret, "Failed resetting job ring in hardware");
+
+ /* In order to have the HW JR in a workable state
+	 * after a reset, we need to re-write the input
+ * queue size, input start address, output queue
+ * size and output start address
+ */
+ /* Write the JR input queue size to the HW register */
+ hw_set_input_ring_size(job_ring, SEC_JOB_RING_SIZE);
+
+ /* Write the JR output queue size to the HW register */
+ hw_set_output_ring_size(job_ring, SEC_JOB_RING_SIZE);
+
+ /* Write the JR input queue start address */
+ hw_set_input_ring_start_addr(job_ring,
+ caam_jr_dma_vtop(job_ring->input_ring));
+ CAAM_JR_DEBUG(" Set input ring base address to : Virtual: 0x%" PRIx64
+ ",Physical: 0x%" PRIx64 ", Read from HW: 0x%" PRIx64,
+ (uint64_t)(uintptr_t)job_ring->input_ring,
+ caam_jr_dma_vtop(job_ring->input_ring),
+ hw_get_inp_queue_base(job_ring));
+
+ /* Write the JR output queue start address */
+ hw_set_output_ring_start_addr(job_ring,
+ caam_jr_dma_vtop(job_ring->output_ring));
+ CAAM_JR_DEBUG(" Set output ring base address to: Virtual: 0x%" PRIx64
+ ",Physical: 0x%" PRIx64 ", Read from HW: 0x%" PRIx64,
+ (uint64_t)(uintptr_t)job_ring->output_ring,
+ caam_jr_dma_vtop(job_ring->output_ring),
+ hw_get_out_queue_base(job_ring));
+ return ret;
+}
+
+int
+hw_shutdown_job_ring(struct sec_job_ring_t *job_ring)
+{
+ unsigned int timeout = SEC_TIMEOUT;
+ uint32_t tmp = 0;
+ int usleep_interval = 10;
+
+ if (job_ring->register_base_addr == NULL) {
+		CAAM_JR_ERR("Jr[%p] has reg base addr as NULL. Driver not init",
+ job_ring);
+ return 0;
+ }
+
+ CAAM_JR_INFO("Resetting Job ring %p", job_ring);
+
+ /*
+ * Mask interrupts since we are going to poll
+	 * for reset completion status.
+	 * Also, at POR, interrupts are ENABLED on a JR, thus
+	 * this is the point where we can disable them without
+	 * changing the code logic too much.
+ */
+ caam_jr_disable_irqs(job_ring->irq_fd);
+
+ /* initiate flush (required prior to reset) */
+ SET_JR_REG(JRCR, job_ring, JR_REG_JRCR_VAL_RESET);
+
+ /* dummy read */
+ tmp = GET_JR_REG(JRCR, job_ring);
+
+ do {
+ tmp = GET_JR_REG(JRINT, job_ring);
+ usleep(usleep_interval);
+ } while (((tmp & JRINT_ERR_HALT_MASK) ==
+ JRINT_ERR_HALT_INPROGRESS) && --timeout);
+
+ CAAM_JR_INFO("JRINT is %x", tmp);
+ if ((tmp & JRINT_ERR_HALT_MASK) != JRINT_ERR_HALT_COMPLETE ||
+ timeout == 0) {
+ CAAM_JR_ERR("0x%x, %d", tmp, timeout);
+ /* unmask interrupts */
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL)
+ caam_jr_enable_irqs(job_ring->irq_fd);
+ return -1;
+ }
+
+ /* Initiate reset */
+ timeout = SEC_TIMEOUT;
+ SET_JR_REG(JRCR, job_ring, JR_REG_JRCR_VAL_RESET);
+
+ do {
+ tmp = GET_JR_REG(JRCR, job_ring);
+ usleep(usleep_interval);
+ } while ((tmp & JR_REG_JRCR_VAL_RESET) && --timeout);
+
+ CAAM_JR_DEBUG("JRCR is %x", tmp);
+ if (timeout == 0) {
+ CAAM_JR_ERR("Failed to reset hw job ring %p", job_ring);
+ /* unmask interrupts */
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL)
+ caam_jr_enable_irqs(job_ring->irq_fd);
+ return -1;
+ }
+ /* unmask interrupts */
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL)
+ caam_jr_enable_irqs(job_ring->irq_fd);
+ return 0;
+
+}
+
+void
+hw_handle_job_ring_error(struct sec_job_ring_t *job_ring __rte_unused,
+ uint32_t error_code)
+{
+ union hw_error_code hw_err_code;
+
+ hw_err_code.error = error_code;
+ switch (hw_err_code.error_desc.value.ssrc) {
+ case SEC_HW_ERR_SSRC_NO_SRC:
+ ASSERT(hw_err_code.error_desc.no_status_src.res == 0);
+ CAAM_JR_ERR("No Status Source ");
+ break;
+ case SEC_HW_ERR_SSRC_CCB_ERR:
+ CAAM_JR_ERR("CCB Status Source");
+ hw_handle_ccb_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_JMP_HALT_U:
+ CAAM_JR_ERR("Jump Halt User Status Source");
+ hw_handle_jmp_halt_user_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_DECO:
+ CAAM_JR_ERR("DECO Status Source");
+ hw_handle_deco_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_JR:
+ CAAM_JR_ERR("Job Ring Status Source");
+ hw_handle_jr_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_JMP_HALT_COND:
+ CAAM_JR_ERR("Jump Halt Condition Codes");
+ hw_handle_jmp_halt_cond_err(hw_err_code);
+ break;
+ default:
+ ASSERT(0);
+ CAAM_JR_ERR("Unknown SSRC");
+ break;
+ }
+}
+
+void
+hw_job_ring_error_print(struct sec_job_ring_t *job_ring, int code)
+{
+ switch (code) {
+ case JRINT_ERR_WRITE_STATUS:
+ CAAM_JR_ERR("Error writing status to Output Ring ");
+ break;
+ case JRINT_ERR_BAD_INPUT_BASE:
+ CAAM_JR_ERR(
+ "Bad Input Ring Base (%p) (not on a 4-byte boundary) ",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_BAD_OUTPUT_BASE:
+ CAAM_JR_ERR(
+ "Bad Output Ring Base (%p) (not on a 4-byte boundary) ",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_WRITE_2_IRBA:
+ CAAM_JR_ERR(
+ "Invalid write to Input Ring Base Address Register ");
+ break;
+ case JRINT_ERR_WRITE_2_ORBA:
+ CAAM_JR_ERR(
+ "Invalid write to Output Ring Base Address Register ");
+ break;
+ case JRINT_ERR_RES_B4_HALT:
+ CAAM_JR_ERR(
+ "Job Ring [%p] released before Job Ring is halted",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_REM_TOO_MANY:
+ CAAM_JR_ERR("Removed too many jobs from job ring [%p]",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_ADD_TOO_MANY:
+ CAAM_JR_ERR("Added too many jobs on job ring [%p]", job_ring);
+ break;
+ default:
+ CAAM_JR_ERR(" Unknown SEC JR Error :%d",
+ code);
+ break;
+ }
+}
+
+int
+hw_job_ring_set_coalescing_param(struct sec_job_ring_t *job_ring,
+ uint16_t irq_coalescing_timer,
+ uint8_t irq_coalescing_count)
+{
+ uint32_t reg_val = 0;
+
+ ASSERT(job_ring != NULL);
+ if (job_ring->register_base_addr == NULL) {
+		CAAM_JR_ERR("Jr[%p] has reg base addr as NULL. Driver not init",
+ job_ring);
+ return -1;
+ }
+ /* Set descriptor count coalescing */
+ reg_val |= (irq_coalescing_count << JR_REG_JRCFG_LO_ICDCT_SHIFT);
+
+ /* Set coalescing timer value */
+ reg_val |= (irq_coalescing_timer << JR_REG_JRCFG_LO_ICTT_SHIFT);
+
+ /* Update parameters in HW */
+ SET_JR_REG_LO(JRCFG, job_ring, reg_val);
+ CAAM_JR_DEBUG("Set coalescing params on jr %p timer:%d, desc count: %d",
+		      job_ring, irq_coalescing_timer, irq_coalescing_count);
+
+ return 0;
+}
+
+int
+hw_job_ring_enable_coalescing(struct sec_job_ring_t *job_ring)
+{
+ uint32_t reg_val = 0;
+
+ ASSERT(job_ring != NULL);
+ if (job_ring->register_base_addr == NULL) {
+		CAAM_JR_ERR("Jr[%p] has reg base addr as NULL. Driver not init",
+ job_ring);
+ return -1;
+ }
+
+ /* Get the current value of the register */
+ reg_val = GET_JR_REG_LO(JRCFG, job_ring);
+
+ /* Enable coalescing */
+ reg_val |= JR_REG_JRCFG_LO_ICEN_EN;
+
+ /* Write in hw */
+ SET_JR_REG_LO(JRCFG, job_ring, reg_val);
+
+ CAAM_JR_DEBUG("Enabled coalescing on jr %p ",
+ job_ring);
+
+ return 0;
+}
+
+int
+hw_job_ring_disable_coalescing(struct sec_job_ring_t *job_ring)
+{
+ uint32_t reg_val = 0;
+
+ ASSERT(job_ring != NULL);
+
+ if (job_ring->register_base_addr == NULL) {
+		CAAM_JR_ERR("Jr[%p] has reg base addr as NULL. Driver not init",
+ job_ring);
+ return -1;
+ }
+
+ /* Get the current value of the register */
+ reg_val = GET_JR_REG_LO(JRCFG, job_ring);
+
+ /* Disable coalescing */
+ reg_val &= ~JR_REG_JRCFG_LO_ICEN_EN;
+
+ /* Write in hw */
+ SET_JR_REG_LO(JRCFG, job_ring, reg_val);
+ CAAM_JR_DEBUG("Disabled coalescing on jr %p ", job_ring);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam_jr/caam_jr_hw_specific.h b/drivers/crypto/caam_jr/caam_jr_hw_specific.h
new file mode 100644
index 00000000..5f58a585
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_hw_specific.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef CAAM_JR_HW_SPECIFIC_H
+#define CAAM_JR_HW_SPECIFIC_H
+
+#include <caam_jr_config.h>
+
+/*
+ * Offset to the registers of a job ring.
+ * Is different for each job ring.
+ */
+#define CHAN_BASE(jr) ((size_t)(jr)->register_base_addr)
+
+#define SEC_JOB_RING_IS_FULL(pi, ci, ring_max_size, ring_threshold) \
+ ((((pi) + 1 + ((ring_max_size) - (ring_threshold))) & \
+ (ring_max_size - 1)) == ((ci)))
+
+#define SEC_CIRCULAR_COUNTER(x, max) (((x) + 1) & (max - 1))
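+
+/*
+ * Example: with ring_max_size = 512 and ring_threshold = 512 the check above
+ * reduces to (((pi) + 1) & 511) == (ci), i.e. the ring is reported full when
+ * advancing the producer index once more would make it collide with the
+ * consumer index.
+ */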
+
+/*
+ * Assert that cond is true. If !cond is true, display str and the vararg list
+ * in a printf-like syntax. Also, if !cond is true, return altRet.
+ *
+ * \param cond A boolean expression to be asserted true
+ * \param altRet The value to be returned if cond doesn't hold true
+ * \param str A quoted char string
+ *
+ * E.g.:
+ * SEC_ASSERT(ret > 0, 0, "ERROR initializing app: code = %d\n", ret);
+ */
+#define SEC_ASSERT(cond, altRet, ...) do {\
+ if (unlikely(!(cond))) {\
+ CAAM_JR_ERR(__VA_ARGS__); \
+ return altRet; \
+ } \
+} while (0)
+
+#define SEC_DP_ASSERT(cond, altRet, ...) do {\
+ if (unlikely(!(cond))) {\
+ CAAM_JR_DP_ERR(__VA_ARGS__); \
+ return altRet; \
+ } \
+} while (0)
+
+#define ASSERT(x)
+
+/*
+ * Constants representing various job ring registers
+ */
+#if CAAM_BYTE_ORDER == __BIG_ENDIAN
+#define JR_REG_IRBA_OFFSET 0x0000
+#define JR_REG_IRBA_OFFSET_LO 0x0004
+#else
+#define JR_REG_IRBA_OFFSET 0x0004
+#define JR_REG_IRBA_OFFSET_LO 0x0000
+#endif
+
+#define JR_REG_IRSR_OFFSET 0x000C
+#define JR_REG_IRSA_OFFSET 0x0014
+#define JR_REG_IRJA_OFFSET 0x001C
+
+#if CAAM_BYTE_ORDER == __BIG_ENDIAN
+#define JR_REG_ORBA_OFFSET 0x0020
+#define JR_REG_ORBA_OFFSET_LO 0x0024
+#else
+#define JR_REG_ORBA_OFFSET 0x0024
+#define JR_REG_ORBA_OFFSET_LO 0x0020
+#endif
+
+#define JR_REG_ORSR_OFFSET 0x002C
+#define JR_REG_ORJR_OFFSET 0x0034
+#define JR_REG_ORSFR_OFFSET 0x003C
+#define JR_REG_JROSR_OFFSET 0x0044
+#define JR_REG_JRINT_OFFSET 0x004C
+
+#define JR_REG_JRCFG_OFFSET 0x0050
+#define JR_REG_JRCFG_OFFSET_LO 0x0054
+
+#define JR_REG_IRRI_OFFSET 0x005C
+#define JR_REG_ORWI_OFFSET 0x0064
+#define JR_REG_JRCR_OFFSET 0x006C
+
+/*
+ * Constants for error handling on job ring
+ */
+#define JR_REG_JRINT_ERR_TYPE_SHIFT 8
+#define JR_REG_JRINT_ERR_ORWI_SHIFT 16
+#define JR_REG_JRINIT_JRE_SHIFT 1
+
+#define JRINT_JRE (1 << JR_REG_JRINIT_JRE_SHIFT)
+#define JRINT_ERR_WRITE_STATUS (1 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_BAD_INPUT_BASE (3 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_BAD_OUTPUT_BASE (4 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_WRITE_2_IRBA (5 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_WRITE_2_ORBA (6 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_RES_B4_HALT (7 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_REM_TOO_MANY (8 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_ADD_TOO_MANY (9 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_HALT_MASK 0x0C
+#define JRINT_ERR_HALT_INPROGRESS 0x04
+#define JRINT_ERR_HALT_COMPLETE 0x08
+
+#define JR_REG_JRCR_VAL_RESET 0x00000001
+
+#define JR_REG_JRCFG_LO_ICTT_SHIFT 0x10
+#define JR_REG_JRCFG_LO_ICDCT_SHIFT 0x08
+#define JR_REG_JRCFG_LO_ICEN_EN 0x02
+
+/*
+ * Constants for Descriptor Processing errors
+ */
+#define SEC_HW_ERR_SSRC_NO_SRC 0x00
+#define SEC_HW_ERR_SSRC_CCB_ERR 0x02
+#define SEC_HW_ERR_SSRC_JMP_HALT_U 0x03
+#define SEC_HW_ERR_SSRC_DECO 0x04
+#define SEC_HW_ERR_SSRC_JR 0x06
+#define SEC_HW_ERR_SSRC_JMP_HALT_COND 0x07
+
+#define SEC_HW_ERR_DECO_HFN_THRESHOLD 0xF1
+#define SEC_HW_ERR_CCB_ICV_CHECK_FAIL 0x0A
+
+/*
+ * Constants for descriptors
+ */
+/* Return higher 32 bits of physical address */
+#define PHYS_ADDR_HI(phys_addr) \
+ (uint32_t)(((uint64_t)phys_addr) >> 32)
+
+/* Return lower 32 bits of physical address */
+#define PHYS_ADDR_LO(phys_addr) \
+ (uint32_t)(((uint64_t)phys_addr) & 0xFFFFFFFF)
+
+/*
+ * Macros for extracting error codes for the job ring
+ */
+#define JR_REG_JRINT_ERR_TYPE_EXTRACT(value) ((value) & 0x00000F00)
+#define JR_REG_JRINT_ERR_ORWI_EXTRACT(value) \
+ (((value) & 0x3FFF0000) >> JR_REG_JRINT_ERR_ORWI_SHIFT)
+#define JR_REG_JRINT_JRE_EXTRACT(value) ((value) & JRINT_JRE)
+
+/*
+ * Macros for managing the job ring
+ */
+/* Read pointer to job ring input ring start address */
+#if defined(RTE_ARCH_ARM64)
+#define hw_get_inp_queue_base(jr) ((((dma_addr_t)GET_JR_REG(IRBA, \
+ (jr))) << 32) | \
+ (GET_JR_REG_LO(IRBA, (jr))))
+
+/* Read pointer to job ring output ring start address */
+#define hw_get_out_queue_base(jr) (((dma_addr_t)(GET_JR_REG(ORBA, \
+ (jr))) << 32) | \
+ (GET_JR_REG_LO(ORBA, (jr))))
+#else
+#define hw_get_inp_queue_base(jr) ((dma_addr_t)(GET_JR_REG_LO(IRBA, (jr))))
+
+#define hw_get_out_queue_base(jr) ((dma_addr_t)(GET_JR_REG_LO(ORBA, (jr))))
+#endif
+
+/*
+ * IRJA - Input Ring Jobs Added Register shows
+ * how many new jobs were added to the Input Ring.
+ */
+#define hw_enqueue_desc_on_job_ring(job_ring) SET_JR_REG(IRJA, (job_ring), 1)
+
+#define hw_set_input_ring_size(job_ring, size) SET_JR_REG(IRSR, job_ring, \
+ (size))
+
+#define hw_set_output_ring_size(job_ring, size) SET_JR_REG(ORSR, job_ring, \
+ (size))
+
+#if defined(RTE_ARCH_ARM64)
+#define hw_set_input_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(IRBA, job_ring, PHYS_ADDR_HI(start_addr)); \
+ SET_JR_REG_LO(IRBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+
+#define hw_set_output_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(ORBA, job_ring, PHYS_ADDR_HI(start_addr)); \
+ SET_JR_REG_LO(ORBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+
+#else
+#define hw_set_input_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(IRBA, job_ring, 0); \
+ SET_JR_REG_LO(IRBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+
+#define hw_set_output_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(ORBA, job_ring, 0); \
+ SET_JR_REG_LO(ORBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+#endif
+
+/* ORJR - Output Ring Jobs Removed Register shows how many jobs were
+ * removed from the Output Ring for processing by software. This is done after
+ * the software has processed the entries.
+ */
+#define hw_remove_entries(jr, no_entries) SET_JR_REG(ORJR, (jr), (no_entries))
+
+/* IRSA - Input Ring Slots Available register holds the number of entries in
+ * the Job Ring's input ring. Once a job is enqueued, the value returned is
+ * decremented by the hardware by the number of jobs enqueued.
+ */
+#define hw_get_available_slots(jr) GET_JR_REG(IRSA, jr)
+
+/* ORSFR - Output Ring Slots Full register holds the number of jobs which were
+ * processed by the SEC and can be retrieved by the software. Once a job has
+ * been processed by software, the user will call hw_remove_one_entry in order
+ * to notify the SEC that the entry was processed.
+ */
+#define hw_get_no_finished_jobs(jr) GET_JR_REG(ORSFR, jr)
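+
+/* Illustrative poll sketch built from the macros above; the driver's actual
+ * dequeue path differs in detail:
+ *
+ *	uint32_t n = hw_get_no_finished_jobs(jr);
+ *
+ *	while (n--) {
+ *		(read jr->output_ring[jr->cidx] and check its status word)
+ *		jr->cidx = SEC_CIRCULAR_COUNTER(jr->cidx, SEC_JOB_RING_SIZE);
+ *		hw_remove_entries(jr, 1);
+ *	}
+ */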
+
+/*
+ * Macros for manipulating JR registers
+ */
+#if CORE_BYTE_ORDER == CAAM_BYTE_ORDER
+#define sec_read_32(addr) (*(volatile unsigned int *)(addr))
+#define sec_write_32(addr, val) (*(volatile unsigned int *)(addr) = (val))
+
+#else
+#define sec_read_32(addr) rte_bswap32((*(volatile unsigned int *)(addr)))
+#define sec_write_32(addr, val) \
+ (*(volatile unsigned int *)(addr) = rte_bswap32(val))
+#endif
+
+#if CAAM_BYTE_ORDER == __LITTLE_ENDIAN
+#define sec_read_64(addr) (((u64)sec_read_32((u32 *)(addr) + 1) << 32) | \
+ (sec_read_32((u32 *)(addr))))
+
+#define sec_write_64(addr, val) { \
+ sec_write_32((u32 *)(addr) + 1, (u32)((val) >> 32)); \
+ sec_write_32((u32 *)(addr), (u32)(val)); \
+}
+#else /* CAAM_BYTE_ORDER == __BIG_ENDIAN */
+#define sec_read_64(addr) (((u64)sec_read_32((u32 *)(addr)) << 32) | \
+ (sec_read_32((u32 *)(addr) + 1)))
+
+#define sec_write_64(addr, val) { \
+ sec_write_32((u32 *)(addr), (u32)((val) >> 32)); \
+ sec_write_32((u32 *)(addr) + 1, (u32)(val)); \
+}
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+#define sec_read_addr(a) sec_read_64((a))
+#define sec_write_addr(a, v) sec_write_64((a), (v))
+#else
+#define sec_read_addr(a) sec_read_32((a))
+#define sec_write_addr(a, v) sec_write_32((a), (v))
+#endif
+
+#define JR_REG(name, jr) (CHAN_BASE(jr) + JR_REG_##name##_OFFSET)
+#define JR_REG_LO(name, jr) (CHAN_BASE(jr) + JR_REG_##name##_OFFSET_LO)
+
+#define GET_JR_REG(name, jr) (sec_read_32(JR_REG(name, (jr))))
+#define GET_JR_REG_LO(name, jr) (sec_read_32(JR_REG_LO(name, (jr))))
+
+#define SET_JR_REG(name, jr, value) \
+ (sec_write_32(JR_REG(name, (jr)), value))
+#define SET_JR_REG_LO(name, jr, value) \
+ (sec_write_32(JR_REG_LO(name, (jr)), value))
+
+/* Lists the possible states for a job ring. */
+typedef enum sec_job_ring_state_e {
+ SEC_JOB_RING_STATE_STARTED, /* Job ring is initialized */
+ SEC_JOB_RING_STATE_RESET, /* Job ring reset is in progress */
+} sec_job_ring_state_t;
+
+/* code or cmd block to caam */
+struct sec_cdb {
+ struct {
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t rsvd63_48;
+ unsigned int rsvd47_39:9;
+ unsigned int idlen:7;
+#else
+ unsigned int idlen:7;
+ unsigned int rsvd47_39:9;
+ uint16_t rsvd63_48;
+#endif
+ } field;
+ } __rte_packed hi;
+
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ unsigned int rsvd31_30:2;
+ unsigned int fsgt:1;
+ unsigned int lng:1;
+ unsigned int offset:2;
+ unsigned int abs:1;
+ unsigned int add_buf:1;
+ uint8_t pool_id;
+ uint16_t pool_buffer_size;
+#else
+ uint16_t pool_buffer_size;
+ uint8_t pool_id;
+ unsigned int add_buf:1;
+ unsigned int abs:1;
+ unsigned int offset:2;
+ unsigned int lng:1;
+ unsigned int fsgt:1;
+ unsigned int rsvd31_30:2;
+#endif
+ } field;
+ } __rte_packed lo;
+ } __rte_packed sh_hdr;
+
+ uint32_t sh_desc[SEC_JOB_DESCRIPTOR_SIZE];
+};
+
+struct caam_jr_qp {
+ struct sec_job_ring_t *ring;
+ uint64_t rx_pkts;
+ uint64_t rx_errs;
+ uint64_t rx_poll_err;
+ uint64_t tx_pkts;
+ uint64_t tx_errs;
+ uint64_t tx_ring_full;
+};
+
+struct sec_job_ring_t {
+ /* TODO: Add wrapper macro to make it obvious this is the consumer index
+ * on the output ring
+ */
+ uint32_t cidx; /* Consumer index for job ring (jobs array).
+ * @note: cidx and pidx are accessed from
+ * different threads. Place the cidx and pidx
+ * inside the structure so that they lie on
+ * different cachelines, to avoid false sharing
+ * between threads when the threads run on
+ * different cores!
+ */
+ /* TODO: Add wrapper macro to make it obvious this is the producer index
+ * on the input ring
+ */
+ uint32_t pidx; /* Producer index for job ring (jobs array) */
+
+ phys_addr_t *input_ring; /* Ring of input descriptors sent to SEC.
+ * Size of array is power of 2 to allow fast
+ * update of producer/consumer indexes with
+ * bitwise operations.
+ */
+
+ struct sec_outring_entry *output_ring;
+ /* Ring of output descriptors received from SEC.
+ * Size of array is power of 2 to allow fast
+ * update of producer/consumer indexes with
+ * bitwise operations.
+ */
+
+ uint32_t irq_fd; /* The file descriptor used for polling from
+ * user space for interrupt notifications
+ */
+ uint32_t jr_mode; /* Mode used by SEC Driver to receive
+ * notifications from SEC. Can be either
+ * of the three: #SEC_NOTIFICATION_TYPE_NAPI
+ * #SEC_NOTIFICATION_TYPE_IRQ or
+ * #SEC_NOTIFICATION_TYPE_POLL
+ */
+ uint32_t napi_mode; /* Job ring mode if NAPI mode is chosen
+ * Used only when jr_mode is set to
+ * #SEC_NOTIFICATION_TYPE_NAPI
+ */
+ void *register_base_addr; /* Base address for SEC's
+ * register memory for this job ring.
+ */
+ uint8_t coalescing_en; /* notifies if coalescing is
+ * enabled for the job ring
+ */
+ sec_job_ring_state_t jr_state; /* The state of this job ring */
+
+ struct rte_mempool *ctx_pool; /* per dev mempool for caam_jr_op_ctx */
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+ struct caam_jr_qp qps[RTE_CAAM_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+};
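
The note on cidx and pidx above warns about false sharing, but keeping the two
indexes on separate cachelines has to be arranged explicitly. A minimal sketch
of one way to do that, assuming rte_common.h is available for
__rte_cache_aligned; the struct name ring_indexes_sketch is hypothetical and
not part of the driver:

struct ring_indexes_sketch {
	uint32_t cidx __rte_cache_aligned;	/* updated only by the consumer core */
	uint32_t pidx __rte_cache_aligned;	/* updated only by the producer core */
};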
+
+/* Union describing the possible error codes that
+ * can be set in the descriptor status word
+ */
+union hw_error_code {
+ uint32_t error;
+ union {
+ struct {
+ uint32_t ssrc:4;
+ uint32_t ssed_val:28;
+ } __rte_packed value;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t res:28;
+ } __rte_packed no_status_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t cha_id:4;
+ uint32_t err_id:4;
+ } __rte_packed ccb_status_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t offset:8;
+ } __rte_packed jmp_halt_user_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t desc_err:8;
+ } __rte_packed deco_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t res:17;
+ uint32_t naddr:3;
+ uint32_t desc_err:8;
+ } __rte_packed jr_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t cond:8;
+ } __rte_packed jmp_halt_cond_src;
+ } __rte_packed error_desc;
+} __rte_packed;
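
The union above lets one status word returned by SEC be decoded according to
its source field (ssrc). A hedged sketch of a dispatcher follows; the
SEC_SSRC_* constants and the handle_*() helpers are hypothetical placeholders,
not definitions from this header:

static void decode_sec_error(uint32_t status)
{
	union hw_error_code err;

	err.error = status;

	switch (err.error_desc.value.ssrc) {
	case SEC_SSRC_CCB:	/* error reported by a CHA/CCB */
		handle_ccb_error(err.error_desc.ccb_status_src.cha_id,
				 err.error_desc.ccb_status_src.err_id);
		break;
	case SEC_SSRC_JR:	/* error reported by the job ring itself */
		handle_jr_error(err.error_desc.jr_src.desc_err);
		break;
	default:
		break;
	}
}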
+
+/* @brief Initialize a job ring/channel in SEC device.
+ * Write configuration register/s to properly initialize a job ring.
+ *
+ * @param [in] job_ring The job ring
+ *
+ * @retval 0 for success
+ * @retval other for error
+ */
+int hw_reset_job_ring(struct sec_job_ring_t *job_ring);
+
+/* @brief Reset a job ring/channel in SEC device.
+ * Write configuration register/s to reset a job ring.
+ *
+ * @param [in] job_ring The job ring
+ *
+ * @retval 0 for success
+ * @retval -1 in case job ring reset failed
+ */
+int hw_shutdown_job_ring(struct sec_job_ring_t *job_ring);
+
+/* @brief Handle a job ring/channel error in SEC device.
+ * Identify the error type and clear error bits if required.
+ *
+ * @param [in] job_ring The job ring
+ * @param [in] sec_error_code The job ring's error code
+ */
+void hw_handle_job_ring_error(struct sec_job_ring_t *job_ring,
+ uint32_t sec_error_code);
+
+/* @brief Handle a job ring error in the device.
+ * Identify the error type and print an explanatory
+ * message.
+ *
+ * @param [in] job_ring The job ring
+ * @param [in] code The error code
+ */
+void hw_job_ring_error_print(struct sec_job_ring_t *job_ring, int code);
+
+/* @brief Set interrupt coalescing parameters on the Job Ring.
+ * @param [in] job_ring The job ring
+ * @param [in] irq_coalescing_timer Interrupt coalescing timer threshold.
+ * This value determines the maximum
+ * amount of time after processing a
+ * descriptor before raising an interrupt.
+ * @param [in] irq_coalescing_count Interrupt coalescing descriptor count
+ * threshold.
+ */
+int hw_job_ring_set_coalescing_param(struct sec_job_ring_t *job_ring,
+ uint16_t irq_coalescing_timer,
+ uint8_t irq_coalescing_count);
+
+/* @brief Enable interrupt coalescing on a job ring
+ * @param [in] job_ring The job ring
+ */
+int hw_job_ring_enable_coalescing(struct sec_job_ring_t *job_ring);
+
+/* @brief Disable interrupt coalescing on a job ring
+ * @param [in] job_ring The job ring
+ */
+int hw_job_ring_disable_coalescing(struct sec_job_ring_t *job_ring);
+
+#endif /* CAAM_JR_HW_SPECIFIC_H */
diff --git a/drivers/crypto/caam_jr/caam_jr_log.h b/drivers/crypto/caam_jr/caam_jr_log.h
new file mode 100644
index 00000000..106ff07a
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_log.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _CAAM_JR_LOG_H_
+#define _CAAM_JR_LOG_H_
+
+#include <rte_log.h>
+
+extern int caam_jr_logtype;
+
+#define CAAM_JR_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, caam_jr_logtype, "caam_jr: " \
+ fmt "\n", ##args)
+
+#define CAAM_JR_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, caam_jr_logtype, "caam_jr: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() CAAM_JR_DEBUG(" >>")
+
+#define CAAM_JR_INFO(fmt, args...) \
+ CAAM_JR_LOG(INFO, fmt, ## args)
+#define CAAM_JR_ERR(fmt, args...) \
+ CAAM_JR_LOG(ERR, fmt, ## args)
+#define CAAM_JR_WARN(fmt, args...) \
+ CAAM_JR_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define CAAM_JR_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt "\n", ## args)
+
+#define CAAM_JR_DP_DEBUG(fmt, args...) \
+ CAAM_JR_DP_LOG(DEBUG, fmt, ## args)
+#define CAAM_JR_DP_INFO(fmt, args...) \
+ CAAM_JR_DP_LOG(INFO, fmt, ## args)
+#define CAAM_JR_DP_WARN(fmt, args...) \
+ CAAM_JR_DP_LOG(WARNING, fmt, ## args)
+#define CAAM_JR_DP_ERR(fmt, args...) \
+ CAAM_JR_DP_LOG(ERR, fmt, ## args)
+
+#endif /* _CAAM_JR_LOG_H_ */
diff --git a/drivers/crypto/caam_jr/caam_jr_pvt.h b/drivers/crypto/caam_jr/caam_jr_pvt.h
new file mode 100644
index 00000000..9f1adabc
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_pvt.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_PVT_H
+#define CAAM_JR_PVT_H
+
+#include <hw/desc/ipsec.h>
+
+/* NXP CAAM JR PMD device name */
+
+#define CAAM_JR_ALG_UNSUPPORT (-1)
+
+/* Minimum job descriptor consists of a one-word job descriptor HEADER and
+ * a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+#define CAAM_JOB_DESC_SIZE 13
+
+/* CTX_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define CTX_POOL_NUM_BUFS 32000
+#define CTX_POOL_CACHE_SIZE 512
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+#define JR_MAX_NB_MAX_DIGEST 32
+
+#define RTE_CAAM_JR_PMD_MAX_NB_SESSIONS 2048
+
+
+/* Return codes for SEC user space driver APIs */
+enum sec_return_code_e {
+ SEC_SUCCESS = 0, /* Operation executed successfully.*/
+ SEC_INVALID_INPUT_PARAM, /* API received an invalid input
+ * parameter
+ */
+ SEC_OUT_OF_MEMORY, /* Memory allocation failed. */
+ SEC_DESCRIPTOR_IN_FLIGHT, /* API function indicates there are
+ * descriptors in flight
+ * for SEC to process.
+ */
+ SEC_LAST_DESCRIPTOR_IN_FLIGHT, /* API function indicates there is one
+ * last descriptor in flight
+ * for SEC to process.
+ */
+ SEC_PROCESSING_ERROR, /* Indicates a SEC processing error
+ * occurred on a Job Ring which requires
+ * a SEC user space driver shutdown. Can
+ * be returned from sec_poll_job_ring().
+ * Then the only other API that can be
+ * called after this error is
+ * sec_release().
+ */
+ SEC_DESC_PROCESSING_ERROR, /* Indicates a SEC descriptor processing
+ * error occurred on a Job Ring. Can be
+ * returned from sec_poll_job_ring().
+ * The driver was able to reset job ring
+ * and job ring can be used like in a
+ * normal case.
+ */
+ SEC_JR_IS_FULL, /* Job Ring is full. There is no more
+ * room in the JR for new descriptors.
+ * This can happen if the descriptor RX
+ * rate is higher than SEC's capacity.
+ */
+ SEC_DRIVER_RELEASE_IN_PROGRESS, /* SEC driver shutdown is in progress,
+ * descriptors processing or polling is
+ * allowed.
+ */
+ SEC_DRIVER_ALREADY_INITIALIZED, /* SEC driver is already initialized.*/
+ SEC_DRIVER_NOT_INITIALIZED, /* SEC driver is NOT initialized. */
+ SEC_JOB_RING_RESET_IN_PROGRESS, /* Job ring is resetting due to a
+ * per-descriptor SEC processing error
+ * ::SEC_DESC_PROCESSING_ERROR. Reset is
+ * finished when sec_poll_job_ring()
+ * returns. Then the job ring can be used
+ * again.
+ */
+ SEC_RESET_ENGINE_FAILED, /* Resetting of SEC Engine by SEC Kernel
+ * Driver Failed
+ */
+ SEC_ENABLE_IRQS_FAILED, /* Enabling of IRQs in SEC Kernel Driver
+ * Failed
+ */
+ SEC_DISABLE_IRQS_FAILED, /* Disabling of IRQs in SEC Kernel
+ * Driver Failed
+ */
+ /* END OF VALID VALUES */
+
+ SEC_RETURN_CODE_MAX_VALUE, /* Invalid value for return code. It is
+ * used to mark the end of the return
+ * code values. @note ALL new return
+ * code values MUST be added before
+ * ::SEC_RETURN_CODE_MAX_VALUE!
+ */
+};
+
+enum caam_jr_op_type {
+ CAAM_JR_NONE, /* No Cipher operations*/
+ CAAM_JR_CIPHER,/* CIPHER operations */
+ CAAM_JR_AUTH, /* Authentication Operations */
+ CAAM_JR_AEAD, /* Authenticated Encryption with associated data */
+ CAAM_JR_IPSEC, /* IPSEC protocol operations*/
+ CAAM_JR_PDCP, /* PDCP protocol operations*/
+ CAAM_JR_PKC, /* Public Key Cryptographic Operations */
+ CAAM_JR_MAX
+};
+
+struct caam_jr_session {
+ uint8_t dir; /* Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /* Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /* Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /* AEAD Algorithm*/
+ enum rte_security_session_protocol proto_alg; /* Security Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /* pointer to key data */
+ size_t length; /* key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /* pointer to key data */
+ size_t length; /* key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /* pointer to key data */
+ size_t length; /* key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /* Initialisation vector parameters */
+ uint16_t auth_only_len; /* Length of data for Auth only */
+ uint32_t digest_length;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ip ip4_hdr;
+ struct ipsec_decap_pdb decap_pdb;
+ struct caam_jr_qp *qp;
+ struct sec_cdb *cdb; /* cmd block associated with qp */
+ struct rte_mempool *ctx_pool; /* session mempool for caam_jr_op_ctx */
+};
+
+/*
+ * 16-byte hardware scatter/gather table
+ */
+
+#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
+#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */
+#define SEC4_SG_BPID_MASK 0x000000ff
+#define SEC4_SG_BPID_SHIFT 16
+#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
+#define SEC4_SG_OFFSET_MASK 0x00001fff
+
+struct sec4_sg_entry {
+ uint64_t ptr;
+ uint32_t len;
+ uint32_t bpid_offset;
+};
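
In a hardware S/G entry the buffer pool id and the data offset share the
32-bit bpid_offset word, which is what SEC4_SG_BPID_SHIFT and the two masks
above describe. A minimal sketch of packing one entry; sg_entry_fill is a
hypothetical helper, not part of the driver, and any CPU-to-hardware byte
swapping is assumed to happen elsewhere:

static inline void
sg_entry_fill(struct sec4_sg_entry *sge, uint64_t iova, uint32_t len,
	      uint16_t offset, uint8_t bpid, int final)
{
	sge->ptr = iova;
	/* length plus the FINAL flag for the last entry in the table */
	sge->len = (len & SEC4_SG_LEN_MASK) | (final ? SEC4_SG_LEN_FIN : 0);
	/* bpid lives at bits 16-23, offset in the low 13 bits */
	sge->bpid_offset = ((uint32_t)bpid << SEC4_SG_BPID_SHIFT) |
			   (offset & SEC4_SG_OFFSET_MASK);
}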
+
+#define MAX_SG_ENTRIES 16
+#define SG_CACHELINE_0 0
+#define SG_CACHELINE_1 4
+#define SG_CACHELINE_2 8
+#define SG_CACHELINE_3 12
+
+/* Structure encompassing a job descriptor which is to be processed
+ * by SEC. User should also initialise this structure with the callback
+ * function pointer which will be called by the driver after receiving the
+ * processed descriptor from SEC. User data is also passed in this data
+ * structure which will be sent as an argument to the user callback function.
+ */
+struct job_descriptor {
+ uint32_t desc[CAAM_JOB_DESC_SIZE];
+};
+
+struct caam_jr_op_ctx {
+ struct job_descriptor jobdes;
+ /* sg[0] output, sg[1] input, others are possible sub frames */
+ struct sec4_sg_entry sg[MAX_SG_ENTRIES];
+ struct rte_crypto_op *op;
+ struct rte_mempool *ctx_pool; /* mempool pointer for caam_jr_op_ctx */
+ int64_t vtop_offset;
+ uint8_t digest[JR_MAX_NB_MAX_DIGEST];
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer calculate chksum for buffer
+ * @param len buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
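
calc_chksum() is the ones'-complement sum used for IPv4 header checksums; the
session structure above keeps a struct ip (ip4_hdr) whose ip_sum field can be
filled with it. A short usage fragment, with hdr purely illustrative:

	struct ip hdr;	/* illustrative header; address/length fields set elsewhere */

	hdr.ip_sum = 0;	/* checksum field must be zero while computing */
	hdr.ip_sum = calc_chksum(&hdr, sizeof(struct ip));
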
+struct uio_job_ring {
+ uint32_t jr_id;
+ uint32_t uio_fd;
+ void *register_base_addr;
+ int map_size;
+ int uio_minor_number;
+};
+
+int sec_cleanup(void);
+int sec_configure(void);
+struct uio_job_ring *config_job_ring(void);
+void free_job_ring(uint32_t uio_fd);
+
+/* For DMA memory allocation of specified length and alignment */
+static inline void *
+caam_jr_dma_mem_alloc(size_t align, size_t len)
+{
+ return rte_malloc("mem_alloc", len, align);
+}
+
+/* For freeing dma memory */
+static inline void
+caam_jr_dma_free(void *ptr)
+{
+ rte_free(ptr);
+}
+
+static inline rte_iova_t
+caam_jr_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *ms;
+
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (size_t)NULL;
+}
+
+static inline void *
+caam_jr_dma_ptov(rte_iova_t paddr)
+{
+ return rte_mem_iova2virt(paddr);
+}
+
+/* Virtual to physical address conversion */
+static inline rte_iova_t caam_jr_dma_vtop(void *ptr)
+{
+ return caam_jr_mem_vtop(ptr);
+}
+
+/** @brief Request to SEC kernel driver to enable interrupts for
+ * descriptor finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ enable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd Job Ring UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ */
+uint32_t caam_jr_enable_irqs(uint32_t uio_fd);
+
+/** @brief Request to SEC kernel driver to disable interrupts for descriptor
+ * finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ disable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ *
+ */
+uint32_t caam_jr_disable_irqs(uint32_t uio_fd);
+
+#endif
diff --git a/drivers/crypto/caam_jr/caam_jr_uio.c b/drivers/crypto/caam_jr/caam_jr_uio.c
new file mode 100644
index 00000000..c07d9db0
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_uio.c
@@ -0,0 +1,501 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_crypto.h>
+#include <rte_security.h>
+
+#include <caam_jr_config.h>
+#include <caam_jr_hw_specific.h>
+#include <caam_jr_pvt.h>
+#include <caam_jr_log.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+/* Prefix path to sysfs directory where UIO device attributes are exported.
+ * Path for UIO device X is /sys/class/uio/uioX
+ */
+#define SEC_UIO_DEVICE_SYS_ATTR_PATH "/sys/class/uio"
+
+/* Subfolder in sysfs where mapping attributes are exported
+ * for each UIO device. Path for mapping Y for device X is:
+ * /sys/class/uio/uioX/maps/mapY
+ */
+#define SEC_UIO_DEVICE_SYS_MAP_ATTR "maps/map"
+
+/* Name of UIO device file prefix. Each UIO device will have a device file
+ * /dev/uioX, where X is the minor device number.
+ */
+#define SEC_UIO_DEVICE_FILE_NAME "/dev/uio"
+
+/*
+ * Name of UIO device. Each user space SEC job ring will have a corresponding
+ * UIO device with the name sec-channelX, where X is the job ring id.
+ * Maximum length is #SEC_UIO_MAX_DEVICE_NAME_LENGTH.
+ *
+ * @note Must be kept in synch with SEC kernel driver
+ * define #SEC_UIO_DEVICE_NAME !
+ */
+#define SEC_UIO_DEVICE_NAME "fsl-jr"
+
+/* Maximum length for the name of an UIO device file.
+ * Device file name format is: /dev/uioX.
+ */
+#define SEC_UIO_MAX_DEVICE_FILE_NAME_LENGTH 30
+
+/* Maximum length for the name of an attribute file for an UIO device.
+ * Attribute files are exported in sysfs and have the name formatted as:
+ * /sys/class/uio/uioX/<attribute_file_name>
+ */
+#define SEC_UIO_MAX_ATTR_FILE_NAME 100
+
+/* Command that is used by SEC user space driver and SEC kernel driver
+ * to signal a request from the former to the latter to disable job DONE
+ * and error IRQs on a certain job ring.
+ * The configuration is done at SEC Controller's level.
+ * @note Need to be kept in synch with #SEC_UIO_DISABLE_IRQ_CMD from
+ * linux/drivers/crypto/talitos.c !
+ */
+#define SEC_UIO_DISABLE_IRQ_CMD 0
+
+/* Command that is used by SEC user space driver and SEC kernel driver
+ * to signal a request from the former to the latter to enable job DONE
+ * and error IRQs on a certain job ring.
+ * The configuration is done at SEC Controller's level.
+ * @note Need to be kept in synch with #SEC_UIO_ENABLE_IRQ_CMD from
+ * linux/drivers/crypto/talitos.c !
+ */
+#define SEC_UIO_ENABLE_IRQ_CMD 1
+
+/** Command that is used by SEC user space driver and SEC kernel driver
+ * to signal a request from the former to the latter to do a SEC engine reset.
+ * @note Need to be kept in synch with #SEC_UIO_RESET_SEC_ENGINE_CMD from
+ * linux/drivers/crypto/talitos.c !
+ */
+#define SEC_UIO_RESET_SEC_ENGINE_CMD 3
+
+/* The id for the mapping used to export SEC's registers to
+ * user space through UIO devices.
+ */
+#define SEC_UIO_MAP_ID 0
+
+static struct uio_job_ring g_uio_job_ring[MAX_SEC_JOB_RINGS];
+static int g_uio_jr_num;
+
+/** @brief Checks if a file name contains a certain substring.
+ * If so, it extracts the number following the substring.
+ * This function assumes a filename format of: [text][number].
+ * @param [in] filename File name
+ * @param [in] match String to match in file name
+ * @param [out] number The number extracted from filename
+ *
+ * @retval true if file name matches the criteria
+ * @retval false if file name does not match the criteria
+ */
+static bool
+file_name_match_extract(const char filename[], const char match[], int *number)
+{
+ char *substr = NULL;
+
+ substr = strstr(filename, match);
+ if (substr == NULL)
+ return false;
+
+ /* substring <match> was found in <filename>
+ * read number following <match> substring in <filename>
+ */
+ if (sscanf(filename + strlen(match), "%d", number) <= 0)
+ return false;
+
+ return true;
+}
+
+/** @brief Reads first line from a file.
+ * Composes file name as: root/subdir/filename
+ *
+ * @param [in] root Root path
+ * @param [in] subdir Subdirectory name
+ * @param [in] filename File name
+ * @param [out] line The first line read from file.
+ *
+ * @retval 0 for success
+ * @retval other value for error
+ */
+static int
+file_read_first_line(const char root[], const char subdir[],
+ const char filename[], char *line)
+{
+ char absolute_file_name[SEC_UIO_MAX_ATTR_FILE_NAME];
+ int fd = 0, ret = 0;
+
+ /*compose the file name: root/subdir/filename */
+ memset(absolute_file_name, 0, sizeof(absolute_file_name));
+ snprintf(absolute_file_name, SEC_UIO_MAX_ATTR_FILE_NAME,
+ "%s/%s/%s", root, subdir, filename);
+
+ fd = open(absolute_file_name, O_RDONLY);
+ SEC_ASSERT(fd > 0, fd, "Error opening file %s",
+ absolute_file_name);
+
+ /* read UIO device name from first line in file */
+ ret = read(fd, line, SEC_UIO_MAX_DEVICE_FILE_NAME_LENGTH);
+ close(fd);
+
+ /* NULL-ify string */
+ line[SEC_UIO_MAX_DEVICE_FILE_NAME_LENGTH - 1] = '\0';
+
+ if (ret <= 0) {
+ CAAM_JR_ERR("Error reading from file %s", absolute_file_name);
+ return ret;
+ }
+
+ return 0;
+}
+
+/** @brief Uses UIO control to send commands to SEC kernel driver.
+ * The mechanism is to write a command word into the file descriptor
+ * that the user-space driver obtained for each user-space SEC job ring.
+ * Both user-space driver and kernel driver must have the same understanding
+ * about the command codes.
+ *
+ * @param [in] uio_fd The UIO file descriptor
+ * @param [in] uio_command Command word
+ *
+ * @retval Result of write operation on the job ring's UIO file descriptor.
+ * Should be sizeof(int) for success operations.
+ * Other values can be returned and used, if desired to add special
+ * meaning to return values, but this has to be programmed in SEC
+ * kernel driver as well. No special return values are used.
+ */
+static int
+sec_uio_send_command(uint32_t uio_fd, int32_t uio_command)
+{
+ int ret;
+
+ /* Use UIO file descriptor we have for this job ring.
+ * Writing a command code to this file descriptor will make the
+ * SEC kernel driver execute the desired command.
+ */
+ ret = write(uio_fd, &uio_command, sizeof(int));
+ return ret;
+}
+
+/** @brief Request to SEC kernel driver to enable interrupts for
+ * descriptor finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ enable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd Job Ring UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ */
+uint32_t
+caam_jr_enable_irqs(uint32_t uio_fd)
+{
+ int ret;
+
+ /* Use UIO file descriptor we have for this job ring.
+ * Writing a command code to this file descriptor will make the
+ * SEC kernel driver enable DONE and Error IRQs for this job ring,
+ * at Controller level.
+ */
+ ret = sec_uio_send_command(uio_fd, SEC_UIO_ENABLE_IRQ_CMD);
+ SEC_ASSERT(ret == sizeof(int), -1,
+ "Failed to request SEC engine to enable job done and "
+ "error IRQs through UIO control. UIO FD %d. Reset SEC driver!",
+ uio_fd);
+ CAAM_JR_DEBUG("Enabled IRQs on jr with uio_fd %d", uio_fd);
+ return 0;
+}
+
+
+/** @brief Request to SEC kernel driver to disable interrupts for descriptor
+ * finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ disable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ *
+ */
+uint32_t
+caam_jr_disable_irqs(uint32_t uio_fd)
+{
+ int ret;
+
+ /* Use UIO file descriptor we have for this job ring.
+ * Writing a command code to this file descriptor will make the
+ * SEC kernel driver disable IRQs for this job ring,
+ * at Controller level.
+ */
+
+ ret = sec_uio_send_command(uio_fd, SEC_UIO_DISABLE_IRQ_CMD);
+ SEC_ASSERT(ret == sizeof(int), -1,
+ "Failed to request SEC engine to disable job done and "
+ "IRQs through UIO control. UIO_FD %d Reset SEC driver!",
+ uio_fd);
+ CAAM_JR_DEBUG("Disabled IRQs on jr with uio_fd %d", uio_fd);
+ return 0;
+}
+
+/** @brief Maps register range assigned for a job ring.
+ *
+ * @param [in] uio_device_fd UIO device file descriptor
+ * @param [in] uio_device_id UIO device id
+ * @param [in] uio_map_id UIO allows maximum 5 different mapping for
+ * each device. Maps start with id 0.
+ * @param [out] map_size Map size.
+ * @retval NULL if failed to map registers
+ * @retval Virtual address for mapped register address range
+ */
+static void *
+uio_map_registers(int uio_device_fd, int uio_device_id,
+ int uio_map_id, int *map_size)
+{
+ void *mapped_address = NULL;
+ unsigned int uio_map_size = 0;
+ char uio_sys_root[SEC_UIO_MAX_ATTR_FILE_NAME];
+ char uio_sys_map_subdir[SEC_UIO_MAX_ATTR_FILE_NAME];
+ char uio_map_size_str[32];
+ int ret = 0;
+
+ /* compose the file name: root/subdir/filename */
+ memset(uio_sys_root, 0, sizeof(uio_sys_root));
+ memset(uio_sys_map_subdir, 0, sizeof(uio_sys_map_subdir));
+ memset(uio_map_size_str, 0, sizeof(uio_map_size_str));
+
+ /* Compose string: /sys/class/uio/uioX */
+ sprintf(uio_sys_root, "%s/%s%d", SEC_UIO_DEVICE_SYS_ATTR_PATH,
+ "uio", uio_device_id);
+ /* Compose string: maps/mapY */
+ sprintf(uio_sys_map_subdir, "%s%d", SEC_UIO_DEVICE_SYS_MAP_ATTR,
+ uio_map_id);
+
+ /* Read first (and only) line from file
+ * /sys/class/uio/uioX/maps/mapY/size
+ */
+ ret = file_read_first_line(uio_sys_root, uio_sys_map_subdir,
+ "size", uio_map_size_str);
+ SEC_ASSERT(ret == 0, NULL, "file_read_first_line() failed");
+
+ /* Read mapping size, expressed in hex (base 16) */
+ uio_map_size = strtol(uio_map_size_str, NULL, 16);
+
+ /* Map the region in user space */
+ mapped_address = mmap(0, /*dynamically choose virtual address */
+ uio_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, uio_device_fd, 0);
+ /* offset = 0 because UIO device has only one mapping
+ * for the entire SEC register memory
+ */
+ if (mapped_address == MAP_FAILED) {
+ CAAM_JR_ERR(
+ "Failed to map registers! errno = %d job ring fd = %d,"
+ "uio device id = %d, uio map id = %d", errno,
+ uio_device_fd, uio_device_id, uio_map_id);
+ return NULL;
+ }
+
+ /*
+ * Save the map size to use it later on for munmap-ing.
+ */
+ *map_size = uio_map_size;
+
+ CAAM_JR_INFO("UIO dev[%d] mapped region [id =%d] size 0x%x at %p",
+ uio_device_id, uio_map_id, uio_map_size, mapped_address);
+
+ return mapped_address;
+}
+
+void
+free_job_ring(uint32_t uio_fd)
+{
+ struct uio_job_ring *job_ring = NULL;
+ int i;
+
+ if (!uio_fd)
+ return;
+
+ for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
+ if (g_uio_job_ring[i].uio_fd == uio_fd) {
+ job_ring = &g_uio_job_ring[i];
+ break;
+ }
+ }
+
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("JR not available for fd = %x\n", uio_fd);
+ return;
+ }
+
+ /* Close device file */
+ CAAM_JR_INFO("Closed device file for job ring %d , fd = %d",
+ job_ring->jr_id, job_ring->uio_fd);
+ close(job_ring->uio_fd);
+ g_uio_jr_num--;
+ job_ring->uio_fd = 0;
+ if (job_ring->register_base_addr == NULL)
+ return;
+
+ /* Unmap the PCI memory resource of device */
+ if (munmap(job_ring->register_base_addr, job_ring->map_size)) {
+ CAAM_JR_INFO("cannot munmap(%p, 0x%lx): %s",
+ job_ring->register_base_addr,
+ (unsigned long)job_ring->map_size, strerror(errno));
+ } else
+ CAAM_JR_DEBUG(" JR UIO memory unmapped at %p",
+ job_ring->register_base_addr);
+ job_ring->register_base_addr = NULL;
+}
+
+struct uio_job_ring *
+config_job_ring(void)
+{
+ char uio_device_file_name[32];
+ struct uio_job_ring *job_ring = NULL;
+ int i;
+
+ for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
+ if (g_uio_job_ring[i].uio_fd == 0) {
+ job_ring = &g_uio_job_ring[i];
+ g_uio_jr_num++;
+ break;
+ }
+ }
+
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("No free job ring\n");
+ return NULL;
+ }
+
+ /* Find UIO device created by SEC kernel driver for this job ring. */
+ memset(uio_device_file_name, 0, sizeof(uio_device_file_name));
+
+ sprintf(uio_device_file_name, "%s%d", SEC_UIO_DEVICE_FILE_NAME,
+ job_ring->uio_minor_number);
+
+ /* Open device file */
+ job_ring->uio_fd = open(uio_device_file_name, O_RDWR);
+ SEC_ASSERT(job_ring->uio_fd > 0, NULL,
+ "Failed to open UIO device file for job ring %d",
+ job_ring->jr_id);
+
+ CAAM_JR_INFO("Open device(%s) file for job ring=%d , uio_fd = %d",
+ uio_device_file_name, job_ring->jr_id, job_ring->uio_fd);
+
+ ASSERT(job_ring->register_base_addr == NULL);
+ job_ring->register_base_addr = uio_map_registers(
+ job_ring->uio_fd, job_ring->uio_minor_number,
+ SEC_UIO_MAP_ID, &job_ring->map_size);
+
+ SEC_ASSERT(job_ring->register_base_addr != NULL, NULL,
+ "Failed to map SEC registers");
+ return job_ring;
+}
+
+int
+sec_configure(void)
+{
+ char uio_name[32];
+ int config_jr_no = 0, jr_id = -1;
+ int uio_minor_number = -1;
+ int ret;
+ DIR *d = NULL;
+ struct dirent *dir;
+
+ d = opendir(SEC_UIO_DEVICE_SYS_ATTR_PATH);
+ if (d == NULL) {
+ printf("\nError opening directory '%s': %s\n",
+ SEC_UIO_DEVICE_SYS_ATTR_PATH, strerror(errno));
+ return -1;
+ }
+
+ /* Iterate through all subdirs */
+ while ((dir = readdir(d)) != NULL) {
+ if (!strncmp(dir->d_name, ".", 1) ||
+ !strncmp(dir->d_name, "..", 2))
+ continue;
+
+ if (file_name_match_extract
+ (dir->d_name, "uio", &uio_minor_number)) {
+ /*
+ * Open file uioX/name and read first line which contains
+ * the name for the device. Based on the name check if this
+ * UIO device is UIO device for job ring with id jr_id.
+ */
+ memset(uio_name, 0, sizeof(uio_name));
+ ret = file_read_first_line(SEC_UIO_DEVICE_SYS_ATTR_PATH,
+ dir->d_name, "name", uio_name);
+ CAAM_JR_INFO("sec device uio name: %s", uio_name);
+ SEC_ASSERT(ret == 0, -1, "file_read_first_line failed");
+
+ if (file_name_match_extract(uio_name,
+ SEC_UIO_DEVICE_NAME,
+ &jr_id)) {
+ g_uio_job_ring[config_jr_no].jr_id = jr_id;
+ g_uio_job_ring[config_jr_no].uio_minor_number =
+ uio_minor_number;
+ CAAM_JR_INFO("Detected logical JRID:%d", jr_id);
+ config_jr_no++;
+
+ /* todo find the actual ring id
+ * OF_FULLNAME=/soc/crypto@1700000/jr@20000
+ */
+ }
+ }
+ }
+ closedir(d);
+
+ if (config_jr_no == 0) {
+ CAAM_JR_ERR("! No SEC Job Rings assigned for userspace usage!");
+ return 0;
+ }
+ CAAM_JR_INFO("Total JR detected =%d", config_jr_no);
+ return config_jr_no;
+}
+
+int
+sec_cleanup(void)
+{
+ int i;
+ struct uio_job_ring *job_ring;
+
+ for (i = 0; i < g_uio_jr_num; i++) {
+ job_ring = &g_uio_job_ring[i];
+ /* munmap SEC's register memory */
+ if (job_ring->register_base_addr) {
+ munmap(job_ring->register_base_addr,
+ job_ring->map_size);
+ job_ring->register_base_addr = NULL;
+ }
+ /* Close the fd only after shutdown, since UIO commands still
+ * need to be sent through it during shutdown.
+ */
+ if (job_ring->uio_fd != 0) {
+ CAAM_JR_INFO(
+ "Closed device file for job ring %d , fd = %d",
+ job_ring->jr_id, job_ring->uio_fd);
+ close(job_ring->uio_fd);
+ }
+ }
+ return 0;
+}
diff --git a/drivers/crypto/caam_jr/meson.build b/drivers/crypto/caam_jr/meson.build
new file mode 100644
index 00000000..99b71aef
--- /dev/null
+++ b/drivers/crypto/caam_jr/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_vdev', 'bus_dpaa', 'security']
+sources = files('caam_jr_capabilities.c',
+ 'caam_jr_hw.c',
+ 'caam_jr_uio.c',
+ 'caam_jr.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
+includes += include_directories('../../bus/dpaa/include/')
diff --git a/drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map b/drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map
new file mode 100644
index 00000000..521e51f4
--- /dev/null
+++ b/drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map
@@ -0,0 +1,4 @@
+DPDK_18.11 {
+
+ local: *;
+};
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index da3d8f84..f537f76a 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -4,13 +4,6 @@
#
include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
-$(error "RTE_LIBRTE_SECURITY is required to build RTE_LIBRTE_PMD_DPAA2_SEC")
-endif
-endif
-
#
# library name
#
@@ -20,7 +13,6 @@ LIB = librte_pmd_dpaa2_sec.a
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D _GNU_SOURCE
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
@@ -41,7 +33,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
EXPORT_MAP := rte_pmd_dpaa2_sec_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
@@ -51,5 +43,6 @@ LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_mempool_dpaa2
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 2a3c61c6..6095c602 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
@@ -10,7 +10,6 @@
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
-#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -24,10 +23,12 @@
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
+#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>
#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"
/* Required types */
@@ -35,6 +36,7 @@ typedef uint64_t dma_addr_t;
/* RTA header files */
#include <hw/desc/ipsec.h>
+#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -62,11 +64,87 @@ static uint8_t cryptodev_driver_id;
int dpaa2_logtype_sec;
static inline int
+build_proto_compound_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ struct rte_mbuf *src_mbuf = sym_op->m_src;
+ struct rte_mbuf *dst_mbuf = sym_op->m_dst;
+ int retval;
+
+ if (!dst_mbuf)
+ dst_mbuf = src_mbuf;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* we are using the first FLE entry to store Mbuf */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("Memory alloc failed");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(op_fle, bpid);
+ DPAA2_SET_FLE_BPID(ip_fle, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(op_fle);
+ DPAA2_SET_FLE_IVP(ip_fle);
+ }
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
+
+ /* Configure Output FLE with dst mbuf data */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
+ DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
+ DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
+
+ /* Configure Input FLE with src mbuf data */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
+ DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
+ DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
+
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+#ifdef ENABLE_HFN_OVERRIDE
+ if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+ /*enable HFN override override */
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
+ DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
+ }
+#endif
+
+ return 0;
+
+}
+
+static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
{
struct rte_crypto_sym_op *sym_op = op->sym;
+ if (sym_op->m_dst)
+ return build_proto_compound_fd(sess, op, fd, bpid);
+
struct ctxt_priv *priv = sess->ctxt;
struct sec_flow_context *flc;
struct rte_mbuf *mbuf = sym_op->m_src;
@@ -1124,6 +1202,9 @@ build_sec_fd(struct rte_crypto_op *op,
case DPAA2_SEC_IPSEC:
ret = build_proto_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_PDCP:
+ ret = build_proto_compound_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_HASH_CIPHER:
default:
DPAA2_SEC_ERR("error: Unsupported session");
@@ -1145,6 +1226,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
struct qbman_swp *swp;
uint16_t num_tx = 0;
+ uint32_t flags[MAX_TX_RING_SLOTS] = {0};
/*todo - need to support multiple buffer pools */
uint16_t bpid;
struct rte_mempool *mb_pool;
@@ -1172,9 +1254,19 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
swp = DPAA2_PER_LCORE_PORTAL;
while (nb_ops) {
- frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+ frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_ops;
for (loop = 0; loop < frames_to_send; loop++) {
+ if ((*ops)->sym->m_src->seqn) {
+ uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
+
+ flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+ }
+
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
mb_pool = (*ops)->sym->m_src->pool;
@@ -1191,7 +1283,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
while (loop < frames_to_send) {
loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop],
- NULL,
+ &flags[loop],
frames_to_send - loop);
}
@@ -1216,6 +1308,9 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
@@ -1226,9 +1321,6 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
else
mbuf->data_off += SEC_FLC_DHR_INBOUND;
- diff = len - mbuf->pkt_len;
- mbuf->pkt_len += diff;
- mbuf->data_len += diff;
return op;
}
@@ -1273,6 +1365,16 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
} else
dst = src;
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ dpaa2_sec_session *sess = (dpaa2_sec_session *)
+ get_sec_session_private_data(op->sym->sec_session);
+ if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ dst->pkt_len = len;
+ dst->data_len = len;
+ }
+ }
+
DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
(void *)dst,
@@ -1321,8 +1423,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
- (nb_ops > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_ops);
+ (nb_ops > dpaa2_dqrr_size) ?
+ dpaa2_dqrr_size : nb_ops);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
@@ -2099,6 +2201,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
return -1;
}
+ memset(session, 0, sizeof(dpaa2_sec_session));
/* Default IV length = 0 */
session->iv.length = 0;
@@ -2139,107 +2242,127 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
}
static int
-dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
- struct rte_security_session_conf *conf,
- void *sess)
+dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
+ dpaa2_sec_session *session,
+ struct alginfo *aeaddata)
{
- struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
- struct rte_crypto_auth_xform *auth_xform;
- struct rte_crypto_cipher_xform *cipher_xform;
- dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
- struct ctxt_priv *priv;
- struct ipsec_encap_pdb encap_pdb;
- struct ipsec_decap_pdb decap_pdb;
- struct alginfo authdata, cipherdata;
- int bufsize;
- struct sec_flow_context *flc;
-
PMD_INIT_FUNC_TRACE();
- if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
- cipher_xform = &conf->crypto_xform->cipher;
- auth_xform = &conf->crypto_xform->next->auth;
- } else {
- auth_xform = &conf->crypto_xform->auth;
- cipher_xform = &conf->crypto_xform->next->cipher;
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for aead key");
+ return -1;
}
- priv = (struct ctxt_priv *)rte_zmalloc(NULL,
- sizeof(struct ctxt_priv) +
- sizeof(struct sec_flc_desc),
- RTE_CACHE_LINE_SIZE);
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
- if (priv == NULL) {
- DPAA2_SEC_ERR("No memory for priv CTXT");
- return -ENOMEM;
- }
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
- flc = &priv->flc_desc[0].flc;
+ aeaddata->key = (size_t)session->aead_key.data;
+ aeaddata->keylen = session->aead_key.length;
+ aeaddata->key_enc_flags = 0;
+ aeaddata->key_type = RTA_DATA_IMM;
- session->ctxt_type = DPAA2_SEC_IPSEC;
- session->cipher_key.data = rte_zmalloc(NULL,
- cipher_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->cipher_key.data == NULL &&
- cipher_xform->key.length > 0) {
- DPAA2_SEC_ERR("No Memory for cipher key");
- rte_free(priv);
- return -ENOMEM;
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata->algtype = OP_ALG_ALGSEL_AES;
+ aeaddata->algmode = OP_ALG_AAI_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ aeaddata->algtype = OP_ALG_ALGSEL_AES;
+ aeaddata->algmode = OP_ALG_AAI_CCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
+ return -1;
}
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
- session->cipher_key.length = cipher_xform->key.length;
- session->auth_key.data = rte_zmalloc(NULL,
- auth_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->auth_key.data == NULL &&
- auth_xform->key.length > 0) {
- DPAA2_SEC_ERR("No Memory for auth key");
- rte_free(session->cipher_key.data);
- rte_free(priv);
- return -ENOMEM;
+ return 0;
+}
+
+static int
+dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
+ struct rte_crypto_auth_xform *auth_xform,
+ dpaa2_sec_session *session,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ if (cipher_xform) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ }
+
+ if (auth_xform) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
}
- session->auth_key.length = auth_xform->key.length;
- memcpy(session->cipher_key.data, cipher_xform->key.data,
- cipher_xform->key.length);
- memcpy(session->auth_key.data, auth_xform->key.data,
- auth_xform->key.length);
- authdata.key = (size_t)session->auth_key.data;
- authdata.keylen = session->auth_key.length;
- authdata.key_enc_flags = 0;
- authdata.key_type = RTA_DATA_IMM;
- switch (auth_xform->algo) {
+ authdata->key = (size_t)session->auth_key.data;
+ authdata->keylen = session->auth_key.length;
+ authdata->key_enc_flags = 0;
+ authdata->key_type = RTA_DATA_IMM;
+ switch (session->auth_alg) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_CMAC:
- authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
- session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
break;
case RTE_CRYPTO_AUTH_NULL:
- authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
- session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
@@ -2255,50 +2378,119 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
- auth_xform->algo);
- goto out;
+ session->auth_alg);
+ return -1;
default:
DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
- auth_xform->algo);
- goto out;
+ session->auth_alg);
+ return -1;
}
- cipherdata.key = (size_t)session->cipher_key.data;
- cipherdata.keylen = session->cipher_key.length;
- cipherdata.key_enc_flags = 0;
- cipherdata.key_type = RTA_DATA_IMM;
+ cipherdata->key = (size_t)session->cipher_key.data;
+ cipherdata->keylen = session->cipher_key.length;
+ cipherdata->key_enc_flags = 0;
+ cipherdata->key_type = RTA_DATA_IMM;
- switch (cipher_xform->algo) {
+ switch (session->cipher_alg) {
case RTE_CRYPTO_CIPHER_AES_CBC:
- cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
- cipherdata.algmode = OP_ALG_AAI_CBC;
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
+ cipherdata->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
- cipherdata.algtype = OP_PCL_IPSEC_3DES;
- cipherdata.algmode = OP_ALG_AAI_CBC;
- session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ cipherdata->algtype = OP_PCL_IPSEC_3DES;
+ cipherdata->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
- cipherdata.algmode = OP_ALG_AAI_CTR;
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
+ cipherdata->algmode = OP_ALG_AAI_CTR;
break;
case RTE_CRYPTO_CIPHER_NULL:
- cipherdata.algtype = OP_PCL_IPSEC_NULL;
+ cipherdata->algtype = OP_PCL_IPSEC_NULL;
break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
- cipher_xform->algo);
- goto out;
+ session->cipher_alg);
+ return -1;
default:
DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
- cipher_xform->algo);
+ session->cipher_alg);
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_SECURITY_TEST
+static uint8_t aes_cbc_iv[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
+#endif
+
+static int
+dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_aead_xform *aead_xform = NULL;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ipsec_decap_pdb decap_pdb;
+ struct alginfo authdata, cipherdata;
+ int bufsize;
+ struct sec_flow_context *flc;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ int ret = -1;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ memset(session, 0, sizeof(dpaa2_sec_session));
+
+ if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ if (conf->crypto_xform->next)
+ auth_xform = &conf->crypto_xform->next->auth;
+ ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ session, &cipherdata, &authdata);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &conf->crypto_xform->auth;
+ if (conf->crypto_xform->next)
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ session, &cipherdata, &authdata);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &conf->crypto_xform->aead;
+ ret = dpaa2_sec_ipsec_aead_init(aead_xform,
+ session, &cipherdata);
+ } else {
+ DPAA2_SEC_ERR("XFORM not specified");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (ret) {
+ DPAA2_SEC_ERR("Failed to process xform");
goto out;
}
+ session->ctxt_type = DPAA2_SEC_IPSEC;
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
struct ip ip4_hdr;
@@ -2310,7 +2502,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
ip4_hdr.ip_id = 0;
ip4_hdr.ip_off = 0;
ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
- ip4_hdr.ip_p = 0x32;
+ ip4_hdr.ip_p = IPPROTO_ESP;
ip4_hdr.ip_sum = 0;
ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
@@ -2322,13 +2514,14 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBOPTS_ESP_IVSRC |
- PDBHMO_ESP_ENCAP_DTTL;
+ PDBHMO_ESP_ENCAP_DTTL |
+ PDBHMO_ESP_SNR;
encap_pdb.spi = ipsec_xform->spi;
encap_pdb.ip_hdr_len = sizeof(struct ip);
session->dir = DIR_ENC;
bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
- 1, 0, &encap_pdb,
+ 1, 0, SHR_SERIAL, &encap_pdb,
(uint8_t *)&ip4_hdr,
&cipherdata, &authdata);
} else if (ipsec_xform->direction ==
@@ -2338,7 +2531,8 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
decap_pdb.options = sizeof(struct ip) << 16;
session->dir = DIR_DEC;
bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
- 1, 0, &decap_pdb, &cipherdata, &authdata);
+ 1, 0, SHR_SERIAL,
+ &decap_pdb, &cipherdata, &authdata);
} else
goto out;
@@ -2372,6 +2566,244 @@ out:
rte_free(session->auth_key.data);
rte_free(session->cipher_key.data);
rte_free(priv);
+ return ret;
+}
+
+static int
+dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
+ struct rte_crypto_sym_xform *xform = conf->crypto_xform;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata, cipherdata;
+ int bufsize = -1;
+ struct sec_flow_context *flc;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = true;
+#else
+ int swap = false;
+#endif
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(session, 0, sizeof(dpaa2_sec_session));
+
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ /* find xfrm types */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ cipher_xform = &xform->cipher;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ } else {
+ DPAA2_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+
+ session->ctxt_type = DPAA2_SEC_PDCP;
+ if (cipher_xform) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->dir =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ session->dir = DIR_ENC;
+ }
+
+ session->pdcp.domain = pdcp_xform->domain;
+ session->pdcp.bearer = pdcp_xform->bearer;
+ session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
+ session->pdcp.sn_size = pdcp_xform->sn_size;
+#ifdef ENABLE_HFN_OVERRIDE
+ session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
+#endif
+ session->pdcp.hfn = pdcp_xform->hfn;
+ session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
+
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (session->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ session->cipher_alg);
+ goto out;
+ }
+
+ /* Auth is only applicable for control mode operation. */
+ if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
+ if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
+ DPAA2_SEC_ERR(
+ "PDCP Seq Num size should be 5 bits for cmode");
+ goto out;
+ }
+ if (auth_xform) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ }
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ switch (session->auth_alg) {
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ authdata.algtype = PDCP_AUTH_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ authdata.algtype = PDCP_AUTH_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = PDCP_AUTH_TYPE_AES;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = PDCP_AUTH_TYPE_NULL;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ session->auth_alg);
+ goto out;
+ }
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_pdcp_c_plane_encap(
+ priv->flc_desc[0].desc, 1, swap,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ else if (session->dir == DIR_DEC)
+ bufsize = cnstr_shdsc_pdcp_c_plane_decap(
+ priv->flc_desc[0].desc, 1, swap,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ } else {
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_pdcp_u_plane_encap(
+ priv->flc_desc[0].desc, 1, swap,
+ (enum pdcp_sn_size)pdcp_xform->sn_size,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, 0);
+ else if (session->dir == DIR_DEC)
+ bufsize = cnstr_shdsc_pdcp_u_plane_decap(
+ priv->flc_desc[0].desc, 1, swap,
+ (enum pdcp_sn_size)pdcp_xform->sn_size,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, 0);
+ }
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
+ /* Enable the stashing control bit */
+ DPAA2_SET_FLC_RSC(flc);
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq) | 0x14);
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+
+ flc->word1_sdl = (uint8_t)bufsize;
+
+ /* Set EWS bit i.e. enable write-safe */
+ DPAA2_SET_FLC_EWS(flc);
+ /* Set BS = 1 i.e. reuse input buffers as output buffers */
+ DPAA2_SET_FLC_REUSE_BS(flc);
+ /* Set FF = 10; reuse input buffers if they provide sufficient space */
+ DPAA2_SET_FLC_REUSE_FF(flc);
+
+ session->ctxt = priv;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
return -1;
}
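For reference, a minimal application-side sketch of creating a PDCP data-plane session on this PMD could look as follows; the device id, key material, bearer/HFN values and session mempool are placeholders, and the field names follow the generic rte_security/cryptodev API rather than anything introduced by this patch.

#include <rte_cryptodev.h>
#include <rte_security.h>

static struct rte_security_session *
create_pdcp_session(uint8_t dev_id, struct rte_mempool *sess_mp)
{
	static uint8_t cipher_key[16];		/* placeholder key material */
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
			.key = { .data = cipher_key,
				 .length = sizeof(cipher_key) },
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
		.pdcp = {
			.domain = RTE_SECURITY_PDCP_MODE_DATA,
			.bearer = 0x1,			/* placeholder */
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
			.hfn = 0x1,			/* placeholder */
			.hfn_threshold = 0x70c0a,	/* placeholder */
		},
		.crypto_xform = &cipher_xform,
	};
	struct rte_security_ctx *ctx =
		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);

	/* Dispatches to dpaa2_sec_set_pdcp_session() via the new PDCP case */
	return rte_security_session_create(ctx, &conf, sess_mp);
}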
@@ -2397,6 +2829,10 @@ dpaa2_sec_security_session_create(void *dev,
break;
case RTE_SECURITY_PROTOCOL_MACSEC:
return -ENOTSUP;
+ case RTE_SECURITY_PROTOCOL_PDCP:
+ ret = dpaa2_sec_set_pdcp_session(cdev, conf,
+ sess_private_data);
+ break;
default:
return -EINVAL;
}
@@ -2686,6 +3122,129 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
}
}
+static void __attribute__((hot))
+dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ /* Prefetching mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+ /* Prefetching ipsec crypto_op stored in priv data of mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+ ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+ (rxq->dev))->driver_id);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+static void
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ uint8_t dqrr_index;
+ struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+ /* Prefetching mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+ /* Prefetching ipsec crypto_op stored in priv data of mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+ (rxq->dev))->driver_id);
+ dqrr_index = qbman_get_dqrr_idx(dq);
+ crypto_op->sym->m_src->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+}
+
+int
+dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
+ int qp_id,
+ uint16_t dpcon_id,
+ const struct rte_event *event)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
+ struct dpseci_rx_queue_cfg cfg;
+ int ret;
+
+ if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
+ qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
+ else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
+ qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+ else
+ return -EINVAL;
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+ cfg.options = DPSECI_QUEUE_OPT_DEST;
+ cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
+ cfg.dest_cfg.dest_id = dpcon_id;
+ cfg.dest_cfg.priority = event->priority;
+
+ cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (size_t)(qp);
+ if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
+ cfg.order_preservation_en = 1;
+ }
+ ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
+
+ return 0;
+}
+
+int
+dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
+ int qp_id)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int ret;
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+ cfg.options = DPSECI_QUEUE_OPT_DEST;
+ cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
+
+ ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
+
+ return ret;
+}
+
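As an illustration of how the new attach/detach hooks are meant to be driven, a simplified binding of queue pair 0 to a DPCON-backed event queue might look like the sketch below; dpcon_id and the event fields are placeholders, and in practice the eventdev crypto adapter performs this call rather than the application directly.

#include <rte_eventdev.h>
#include "dpaa2_sec_event.h"

static int
bind_sec_qp_to_event(struct rte_cryptodev *cdev, uint16_t dpcon_id)
{
	struct rte_event ev = {
		.queue_id = 0,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,	/* exercises the atomic path above */
	};
	int ret;

	ret = dpaa2_sec_eventq_attach(cdev, 0 /* qp_id */, dpcon_id, &ev);
	if (ret)
		return ret;

	/* ... dequeue RTE_EVENT_TYPE_CRYPTODEV events from the event device ... */

	return dpaa2_sec_eventq_detach(cdev, 0 /* qp_id */);
}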
static struct rte_cryptodev_ops crypto_ops = {
.dev_configure = dpaa2_sec_dev_configure,
.dev_start = dpaa2_sec_dev_start,
@@ -2708,7 +3267,7 @@ dpaa2_sec_capabilities_get(void *device __rte_unused)
return dpaa2_sec_security_cap;
}
-struct rte_security_ops dpaa2_sec_security_ops = {
+static const struct rte_security_ops dpaa2_sec_security_ops = {
.session_create = dpaa2_sec_security_session_create,
.session_update = NULL,
.session_stats_get = NULL,
@@ -2843,7 +3402,7 @@ init_error:
}
static int
-cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_cryptodev *cryptodev;
@@ -2871,7 +3430,6 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
dpaa2_dev->cryptodev = cryptodev;
cryptodev->device = &dpaa2_dev->device;
- cryptodev->device->driver = &dpaa2_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h
new file mode 100644
index 00000000..97709942
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ *
+ */
+
+#ifndef _DPAA2_SEC_EVENT_H_
+#define _DPAA2_SEC_EVENT_H_
+
+int
+dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
+ int qp_id,
+ uint16_t dpcon_id,
+ const struct rte_event *event);
+
+int dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
+ int qp_id);
+
+#endif /* _DPAA2_SEC_EVENT_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index d015be1e..51751103 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -8,6 +8,8 @@
#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#include <rte_security_driver.h>
+
#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
/**< NXP DPAA2 - SEC PMD device name */
@@ -135,6 +137,19 @@ struct dpaa2_sec_aead_ctxt {
uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
};
+/*
+ * This structure is filled by the user for the PDCP protocol
+ */
+struct dpaa2_pdcp_ctxt {
+ enum rte_security_pdcp_domain domain; /*!< Data/Control mode */
+ int8_t bearer; /*!< PDCP bearer ID */
+ int8_t pkt_dir; /*!< PDCP Frame Direction 0:UL 1:DL */
+ int8_t hfn_ovd; /*!< Overwrite HFN per packet */
+ uint32_t hfn; /*!< Hyper Frame Number */
+ uint32_t hfn_threshold; /*!< HFN Threshold for key renegotiation */
+ uint8_t sn_size; /*!< Sequence number size, 7/12/15 */
+};
+
typedef struct dpaa2_sec_session_entry {
void *ctxt;
uint8_t ctxt_type;
@@ -158,15 +173,20 @@ typedef struct dpaa2_sec_session_entry {
} auth_key;
};
};
- struct {
- uint16_t length; /**< IV length in bytes */
- uint16_t offset; /**< IV offset in bytes */
- } iv;
- uint16_t digest_length;
- uint8_t status;
union {
- struct dpaa2_sec_aead_ctxt aead_ctxt;
- } ext_params;
+ struct {
+ struct {
+ uint16_t length; /**< IV length in bytes */
+ uint16_t offset; /**< IV offset in bytes */
+ } iv;
+ uint16_t digest_length;
+ uint8_t status;
+ union {
+ struct dpaa2_sec_aead_ctxt aead_ctxt;
+ } ext_params;
+ };
+ struct dpaa2_pdcp_ctxt pdcp;
+ };
} dpaa2_sec_session;
static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
@@ -390,6 +410,162 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+static const struct rte_cryptodev_capabilities dpaa2_pdcp_capabilities[] = {
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
static const struct rte_security_capability dpaa2_sec_security_cap[] = {
{ /* IPsec Lookaside Protocol offload ESP Transport Egress */
.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
@@ -413,6 +589,24 @@ static const struct rte_security_capability dpaa2_sec_security_cap[] = {
},
.crypto_capabilities = dpaa2_sec_capabilities
},
+ { /* PDCP Lookaside Protocol offload Data */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_DATA,
+ .capa_flags = 0
+ },
+ .crypto_capabilities = dpaa2_pdcp_capabilities
+ },
+ { /* PDCP Lookaside Protocol offload Control */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_CONTROL,
+ .capa_flags = 0
+ },
+ .crypto_capabilities = dpaa2_pdcp_capabilities
+ },
{
.action = RTE_SECURITY_ACTION_TYPE_NONE
}
diff --git a/drivers/crypto/dpaa2_sec/hw/desc.h b/drivers/crypto/dpaa2_sec/hw/desc.h
index e9255832..5d99dd8a 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -588,7 +588,7 @@
#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS_PRF (0x0c << OP_PCLID_SHIFT)
#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
@@ -612,7 +612,7 @@
#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10 (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
@@ -665,643 +665,179 @@
#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
-/* For SSL 3.0 - OP_PCLID_SSL30 */
-#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
-#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
-#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_1 0x009C
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_1 0x009D
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_2 0x009E
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_2 0x009F
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_3 0x00A0
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_3 0x00A1
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_4 0x00A2
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_4 0x00A3
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_5 0x00A4
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_5 0x00A5
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_6 0x00A6
-
-#define OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384 0x00A7
-#define OP_PCL_TLS_PSK_AES_128_GCM_SHA256 0x00A8
-#define OP_PCL_TLS_PSK_AES_256_GCM_SHA384 0x00A9
-#define OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256 0x00AA
-#define OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384 0x00AB
-#define OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256 0x00AC
-#define OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384 0x00AD
-#define OP_PCL_TLS_PSK_AES_128_CBC_SHA256 0x00AE
-#define OP_PCL_TLS_PSK_AES_256_CBC_SHA384 0x00AF
-#define OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256 0x00B2
-#define OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384 0x00B3
-#define OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256 0x00B6
-#define OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384 0x00B7
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
-
-#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
-#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
-#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
-#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
-#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
-#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
-#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
-#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_SSL30_RC4_128_MD5 0x0024
-#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
-#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_SSL30_RC4_40_MD5 0x002b
-#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
-#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_SSL30_RC4_128_SHA 0x0020
-#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
-#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
-#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
-#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
-#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
-#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
-#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
-#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
-#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_SSL30_RC4_40_SHA 0x0028
-
-/* For TLS 1.0 - OP_PCLID_TLS10 */
-#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256 0xC023
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384 0xC024
-#define OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256 0xC025
-#define OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384 0xC026
-#define OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256 0xC027
-#define OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384 0xC028
-#define OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256 0xC029
-#define OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384 0xC02A
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256 0xC02B
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384 0xC02C
-#define OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256 0xC02D
-#define OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384 0xC02E
-#define OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256 0xC02F
-#define OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384 0xC030
-#define OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256 0xC031
-#define OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384 0xC032
-#define OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA 0xC033
-#define OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA 0xC034
-#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA 0xC035
-#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA 0xC036
-#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256 0xC037
-#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384 0xC038
-
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS10_RC4_128_MD5 0x0024
-#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS10_RC4_40_MD5 0x002b
-#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS10_RC4_128_SHA 0x0020
-#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS10_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
-
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA160 0xff90
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA384 0xff93
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA224 0xff94
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA512 0xff95
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA256 0xff96
-#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE 0xfffe
-#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF 0xffff
-
-/* For TLS 1.1 - OP_PCLID_TLS11 */
-#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS11_RC4_128_MD5 0x0024
-#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS11_RC4_40_MD5 0x002b
-#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS11_RC4_128_SHA 0x0020
-#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS11_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
-
-
-/* For TLS 1.2 - OP_PCLID_TLS12 */
-#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS12_RC4_128_MD5 0x0024
-#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS12_RC4_40_MD5 0x002b
-#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS12_RC4_128_SHA 0x0020
-#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS12_RC4_40_SHA 0x0028
-
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
-
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
-
-/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
-
-/* For DTLS - OP_PCLID_DTLS */
-
-#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
-#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
-#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
-
-#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
-#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
-#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
-#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
-#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
-#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
-#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
-#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+/*
+ * For SSL/TLS/DTLS - OP_PCL_TLS
+ * For more details see IANA TLS Cipher Suite registry:
+ * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
+ * Note: for private/internal use (reserved by IANA) - OP_PCL_PVT_TLS
+ */
+#define OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5 0x0003
+#define OP_PCL_TLS_RSA_WITH_RC4_128_MD5 0x0004
+#define OP_PCL_TLS_RSA_WITH_RC4_128_SHA 0x0005
+#define OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS_RSA_WITH_DES_CBC_SHA 0x0009
+#define OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA 0x000a
+#define OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA 0x000b
+#define OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA 0x000c
+#define OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA 0x000d
+#define OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA 0x000e
+#define OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA 0x000f
+#define OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA 0x0010
+#define OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA 0x0011
+#define OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA 0x0012
+#define OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA 0x0013
+#define OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0014
+#define OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA 0x0015
+#define OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA 0x0016
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 0x0017
+#define OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5 0x0018
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA 0x0019
+#define OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA 0x001a
+#define OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA 0x001b
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_SHA 0x0020
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 0x0023
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5 0x0022
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_MD5 0x0024
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA 0x0026
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA 0x0028
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 0x0029
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 0x002b
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA 0x0030
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA 0x0031
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA 0x0032
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA 0x0033
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA 0x0034
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA 0x0036
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA 0x0037
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA 0x0038
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA 0x0039
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA 0x003a
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256 0x003c
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256 0x003d
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 0x003e
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 0x003f
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 0x0040
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 0x0067
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 0x0068
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 0x0069
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 0x006a
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 0x006b
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256 0x006c
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256 0x006d
+#define OP_PCL_TLS_PSK_WITH_RC4_128_SHA 0x008a
+#define OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA 0x008b
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA 0x008c
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA 0x008d
+#define OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA 0x008e
+#define OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA 0x008f
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA 0x0090
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA 0x0091
+#define OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA 0x0092
+#define OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA 0x0093
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA 0x0094
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA 0x0095
+#define OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256 0x009c
+#define OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384 0x009d
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 0x009e
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 0x009f
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 0x00a0
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 0x00a1
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 0x00a2
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 0x00a3
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 0x00a4
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 0x00a5
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256 0x00a6
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384 0x00a7
+#define OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256 0x00a8
+#define OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384 0x00a9
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 0x00aa
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 0x00ab
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 0x00ac
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 0x00ad
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256 0x00ae
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384 0x00af
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 0x00b2
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 0x00b3
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 0x00b6
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 0x00b7
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA 0xc002
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc003
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA 0xc004
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA 0xc005
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA 0xc007
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc008
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA 0xc009
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA 0xc00a
+#define OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA 0xc00c
+#define OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA 0xc00d
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA 0xc00e
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA 0xc00f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA 0xc011
+#define OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA 0xc012
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA 0xc013
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA 0xc014
+#define OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA 0xc016
+#define OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA 0xc017
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA 0xc018
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA 0xc019
+#define OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0xc01a
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0xc01b
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0xc01c
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA 0xc01d
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA 0xc01e
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 0xc01f
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA 0xc020
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0xc021
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0xc022
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 0xc023
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 0xc024
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 0xc025
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 0xc026
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 0xc027
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 0xc028
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 0xc029
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 0xc02a
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 0xc02b
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 0xc02c
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 0xc02d
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 0xc02e
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 0xc02f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 0xc030
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 0xc031
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 0xc032
+#define OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA 0xc033
+#define OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA 0xc034
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA 0xc035
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA 0xc036
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 0xc037
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 0xc038
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF 0xffff
/* 802.16 WiMAX protinfos */
#define OP_PCL_WIMAX_OFDM 0x0201
@@ -1332,7 +868,7 @@
#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
-#define OP_PCL_LTE_MIXED_ENC_MASK (3 < OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_MASK (3 << OP_PCL_LTE_MIXED_ENC_SHIFT)
#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
OP_PCL_LTE_MIXED_AUTH_SHIFT)
#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index 91f3e067..febcb6d0 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -410,6 +410,35 @@ cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
}
/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
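The helper keeps its behaviour after the move; a hypothetical call site, assuming descbuf points at a sufficiently large descriptor buffer, would be:

static int build_crc_shdesc(uint32_t *descbuf)
{
	/* swap must be true when core and SEC endianness differ */
	int desc_len = cnstr_shdsc_crc(descbuf, true);

	/* negative value means descriptor construction failed */
	return desc_len;
}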
+/**
* cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
* @descbuf: pointer to descriptor-under-construction buffer
* @ps: if 36/40bit addressing is desired, this parameter must be true
@@ -614,33 +643,4 @@ cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
return PROGRAM_FINALIZE(p);
}
-/**
- * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
- * @descbuf: pointer to descriptor-under-construction buffer
- * @swap: must be true when core endianness doesn't match SEC endianness
- *
- * Return: size of descriptor written in words or negative number on error
- */
-static inline int
-cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
-{
- struct program prg;
- struct program *p = &prg;
-
- PROGRAM_CNTXT_INIT(p, descbuf, 0);
- if (swap)
- PROGRAM_SET_BSWAP(p);
-
- SHR_HDR(p, SHR_ALWAYS, 1, 0);
-
- MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
- ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
- OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
- OP_ALG_AS_FINALIZE, 0, DIR_ENC);
- SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
- SEQSTORE(p, CONTEXT2, 0, 4, 0);
-
- return PROGRAM_FINALIZE(p);
-}
-
#endif /* __DESC_ALGO_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index 35cc02a6..d256a391 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -522,44 +522,133 @@ enum ipsec_icv_size {
/*
* IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ * IPSEC_N_* defines are for IPsec new mode.
*/
-#define IPSEC_DECO_DPOVRD_USE 0x80
+/**
+ * IPSEC_DPOVRD_USE - DPOVRD will override values specified in the PDB
+ */
+#define IPSEC_DPOVRD_USE BIT(31)
-struct ipsec_deco_dpovrd {
- uint8_t ovrd_ecn;
- uint8_t ip_hdr_len;
- uint8_t nh_offset;
- union {
- uint8_t next_header; /* next header if encap */
- uint8_t rsvd; /* reserved if decap */
- };
-};
+/**
+ * IPSEC_DPOVRD_ECN_SHIFT - Explicit Congestion Notification
+ *
+ * If set, MSB of the 4 bits indicates that the 2 LSBs will replace the ECN bits
+ * in the IP header.
+ */
+#define IPSEC_DPOVRD_ECN_SHIFT 24
-struct ipsec_new_encap_deco_dpovrd {
-#define IPSEC_NEW_ENCAP_DECO_DPOVRD_USE 0x8000
- uint16_t ovrd_ip_hdr_len; /* OVRD + outer IP header material
- * length
- */
-#define IPSEC_NEW_ENCAP_OIMIF 0x80
- uint8_t oimif_aoipho; /* OIMIF + actual outer IP header
- * offset
- */
- uint8_t rsvd;
-};
+/**
+ * IPSEC_DPOVRD_ECN_MASK - See IPSEC_DPOVRD_ECN_SHIFT
+ */
+#define IPSEC_DPOVRD_ECN_MASK (0xf << IPSEC_DPOVRD_ECN_SHIFT)
-struct ipsec_new_decap_deco_dpovrd {
- uint8_t ovrd;
- uint8_t aoipho_hi; /* upper nibble of actual outer IP
- * header
- */
- uint16_t aoipho_lo_ip_hdr_len; /* lower nibble of actual outer IP
- * header + outer IP header material
- */
-};
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_SHIFT - The length (in bytes) of the portion of the
+ * IP header that is not encrypted
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_SHIFT 16
+
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_MASK - See IPSEC_DPOVRD_IP_HDR_LEN_SHIFT
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_MASK (0xff << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_SHIFT - The location of the next header field within
+ * the IP header of the transport mode packet
+ *
+ * Encap:
+ * ESP_Trailer_NH <-- IP_Hdr[DPOVRD[NH_OFFSET]]
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- DPOVRD[NH]
+ * Decap:
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- ESP_Trailer_NH
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_SHIFT 8
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_MASK (0xff << IPSEC_DPOVRD_NH_OFFSET_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ * Valid only for encapsulation.
+ */
+#define IPSEC_DPOVRD_NH_MASK 0xff
+
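As a sketch of how these fields compose, an encap-side override word for a transport-mode packet could be assembled as below; the argument values are placeholders, the ECN field is left clear, and whether DPOVRD is honoured at all depends on IPSEC_DPOVRD_USE and the PDB options.

static inline uint32_t
ipsec_build_dpovrd(uint8_t ip_hdr_len, uint8_t nh_offset, uint8_t next_header)
{
	return IPSEC_DPOVRD_USE |
	       ((ip_hdr_len << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT) &
		IPSEC_DPOVRD_IP_HDR_LEN_MASK) |
	       ((nh_offset << IPSEC_DPOVRD_NH_OFFSET_SHIFT) &
		IPSEC_DPOVRD_NH_OFFSET_MASK) |
	       (next_header & IPSEC_DPOVRD_NH_MASK);
}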
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT - Outer IP header Material length (encap)
+ * Valid only if L2_COPY is not set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT 16
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK \
+ (0xfff << IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT - L2 header length
+ * Valid only if L2_COPY is set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT 16
-static inline void
-__gen_auth_key(struct program *program, struct alginfo *authdata)
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK \
+ (0xff << IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIMIF - Outer IP header Material in Input Frame
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIMIF BIT(15)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_COPY - L2 header present in input frame
+ *
+ * Note: For Era <= 8, this bit is reserved (not used) by HW.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_COPY BIT(14)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (encap)
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT 8
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK \
+ (0x3c << IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_NH_MASK - Next Header
+ *
+ * Used in the Next Header field of the encapsulated payload.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_NH_MASK 0xff
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT 12
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK \
+ (0xff << IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK - Outer IP header Material length (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK 0xfff
+
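For reference, the transport-mode override word described by the defines above can be assembled by a caller roughly as in the following sketch. This helper is not part of the patch; its name and parameters are hypothetical, and it only combines the documented USE, IP_HDR_LEN, NH_OFFSET and NH fields.

	/* Illustrative sketch only, not part of this patch: assemble the encap
	 * DPOVRD word from the fields documented above.
	 */
	static inline uint32_t
	ipsec_transport_dpovrd(uint8_t ip_hdr_len, uint8_t nh_offset,
			       uint8_t next_hdr)
	{
		return IPSEC_DPOVRD_USE |
		       ((uint32_t)ip_hdr_len << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT) |
		       ((uint32_t)nh_offset << IPSEC_DPOVRD_NH_OFFSET_SHIFT) |
		       ((uint32_t)next_hdr & IPSEC_DPOVRD_NH_MASK);
	}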
+static inline void __gen_auth_key(struct program *program,
+ struct alginfo *authdata)
{
uint32_t dkp_protid;
@@ -603,6 +692,7 @@ __gen_auth_key(struct program *program, struct alginfo *authdata)
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -621,6 +711,7 @@ __gen_auth_key(struct program *program, struct alginfo *authdata)
*/
static inline int
cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
struct ipsec_encap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -638,7 +729,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
SET_LABEL(p, hdr);
@@ -669,6 +760,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -687,6 +779,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
*/
static inline int
cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
struct ipsec_decap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -704,7 +797,7 @@ cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
SET_LABEL(p, hdr);
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
@@ -1040,7 +1133,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* layers to determine whether Outer IP Header and/or keys can be inlined or
* not. To be used as first parameter of rta_inline_query().
*/
-#define IPSEC_NEW_ENC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (12 * CAAM_CMD_SZ + \
sizeof(struct ipsec_encap_pdb))
/**
@@ -1052,7 +1145,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* layers to determine whether Outer IP Header and/or key can be inlined or
* not. To be used as first parameter of rta_inline_query().
*/
-#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (11 * CAAM_CMD_SZ + \
sizeof(struct ipsec_encap_pdb))
/**
@@ -1061,6 +1154,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -1080,11 +1174,21 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* compute MDHA on the fly in HW.
* Valid algorithm values - one of OP_PCL_IPSEC_*
*
+ * Note: L2 header copy functionality is implemented assuming that bits 14
+ * (currently reserved) and 16-23 (part of Outer IP Header Material Length)
+ * in DPOVRD register are not used (which is usually the case when L3 header
+ * is provided in PDB).
+ * When DPOVRD[14] is set, frame starts with an L2 header; in this case, the
+ * L2 header length is found at DPOVRD[23:16]. SEC uses this length to copy
+ * the header and then it deletes DPOVRD[23:16] (so there is no side effect
+ * when later running IPsec protocol).
+ *
* Return: size of descriptor written in words or negative number on error
*/
static inline int
cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
bool swap,
+ enum rta_share_type share,
struct ipsec_encap_pdb *pdb,
uint8_t *opt_ip_hdr,
struct alginfo *cipherdata,
@@ -1097,6 +1201,8 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
REFERENCE(pkeyjmp);
LABEL(hdr);
REFERENCE(phdr);
+ LABEL(l2copy);
+ REFERENCE(pl2copy);
if (rta_sec_era < RTA_SEC_ERA_8) {
pr_err("IPsec new mode encap: available only for Era %d or above\n",
@@ -1109,7 +1215,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
@@ -1128,6 +1234,16 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
}
SET_LABEL(p, hdr);
+ MATHB(p, DPOVRD, AND, IPSEC_N_ENCAP_DPOVRD_L2_COPY, NONE, 4, IMMED2);
+ pl2copy = JUMP(p, l2copy, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ MATHI(p, DPOVRD, RSHIFT, IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT, VSEQOUTSZ,
+ 1, 0);
+ MATHB(p, DPOVRD, AND, ~IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK, DPOVRD, 4,
+ IMMED2);
+ /* TODO: CLASS2 corresponds to AUX=2'b10; add more intuitive defines */
+ SEQFIFOSTORE(p, METADATA, 0, 0, CLASS2 | VLF);
+ SET_LABEL(p, l2copy);
+
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
if (authdata->keylen)
__gen_auth_key(p, authdata);
@@ -1138,6 +1254,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
OP_PCLID_IPSEC_NEW,
(uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pl2copy, l2copy);
PATCH_JUMP(p, pkeyjmp, keyjmp);
PATCH_HDR(p, phdr, hdr);
return PROGRAM_FINALIZE(p);
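As a hedged illustration of the L2-copy note above (not part of the patch; l2_len is a hypothetical caller-side variable), a caller requesting the L2 header copy would set the per-frame DPOVRD word along these lines:

	/* Sketch only: request copying of an l2_len-byte L2 header that
	 * precedes the IP header in the input frame (see the note above on
	 * the SEC Era restrictions for DPOVRD[14]).
	 */
	uint32_t dpovrd = IPSEC_N_ENCAP_DPOVRD_L2_COPY |
			  ((uint32_t)l2_len << IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT);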
@@ -1171,6 +1288,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -1188,6 +1306,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
static inline int
cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
bool swap,
+ enum rta_share_type share,
struct ipsec_decap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -1211,7 +1330,7 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
SET_LABEL(p, hdr);
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
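For callers of the updated constructors, the behaviour before this patch corresponds to passing SHR_SERIAL for the new share argument. A minimal caller-side sketch (variable names are illustrative and not part of this patch):

	/* Sketch only: existing callers keep serial sharing by passing
	 * SHR_SERIAL for the new share argument.
	 */
	int shared_desc_len = cnstr_shdsc_ipsec_new_encap(descbuf, ps, swap,
							  SHR_SERIAL, pdb,
							  opt_ip_hdr,
							  cipherdata, authdata);
	if (shared_desc_len < 0)
		return shared_desc_len; /* negative return signals an error */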
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
new file mode 100644
index 00000000..719ef605
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
@@ -0,0 +1,2796 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+
+ */
+
+#ifndef __DESC_PDCP_H__
+#define __DESC_PDCP_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: PDCP Shared Descriptor Constructors
+ *
+ * Shared descriptors for PDCP protocol.
+ */
+
+/**
+ * PDCP_NULL_MAX_FRAME_LEN - The maximum frame length that is supported by
+ * PDCP NULL protocol.
+ */
+#define PDCP_NULL_MAX_FRAME_LEN 0x00002FFF
+
+/**
+ * PDCP_MAC_I_LEN - The length of the MAC-I for PDCP protocol operation
+ */
+#define PDCP_MAC_I_LEN 0x00000004
+
+/**
+ * PDCP_MAX_FRAME_LEN_STATUS - The status returned in FD status/command field in
+ * case the input frame is larger than
+ * PDCP_NULL_MAX_FRAME_LEN.
+ */
+#define PDCP_MAX_FRAME_LEN_STATUS 0xF1
+
+/**
+ * PDCP_C_PLANE_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the PDCP
+ * Control Plane header. For PDCP Control Plane, the SN
+ * is constant (5 bits) as opposed to PDCP Data Plane
+ * (7/12/15 bits).
+ */
+#define PDCP_C_PLANE_SN_MASK 0x1F000000
+#define PDCP_C_PLANE_SN_MASK_BE 0x0000001F
+
+/**
+ * PDCP_U_PLANE_15BIT_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the
+ * PDCP User Plane header. For PDCP Control Plane,
+ * the SN is constant (5 bits) as opposed to PDCP
+ * Data Plane (7/12/15 bits).
+ */
+#define PDCP_U_PLANE_15BIT_SN_MASK 0xFF7F0000
+#define PDCP_U_PLANE_15BIT_SN_MASK_BE 0x00007FFF
+
+/**
+ * PDCP_BEARER_MASK - This mask is used for masking out the bearer for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Count-C (32 bit) | Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the upper 64 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The lower 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_BEARER_MASK 0x00000004FFFFFFFFull
+#define PDCP_BEARER_MASK_BE 0xFFFFFFFF04000000ull
+
+/**
+ * PDCP_DIR_MASK - This mask is used for masking out the direction for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the lower 32 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The upper 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_DIR_MASK 0x00000000000000F8ull
+#define PDCP_DIR_MASK_BE 0xF800000000000000ull
+
+/**
+ * PDCP_NULL_INT_MAC_I_VAL - The value of the PDCP PDU MAC-I in case NULL
+ * integrity is used.
+ */
+#define PDCP_NULL_INT_MAC_I_VAL 0x00000000
+
+/**
+ * PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS - The status used to report ICV check
+ * failed in case of NULL integrity
+ * Control Plane processing.
+ */
+#define PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS 0x0A
+/**
+ * PDCP_DPOVRD_HFN_OV_EN - Value to be used in the FD status/cmd field to
+ * indicate the HFN override mechanism is active for the
+ * frame.
+ */
+#define PDCP_DPOVRD_HFN_OV_EN 0x80000000
+
+/**
+ * PDCP_P4080REV2_HFN_OV_BUFLEN - The length in bytes of the supplementary space
+ * that must be provided by the user at the
+ * beginning of the input frame buffer for
+ * P4080 REV 2.
+ *
+ * The format of the frame buffer is the following:
+ *
+ * |<---PDCP_P4080REV2_HFN_OV_BUFLEN-->|
+ * //===================================||============||==============\\
+ * || PDCP_DPOVRD_HFN_OV_EN | HFN value || PDCP Header|| PDCP Payload ||
+ * \\===================================||============||==============//
+ *
+ * If HFN override mechanism is not desired, then the MSB of the first 4 bytes
+ * must be set to 0b.
+ */
+#define PDCP_P4080REV2_HFN_OV_BUFLEN 4
+
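The buffer layout above can be summarised with a short, purely illustrative sketch (not part of this patch; frame_buf and hfn are hypothetical, and endianness handling is omitted for brevity):

	/* Sketch only: write the 4-byte supplementary word at the start of
	 * the input frame buffer to enable HFN override on P4080 rev 2.
	 */
	uint32_t *ovrd = (uint32_t *)frame_buf;
	*ovrd = PDCP_DPOVRD_HFN_OV_EN | hfn; /* clear the MSB to disable it */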
+/**
+ * enum cipher_type_pdcp - Type selectors for cipher types in PDCP protocol OP
+ * instructions.
+ * @PDCP_CIPHER_TYPE_NULL: NULL
+ * @PDCP_CIPHER_TYPE_SNOW: SNOW F8
+ * @PDCP_CIPHER_TYPE_AES: AES
+ * @PDCP_CIPHER_TYPE_ZUC: ZUCE
+ * @PDCP_CIPHER_TYPE_INVALID: invalid option
+ */
+enum cipher_type_pdcp {
+ PDCP_CIPHER_TYPE_NULL,
+ PDCP_CIPHER_TYPE_SNOW,
+ PDCP_CIPHER_TYPE_AES,
+ PDCP_CIPHER_TYPE_ZUC,
+ PDCP_CIPHER_TYPE_INVALID
+};
+
+/**
+ * enum auth_type_pdcp - Type selectors for integrity types in PDCP protocol OP
+ * instructions.
+ * @PDCP_AUTH_TYPE_NULL: NULL
+ * @PDCP_AUTH_TYPE_SNOW: SNOW F9
+ * @PDCP_AUTH_TYPE_AES: AES CMAC
+ * @PDCP_AUTH_TYPE_ZUC: ZUCA
+ * @PDCP_AUTH_TYPE_INVALID: invalid option
+ */
+enum auth_type_pdcp {
+ PDCP_AUTH_TYPE_NULL,
+ PDCP_AUTH_TYPE_SNOW,
+ PDCP_AUTH_TYPE_AES,
+ PDCP_AUTH_TYPE_ZUC,
+ PDCP_AUTH_TYPE_INVALID
+};
+
+/**
+ * enum pdcp_dir - Type selectors for direction for PDCP protocol
+ * @PDCP_DIR_UPLINK: uplink direction
+ * @PDCP_DIR_DOWNLINK: downlink direction
+ * @PDCP_DIR_INVALID: invalid option
+ */
+enum pdcp_dir {
+ PDCP_DIR_UPLINK = 0,
+ PDCP_DIR_DOWNLINK = 1,
+ PDCP_DIR_INVALID
+};
+
+/**
+ * enum pdcp_plane - PDCP domain selectors
+ * @PDCP_CONTROL_PLANE: Control Plane
+ * @PDCP_DATA_PLANE: Data Plane
+ * @PDCP_SHORT_MAC: Short MAC
+ */
+enum pdcp_plane {
+ PDCP_CONTROL_PLANE,
+ PDCP_DATA_PLANE,
+ PDCP_SHORT_MAC
+};
+
+/**
+ * enum pdcp_sn_size - Sequence Number Size selectors for PDCP protocol
+ * @PDCP_SN_SIZE_5: 5bit sequence number
+ * @PDCP_SN_SIZE_7: 7bit sequence number
+ * @PDCP_SN_SIZE_12: 12bit sequence number
+ * @PDCP_SN_SIZE_15: 15bit sequence number
+ */
+enum pdcp_sn_size {
+ PDCP_SN_SIZE_5 = 5,
+ PDCP_SN_SIZE_7 = 7,
+ PDCP_SN_SIZE_12 = 12,
+ PDCP_SN_SIZE_15 = 15
+};
+
+/*
+ * PDCP Control Plane Protocol Data Blocks
+ */
+#define PDCP_C_PLANE_PDB_HFN_SHIFT 5
+#define PDCP_C_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_C_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_C_PLANE_PDB_HFN_THR_SHIFT 5
+
+#define PDCP_U_PLANE_PDB_OPT_SHORT_SN 0x2
+#define PDCP_U_PLANE_PDB_OPT_15B_SN 0x4
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT 15
+#define PDCP_U_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_U_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT 15
+
+struct pdcp_pdb {
+ union {
+ uint32_t opt;
+ uint32_t rsvd;
+ } opt_res;
+ uint32_t hfn_res; /* HyperFrame number (27, 25 or 21 bits),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t bearer_dir_res;/* Bearer(5 bits), packet direction (1 bit),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t hfn_thr_res; /* HyperFrame number threshold (27, 25 or 21
+ * bits), left aligned & right-padded with
+ * zeros.
+ */
+};
+
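To make the shift definitions above concrete, a Control Plane PDB would be populated roughly as follows. This is an illustrative sketch only, not part of the patch; hfn, bearer, dir and hfn_threshold are hypothetical caller inputs.

	/* Sketch only: fill a PDCP Control Plane PDB using the shifts above. */
	struct pdcp_pdb pdb = {0};

	pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
	pdb.bearer_dir_res = (bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
			     (dir << PDCP_C_PLANE_PDB_DIR_SHIFT);
	pdb.hfn_thr_res = hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;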
+/*
+ * PDCP internal PDB types
+ */
+enum pdb_type_e {
+ PDCP_PDB_TYPE_NO_PDB,
+ PDCP_PDB_TYPE_FULL_PDB,
+ PDCP_PDB_TYPE_REDUCED_PDB,
+ PDCP_PDB_TYPE_INVALID
+};
+
+/*
+ * Function for appending the portion of a PDCP Control Plane shared descriptor
+ * which performs NULL encryption and integrity (i.e. copies the input frame
+ * to the output frame, appending 32 bits of zeros at the end (MAC-I for
+ * NULL integrity)).
+ */
+static inline int
+pdcp_insert_cplane_null_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ } else {
+ MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
+ IMMED2);
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
+ }
+
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+ else
+ MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /* Placeholder for MOVE command with length from M1 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+insert_copy_frame_op(struct program *p,
+ struct alginfo *cipherdata __maybe_unused,
+ unsigned int dir __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IFB | IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M0 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_int_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata, unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_SNOW:
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK, MATH2, 8,
+ IMMED2);
+ MOVEB(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK, MATH3, 8, IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK_BE, MATH2, 8,
+ IMMED2);
+
+ MOVE(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK_BE, MATH3, 8,
+ IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /*
+ * Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ /* Insert Auth Key */
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /*
+ * Placeholder for MOVE command with length from
+ * M1 register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
+ else
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ SEQINPTR(p, 0, 1, RTO);
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+ }
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "pdcp_insert_cplane_int_only_op", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_enc_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ (uint16_t)cipherdata->algtype << 8);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVE(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+ "pdcp_insert_cplane_enc_only_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ FIFOLOAD(p, MSG1, PDCP_NULL_INT_MAC_I_VAL, 4,
+ LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, OFIFO, 0, MATH1, 4, PDCP_MAC_I_LEN, WAITCOMP | IMMED);
+ MATHB(p, MATH1, XOR, PDCP_NULL_INT_MAC_I_VAL, NONE, 4, IMMED2);
+ JUMP(p, PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS,
+ HALT_STATUS, ALL_FALSE, MATH_Z);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_acc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_hfn_ovrd __maybe_unused)
+{
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL, (uint16_t)cipherdata->algtype);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_snow_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ LABEL(back_to_sd_offset);
+ LABEL(end_desc);
+ LABEL(local_offset);
+ LABEL(jump_to_beginning);
+ LABEL(fifo_load_mac_i_offset);
+ REFERENCE(seqin_ptr_read);
+ REFERENCE(seqin_ptr_write);
+ REFERENCE(seq_out_read);
+ REFERENCE(jump_back_to_sd_cmd);
+ REFERENCE(move_mac_i_to_desc_buf);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+ /*
+ * Note: Although the calculations below might seem a
+ * little off, the logic is the following:
+ *
+ * - SEQ IN PTR RTO below needs the full length of the
+ * frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
+ * this means the length of the frame to be processed
+ * + 4 bytes (the HFN override flag and value).
+ * The length of the frame to be processed minus 1
+ * byte is in the VSIL register (because
+ * VSIL = SIL + 3, due to the 1-byte header being
+ * already written by the SEQ STORE above). So for
+ * calculating the length to use in RTO, I add one
+ * to the VSIL value in order to obtain the total
+ * frame length. This helps in case of P4080 which
+ * can have the value 0 as an operand in a MATH
+ * command only as SRC1. When the HFN override
+ * workaround is not enabled, the length of the
+ * frame is given by the SIL register; the
+ * calculation is similar to the one in the SEC 4.2
+ * and SEC 5.3 cases.
+ */
+ if (era_2_sw_hfn_ovrd)
+ MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
+ 0);
+ else
+ MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
+ 0);
+ }
+ /*
+ * Placeholder for filling the length in
+ * SEQIN PTR RTO below
+ */
+ seqin_ptr_read = MOVE(p, DESCBUF, 0, MATH1, 0, 6, IMMED);
+ seqin_ptr_write = MOVE(p, MATH1, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, local_offset);
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, 0, RTO);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+ SEQFIFOLOAD(p, SKIP, 5, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
+ }
+
+ MATHB(p, SEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0))
+ SEQFIFOLOAD(p, SKIP, 1, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ PATCH_MOVE(p, seqin_ptr_read, local_offset);
+ PATCH_MOVE(p, seqin_ptr_write, local_offset);
+ } else {
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_5)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+/*
+ * TODO: To be changed when proper support is added in RTA (RTA can't load a
+ * command that is also written by RTA, or patch it for that matter).
+ */
+ if (p->ps)
+ WORD(p, 0x168B0004);
+ else
+ WORD(p, 0x16880404);
+
+ jump_back_to_sd_cmd = JUMP(p, 0, LOCAL_JUMP, ALL_TRUE, 0);
+ /*
+ * Placeholder for command reading the SEQ OUT command in
+ * JD. Done for rereading the decrypted data and performing
+ * the integrity check
+ */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per PS setting.
+ */
+ if (p->ps)
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 20,
+ WAITCOMP | IMMED);
+ else
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 16,
+ WAITCOMP | IMMED);
+
+ MATHB(p, MATH1, XOR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 4,
+ IMMED2);
+ /* Placeholder for overwriting the SEQ IN with SEQ OUT */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per PS setting.
+ */
+ if (p->ps)
+ MOVE(p, MATH1, 0, DESCBUF, 0, 24, IMMED);
+ else
+ MOVE(p, MATH1, 0, DESCBUF, 0, 20, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_4)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
+ 4, WAITCOMP | IMMED);
+ else
+ MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ /*
+ * Placeholder for jump in SD for executing the new SEQ IN PTR
+ * command (which is actually the old SEQ OUT PTR command
+ * copied over from JD).
+ */
+ SET_LABEL(p, jump_to_beginning);
+ JUMP(p, 1 - jump_to_beginning, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, back_to_sd_offset);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ /* Read the # of bytes written in the output buffer + 1 (HDR) */
+ MATHB(p, VSEQOUTSZ, ADD, ONE, VSEQINSZ, 4, 0);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era >= RTA_SEC_ERA_4) {
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ } else {
+ SET_LABEL(p, fifo_load_mac_i_offset);
+ FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
+ LAST1 | FLUSH1 | IMMED);
+ }
+
+ SET_LABEL(p, end_desc);
+
+ if (!p->ps) {
+ PATCH_MOVE(p, seq_out_read, end_desc + 1);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ } else {
+ PATCH_MOVE(p, seq_out_read, end_desc + 2);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVE(p, MATH1, 0, CONTEXT1, 16, 8, IMMED);
+ MOVE(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK), MATH2, 4,
+ IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK), MATH3, 4,
+ IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVE(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4, IMMED2);
+
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ else
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
+
+ if (rta_sec_era <= RTA_SEC_ERA_2) {
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
+ } else {
+ MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_snow_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ SET_LABEL(p, keyjump);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT1, 16, 8, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVE(p, MATH1, 0, CONTEXT1, 0, 8, IMMED);
+ MOVE(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK), MATH3,
+ 4, IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVE(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ }
+
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ /*
+ * For SEC ERA 6, there's a problem with the OFIFO
+ * pointer, and thus it needs to be reset here before
+ * moving to M0.
+ */
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ /* Put ICV to M0 before sending it to C2 for comparison. */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+	if (swap == false)
+		MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+		      IFB | IMMED2);
+	else
+		MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+		      IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+ MOVE(p, MATH0, 7, IFIFOAB1, 0, 1, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, PDCP_NULL_MAX_FRAME_LEN, RTO);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SEQFIFOLOAD(p, SKIP, 1, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQINPTR(p, 0, 0, SOP);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_uplane_15bit_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ unsigned int dir)
+{
+ int op;
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 6, 2, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_U_PLANE_15BIT_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_U_PLANE_15BIT_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ SEQSTORE(p, MATH0, 6, 2, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ op = dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC;
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVE(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+ "pdcp_insert_uplane_15bit_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ return 0;
+}
+
+/*
+ * Function for inserting the snippet of code responsible for handling the
+ * HFN override, taken either from DPOVRD or from the input frame.
+ */
+static inline int
+insert_hfn_ov_op(struct program *p,
+ uint32_t shift,
+ enum pdb_type_e pdb_type,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
+ uint16_t hfn_pdb_offset;
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
+ return 0;
+
+ switch (pdb_type) {
+ case PDCP_PDB_TYPE_NO_PDB:
+		/*
+		 * If there is no PDB, the HFN override mechanism makes no
+		 * sense, so return immediately and leave the descriptor
+		 * untouched.
+		 */
+ return 0;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ hfn_pdb_offset = 4;
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ hfn_pdb_offset = 8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
+ } else {
+ SEQLOAD(p, MATH0, 4, 4, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
+ SEQSTORE(p, MATH0, 4, 4, 0);
+ }
+
+ if (rta_sec_era >= RTA_SEC_ERA_8)
+ JUMP(p, 6, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ else
+ JUMP(p, 5, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+
+ MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8)
+		/*
+		 * For ERA 8, DPOVRD could be handled by the PROTOCOL command
+		 * itself. For now this is not done, so clear DPOVRD here to
+		 * avoid any side effects.
+		 */
+ MATHB(p, DPOVRD, AND, ZERO, DPOVRD, 4, STL);
+
+ return 0;
+}
+
+/*
+ * PDCP Control PDB creation function
+ */
+static inline enum pdb_type_e
+cnstr_pdcp_c_plane_pdb(struct program *p,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct pdcp_pdb pdb;
+ enum pdb_type_e
+ pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ PDCP_PDB_TYPE_NO_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* AES CTR */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* ZUC-E */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ };
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+		/* HW issue: bit 2 should be zero, but the hardware does not
+		 * behave correctly unless it is set, so override it here.
+		 */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+
+ /* copy PDB in descriptor*/
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ return PDCP_PDB_TYPE_FULL_PDB;
+ }
+
+ switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
+ case PDCP_PDB_TYPE_NO_PDB:
+ break;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ __rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
+ __rta_out32(p,
+ (uint32_t)((bearer <<
+ PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction <<
+ PDCP_C_PLANE_PDB_DIR_SHIFT)));
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+		/* HW issue: bit 2 should be zero, but the hardware does not
+		 * behave correctly unless it is set, so override it here.
+		 */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+
+ /* copy PDB in descriptor*/
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ break;
+
+ default:
+ return PDCP_PDB_TYPE_INVALID;
+ }
+
+ return pdb_mask[cipherdata->algtype][authdata->algtype];
+}
+
+/*
+ * PDCP UPlane PDB creation function
+ */
+static inline int
+cnstr_pdcp_u_plane_pdb(struct program *p,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn, unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold)
+{
+ struct pdcp_pdb pdb;
+ /* Read options from user */
+ /* Depending on sequence number length, the HFN and HFN threshold
+ * have different lengths.
+ */
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ pdb.opt_res.opt |= PDCP_U_PLANE_PDB_OPT_SHORT_SN;
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_12:
+ pdb.opt_res.opt &= (uint32_t)(~PDCP_U_PLANE_PDB_OPT_SHORT_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_15:
+ pdb.opt_res.opt = (uint32_t)(PDCP_U_PLANE_PDB_OPT_15B_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT;
+ break;
+
+ default:
+ pr_err("Invalid Sequence Number Size setting in PDB\n");
+ return -EINVAL;
+ }
+
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
+
+ /* copy PDB in descriptor*/
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ return 0;
+}
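
For reference, the shifts applied above mirror the PDCP COUNT layout: the HFN
sits immediately above the sequence number, so the HFN and HFN threshold are
shifted left by the SN width (7, 12 or 15 bits). A minimal sketch of that
relationship (illustrative only; the helper below is not part of this code):

	#include <stdint.h>

	/* PDCP COUNT is the HFN concatenated with the SN. */
	static uint32_t pdcp_count(uint32_t hfn, uint32_t sn,
				   unsigned int sn_size)
	{
		return (hfn << sn_size) | (sn & ((1u << sn_size) - 1u));
	}
	/* e.g. sn_size = 12, hfn = 0x12345, sn = 0xABC -> COUNT 0x12345ABC */
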
+/**
+ * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: set to true if the software HFN override mechanism is
+ *                     desired for this descriptor. Note: this can only be
+ *                     used for SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         The returned size can be used to reclaim the descbuf space that was
+ *         not needed for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int,
+ unsigned char __maybe_unused) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, PDCP_SN_SIZE_5, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_ENCAP_PROTOCOL,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
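
As a usage illustration only (the key buffers, the HFN/bearer/direction values
and the exact set of alginfo fields are assumptions, not taken from this
patch), a caller could build a SNOW-f8 + AES-CMAC control-plane encapsulation
descriptor along these lines:

	uint32_t shared_desc[64];	/* 256-byte shared descriptor buffer */
	struct alginfo cipher = {0}, auth = {0};
	int desc_len;

	cipher.algtype = PDCP_CIPHER_TYPE_SNOW;
	cipher.key = (uint64_t)(size_t)snow_key;	/* placeholder key */
	cipher.keylen = 16;

	auth.algtype = PDCP_AUTH_TYPE_AES;
	auth.key = (uint64_t)(size_t)aes_key;		/* placeholder key */
	auth.keylen = 16;

	desc_len = cnstr_shdsc_pdcp_c_plane_encap(shared_desc,
						  false /* ps */,
						  false /* swap */,
						  hfn, bearer, direction,
						  hfn_threshold,
						  &cipher, &auth,
						  0 /* era_2_sw_hfn_ovrd */);
	/* desc_len < 0 indicates a construction error */
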
+
+/**
+ * cnstr_shdsc_pdcp_c_plane_decap - Function for creating a PDCP Control Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: set to true if the software HFN override mechanism is
+ *                     desired for this descriptor. Note: this can only be
+ *                     used for SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         The returned size can be used to reclaim the descbuf space that was
+ *         not needed for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int, unsigned char) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, PDCP_SN_SIZE_5, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_DECAP_PROTOCOL,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_pdcp_u_plane_encap - Function for creating a PDCP User Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: set to true if the software HFN override mechanism is
+ *                     desired for this descriptor. Note: this can only be
+ *                     used for SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         The returned size can be used to reclaim the descbuf space that was
+ *         not needed for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+ if (cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
+ hfn_threshold)) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, PDCP_PDB_TYPE_FULL_PDB,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
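+			/* fall through */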
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ (uint64_t)cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ break;
+ default:
+			pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_encap",
+			       cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_15bit_op(p, swap, cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
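
Similarly, purely as an illustration (same assumptions as the control-plane
example above), a 12-bit-SN user-plane encapsulation descriptor with AES-CTR
ciphering could be requested like this:

	uint32_t shared_desc[64];
	struct alginfo cipher = {0};
	int desc_len;

	cipher.algtype = PDCP_CIPHER_TYPE_AES;
	cipher.key = (uint64_t)(size_t)aes_key;		/* placeholder key */
	cipher.keylen = 16;

	desc_len = cnstr_shdsc_pdcp_u_plane_encap(shared_desc, false, false,
						  PDCP_SN_SIZE_12, hfn, bearer,
						  direction, hfn_threshold,
						  &cipher, 0);
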
+
+/**
+ * cnstr_shdsc_pdcp_u_plane_decap - Function for creating a PDCP User Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: set to true if the software HFN override mechanism is
+ *                     desired for this descriptor. Note: this can only be
+ *                     used for SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         The returned size can be used to reclaim the descbuf space that was
+ *         not needed for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for era other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+ if (cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
+ hfn_threshold)) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, PDCP_PDB_TYPE_FULL_PDB,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
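+			/* fall through */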
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ break;
+ default:
+			pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_decap",
+			       cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_15bit_op(p, swap, cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_pdcp_short_mac - Function for creating a PDCP Short MAC
+ * descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         The returned size can be used to reclaim the descbuf space that was
+ *         not needed for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t iv[3] = {0, 0, 0};
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_NULL:
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, MATH0, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_SNOW:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0x04000000) : 0x04000000;
+ iv[2] = swap ? swab32(0xF8000000) : 0xF8000000;
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, MATH0, 0, 8, IMMED | COPY);
+ MOVE(p, MATH0, 0, IFIFOAB1, 0, 8, IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "cnstr_shdsc_pdcp_short_mac", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_PDCP_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
index d9a5b0e5..cf8dfb91 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -14,178 +14,176 @@ static inline int
__rta_ssl_proto(uint16_t protoinfo)
{
switch (protoinfo) {
- case OP_PCL_SSL30_RC4_40_MD5_2:
- case OP_PCL_SSL30_RC4_128_MD5_2:
- case OP_PCL_SSL30_RC4_128_SHA_5:
- case OP_PCL_SSL30_RC4_40_MD5_3:
- case OP_PCL_SSL30_RC4_128_MD5_3:
- case OP_PCL_SSL30_RC4_128_SHA:
- case OP_PCL_SSL30_RC4_128_MD5:
- case OP_PCL_SSL30_RC4_40_SHA:
- case OP_PCL_SSL30_RC4_40_MD5:
- case OP_PCL_SSL30_RC4_128_SHA_2:
- case OP_PCL_SSL30_RC4_128_SHA_3:
- case OP_PCL_SSL30_RC4_128_SHA_4:
- case OP_PCL_SSL30_RC4_128_SHA_6:
- case OP_PCL_SSL30_RC4_128_SHA_7:
- case OP_PCL_SSL30_RC4_128_SHA_8:
- case OP_PCL_SSL30_RC4_128_SHA_9:
- case OP_PCL_SSL30_RC4_128_SHA_10:
- case OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA:
+ case OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA:
if (rta_sec_era == RTA_SEC_ERA_7)
return -EINVAL;
/* fall through if not Era 7 */
- case OP_PCL_SSL30_DES40_CBC_SHA:
- case OP_PCL_SSL30_DES_CBC_SHA_2:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_5:
- case OP_PCL_SSL30_DES40_CBC_SHA_2:
- case OP_PCL_SSL30_DES_CBC_SHA_3:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_6:
- case OP_PCL_SSL30_DES40_CBC_SHA_3:
- case OP_PCL_SSL30_DES_CBC_SHA_4:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_7:
- case OP_PCL_SSL30_DES40_CBC_SHA_4:
- case OP_PCL_SSL30_DES_CBC_SHA_5:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_8:
- case OP_PCL_SSL30_DES40_CBC_SHA_5:
- case OP_PCL_SSL30_DES_CBC_SHA_6:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_9:
- case OP_PCL_SSL30_DES40_CBC_SHA_6:
- case OP_PCL_SSL30_DES_CBC_SHA_7:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_10:
- case OP_PCL_SSL30_DES_CBC_SHA:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA:
- case OP_PCL_SSL30_DES_CBC_MD5:
- case OP_PCL_SSL30_3DES_EDE_CBC_MD5:
- case OP_PCL_SSL30_DES40_CBC_SHA_7:
- case OP_PCL_SSL30_DES40_CBC_MD5:
- case OP_PCL_SSL30_AES_128_CBC_SHA:
- case OP_PCL_SSL30_AES_128_CBC_SHA_2:
- case OP_PCL_SSL30_AES_128_CBC_SHA_3:
- case OP_PCL_SSL30_AES_128_CBC_SHA_4:
- case OP_PCL_SSL30_AES_128_CBC_SHA_5:
- case OP_PCL_SSL30_AES_128_CBC_SHA_6:
- case OP_PCL_SSL30_AES_256_CBC_SHA:
- case OP_PCL_SSL30_AES_256_CBC_SHA_2:
- case OP_PCL_SSL30_AES_256_CBC_SHA_3:
- case OP_PCL_SSL30_AES_256_CBC_SHA_4:
- case OP_PCL_SSL30_AES_256_CBC_SHA_5:
- case OP_PCL_SSL30_AES_256_CBC_SHA_6:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_2:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_3:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_4:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_5:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_2:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_3:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_4:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_5:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_6:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_6:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_2:
- case OP_PCL_SSL30_AES_128_CBC_SHA_7:
- case OP_PCL_SSL30_AES_256_CBC_SHA_7:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_3:
- case OP_PCL_SSL30_AES_128_CBC_SHA_8:
- case OP_PCL_SSL30_AES_256_CBC_SHA_8:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_4:
- case OP_PCL_SSL30_AES_128_CBC_SHA_9:
- case OP_PCL_SSL30_AES_256_CBC_SHA_9:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_1:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_1:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_2:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_2:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_3:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_3:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_4:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_4:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_5:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_5:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_6:
- case OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384:
- case OP_PCL_TLS_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_11:
- case OP_PCL_SSL30_AES_128_CBC_SHA_10:
- case OP_PCL_SSL30_AES_256_CBC_SHA_10:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_12:
- case OP_PCL_SSL30_AES_128_CBC_SHA_11:
- case OP_PCL_SSL30_AES_256_CBC_SHA_11:
- case OP_PCL_SSL30_AES_128_CBC_SHA_12:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_13:
- case OP_PCL_SSL30_AES_256_CBC_SHA_12:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_14:
- case OP_PCL_SSL30_AES_128_CBC_SHA_13:
- case OP_PCL_SSL30_AES_256_CBC_SHA_13:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_15:
- case OP_PCL_SSL30_AES_128_CBC_SHA_14:
- case OP_PCL_SSL30_AES_256_CBC_SHA_14:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_16:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_17:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_18:
- case OP_PCL_SSL30_AES_128_CBC_SHA_15:
- case OP_PCL_SSL30_AES_128_CBC_SHA_16:
- case OP_PCL_SSL30_AES_128_CBC_SHA_17:
- case OP_PCL_SSL30_AES_256_CBC_SHA_15:
- case OP_PCL_SSL30_AES_256_CBC_SHA_16:
- case OP_PCL_SSL30_AES_256_CBC_SHA_17:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS12_3DES_EDE_CBC_MD5:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA160:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA224:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA256:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA384:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA512:
- case OP_PCL_TLS12_AES_128_CBC_SHA160:
- case OP_PCL_TLS12_AES_128_CBC_SHA224:
- case OP_PCL_TLS12_AES_128_CBC_SHA256:
- case OP_PCL_TLS12_AES_128_CBC_SHA384:
- case OP_PCL_TLS12_AES_128_CBC_SHA512:
- case OP_PCL_TLS12_AES_192_CBC_SHA160:
- case OP_PCL_TLS12_AES_192_CBC_SHA224:
- case OP_PCL_TLS12_AES_192_CBC_SHA256:
- case OP_PCL_TLS12_AES_192_CBC_SHA512:
- case OP_PCL_TLS12_AES_256_CBC_SHA160:
- case OP_PCL_TLS12_AES_256_CBC_SHA224:
- case OP_PCL_TLS12_AES_256_CBC_SHA256:
- case OP_PCL_TLS12_AES_256_CBC_SHA384:
- case OP_PCL_TLS12_AES_256_CBC_SHA512:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA160:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA384:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA224:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA512:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA256:
- case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE:
- case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF:
+ case OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA384:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF:
return 0;
}
@@ -323,6 +321,12 @@ static const uint32_t proto_blob_flags[] = {
OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
};
@@ -556,7 +560,7 @@ static const struct proto_map proto_table[] = {
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
- {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
@@ -568,7 +572,7 @@ static const struct proto_map proto_table[] = {
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
- {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
index 6e666108..5357187f 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -497,6 +497,28 @@ __rta_out64(struct program *program, bool is_ext, uint64_t val)
}
}
+static inline void __rta_out_be64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_be32(program, upper_32_bits(val));
+ __rta_out_be32(program, lower_32_bits(val));
+ } else {
+ __rta_out_be32(program, lower_32_bits(val));
+ }
+}
+
+static inline void __rta_out_le64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_le32(program, lower_32_bits(val));
+ __rta_out_le32(program, upper_32_bits(val));
+ } else {
+ __rta_out_le32(program, lower_32_bits(val));
+ }
+}
+
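
To make the word ordering of the two new helpers concrete (assuming a program
context p that has already been initialized):

	__rta_out_be64(p, true, 0x1122334455667788ULL);
		/* emits 0x11223344, then 0x55667788 */
	__rta_out_le64(p, true, 0x1122334455667788ULL);
		/* emits 0x55667788, then 0x11223344 */
	/* With is_ext == false, both emit only the lower word, 0x55667788. */
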
static inline unsigned int
rta_word(struct program *program, uint32_t val)
{
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index de8ca970..87e0defd 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -6,6 +6,7 @@
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
+#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_dpseci_cmd.h>
@@ -116,11 +117,13 @@ int dpseci_create(struct fsl_mc_io *mc_io,
cmd_flags,
dprc_token);
cmd_params = (struct dpseci_cmd_create *)cmd.params;
- for (i = 0; i < DPSECI_PRIO_NUM; i++)
+ for (i = 0; i < 8; i++)
cmd_params->priorities[i] = cfg->priorities[i];
+ for (i = 0; i < 8; i++)
+ cmd_params->priorities2[i] = cfg->priorities[8 + i];
cmd_params->num_tx_queues = cfg->num_tx_queues;
cmd_params->num_rx_queues = cfg->num_rx_queues;
- cmd_params->options = cfg->options;
+ cmd_params->options = cpu_to_le32(cfg->options);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -302,7 +305,7 @@ int dpseci_get_attributes(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dpseci_rsp_get_attr *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
- attr->options = rsp_params->options;
+ attr->options = le32_to_cpu(rsp_params->options);
attr->num_tx_queues = rsp_params->num_tx_queues;
attr->num_rx_queues = rsp_params->num_rx_queues;
@@ -490,6 +493,8 @@ int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
attr->arc4_acc_num = rsp_params->arc4_acc_num;
attr->des_acc_num = rsp_params->des_acc_num;
attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
return 0;
}
@@ -569,6 +574,113 @@ int dpseci_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+/**
+ * dpseci_set_opr() - Set Order Restoration configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @options: Configuration mode options
+ * can be OPR_OPT_CREATE or OPR_OPT_RETIRE
+ * @cfg: Configuration options for the OPR
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg)
+{
+ struct dpseci_cmd_set_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_set_opr *)cmd.params;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->oloe = cfg->oloe;
+ cmd_params->oeane = cfg->oeane;
+ cmd_params->olws = cfg->olws;
+ cmd_params->oa = cfg->oa;
+ cmd_params->oprrws = cfg->oprrws;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
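
As an illustrative sketch only (mc_io and token are assumed to come from the
usual dpseci_open() flow, the field values are arbitrary, and their encodings
are defined in fsl_dpopr.h), creating an order restoration point on queue
index 0 might look like:

	struct opr_cfg cfg = {0};
	int err;

	cfg.oprrws = 3;		/* order restoration window size (encoded) */
	cfg.oa = 0;
	cfg.olws = 0;
	cfg.oeane = 0;
	cfg.oloe = 0;

	err = dpseci_set_opr(mc_io, CMD_PRI_LOW, token, 0 /* queue index */,
			     OPR_OPT_CREATE, &cfg);
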
+/**
+ * dpseci_get_opr() - Retrieve Order Restoration config and query.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @cfg: Returned OPR configuration
+ * @qry: Returned OPR query
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry)
+{
+ struct dpseci_rsp_get_opr *rsp_params;
+ struct dpseci_cmd_get_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_get_opr *)cmd.params;
+ cmd_params->index = index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
+ cfg->oloe = rsp_params->oloe;
+ cfg->oeane = rsp_params->oeane;
+ cfg->olws = rsp_params->olws;
+ cfg->oa = rsp_params->oa;
+ cfg->oprrws = rsp_params->oprrws;
+ qry->rip = dpseci_get_field(rsp_params->flags, RIP);
+ qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
+ qry->nesn = le16_to_cpu(rsp_params->nesn);
+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
+ qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
+ qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
int dpseci_set_congestion_notification(
struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -604,6 +716,16 @@ int dpseci_set_congestion_notification(
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpseci_get_congestion_notification() - Get congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
int dpseci_get_congestion_notification(
struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 12ac005a..279e8f4d 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -20,7 +20,7 @@ struct fsl_mc_io;
/**
* Maximum number of Tx/Rx priorities per DPSECI object
*/
-#define DPSECI_PRIO_NUM 8
+#define DPSECI_MAX_QUEUE_NUM 16
/**
* All queues considered; see dpseci_set_rx_queue()
@@ -58,7 +58,7 @@ struct dpseci_cfg {
uint32_t options;
uint8_t num_tx_queues;
uint8_t num_rx_queues;
- uint8_t priorities[DPSECI_PRIO_NUM];
+ uint8_t priorities[DPSECI_MAX_QUEUE_NUM];
};
int dpseci_create(struct fsl_mc_io *mc_io,
@@ -259,6 +259,10 @@ int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
* implemented in this version of SEC.
* @aes_acc_num: The number of copies of the AES module that are
* implemented in this version of SEC.
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC.
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC.
**/
struct dpseci_sec_attr {
@@ -279,6 +283,8 @@ struct dpseci_sec_attr {
uint8_t arc4_acc_num;
uint8_t des_acc_num;
uint8_t aes_acc_num;
+ uint8_t ccha_acc_num;
+ uint8_t ptha_acc_num;
};
int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
@@ -316,6 +322,21 @@ int dpseci_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
uint16_t *minor_ver);
+
+int dpseci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg);
+
+int dpseci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry);
+
/**
* enum dpseci_congestion_unit - DPSECI congestion units
* @DPSECI_CONGESTION_UNIT_BYTES: bytes units
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index 26cef0f7..af3518a0 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -9,22 +9,25 @@
/* DPSECI Version */
#define DPSECI_VER_MAJOR 5
-#define DPSECI_VER_MINOR 1
+#define DPSECI_VER_MINOR 3
/* Command versioning */
#define DPSECI_CMD_BASE_VERSION 1
#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_BASE_VERSION_V3 3
#define DPSECI_CMD_ID_OFFSET 4
#define DPSECI_CMD_V1(id) \
((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION)
#define DPSECI_CMD_V2(id) \
((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V2)
+#define DPSECI_CMD_V3(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V3)
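Editor's note: the versioned IDs below pack a command identifier and a 4-bit command version into one value; a worked example (not part of the patch):

/*
 * DPSECI_CMD_V3(0x909) expands to (0x909 << 4) | 3 == 0x9093:
 * the upper bits carry the command ID, and the low nibble selects the
 * command format version understood by the MC firmware.
 */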
/* Command IDs */
#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
-#define DPSECI_CMDID_CREATE DPSECI_CMD_V2(0x909)
+#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
@@ -37,9 +40,10 @@
#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
-#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V1(0x198)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
-
+#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
+#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
@@ -63,6 +67,8 @@ struct dpseci_cmd_create {
uint8_t num_rx_queues;
uint8_t pad[6];
uint32_t options;
+ uint32_t pad2;
+ uint8_t priorities2[8];
};
struct dpseci_cmd_destroy {
@@ -152,6 +158,8 @@ struct dpseci_rsp_get_sec_attr {
uint8_t arc4_acc_num;
uint8_t des_acc_num;
uint8_t aes_acc_num;
+ uint8_t ccha_acc_num;
+ uint8_t ptha_acc_num;
};
struct dpseci_rsp_get_sec_counters {
@@ -169,6 +177,63 @@ struct dpseci_rsp_get_api_version {
uint16_t minor;
};
+struct dpseci_cmd_set_opr {
+ uint16_t pad0;
+ uint8_t index;
+ uint8_t options;
+ uint8_t pad1[7];
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+};
+
+struct dpseci_cmd_get_opr {
+ uint16_t pad;
+ uint8_t index;
+};
+
+#define DPSECI_RIP_SHIFT 0
+#define DPSECI_RIP_SIZE 1
+#define DPSECI_OPR_ENABLE_SHIFT 1
+#define DPSECI_OPR_ENABLE_SIZE 1
+#define DPSECI_TSEQ_NLIS_SHIFT 0
+#define DPSECI_TSEQ_NLIS_SIZE 1
+#define DPSECI_HSEQ_NLIS_SHIFT 0
+#define DPSECI_HSEQ_NLIS_SIZE 1
+
+struct dpseci_rsp_get_opr {
+ uint64_t pad0;
+ /* from LSB: rip:1 enable:1 */
+ uint8_t flags;
+ uint16_t pad1;
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+ uint16_t nesn;
+ uint16_t pad8;
+ uint16_t ndsn;
+ uint16_t pad2;
+ uint16_t ea_tseq;
+ /* only the LSB */
+ uint8_t tseq_nlis;
+ uint8_t pad3;
+ uint16_t ea_hseq;
+ /* only the LSB */
+ uint8_t hseq_nlis;
+ uint8_t pad4;
+ uint16_t ea_hptr;
+ uint16_t pad5;
+ uint16_t ea_tptr;
+ uint16_t pad6;
+ uint16_t opr_vid;
+ uint16_t pad7;
+ uint16_t opr_id;
+};
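Editor's note: the SHIFT/SIZE pairs above are consumed by the driver's generic field accessors (e.g. dpseci_get_field(rsp_params->flags, RIP) in dpseci_get_opr()). A minimal sketch of that pattern, assuming the usual MC-driver helper shape; the macro name here is illustrative:

/* Illustrative only: how a SHIFT/SIZE pair extracts a bit-field. */
#define EXAMPLE_GET_FIELD(var, shift, size) \
	(((var) >> (shift)) & ((1ULL << (size)) - 1))

/* rip    = EXAMPLE_GET_FIELD(flags, DPSECI_RIP_SHIFT, DPSECI_RIP_SIZE);
 * enable = EXAMPLE_GET_FIELD(flags, DPSECI_OPR_ENABLE_SHIFT,
 *			      DPSECI_OPR_ENABLE_SIZE);
 */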
+
#define DPSECI_DEST_TYPE_SHIFT 0
#define DPSECI_DEST_TYPE_SIZE 4
#define DPSECI_CG_UNITS_SHIFT 4
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index 01afc587..8fa4827e 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
diff --git a/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
index 8591cc0b..0bfb986d 100644
--- a/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
+++ b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
@@ -2,3 +2,11 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_18.11 {
+ global:
+
+ dpaa2_sec_eventq_attach;
+ dpaa2_sec_eventq_detach;
+
+} DPDK_17.05;
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
index 9be44704..5ce95c23 100644
--- a/drivers/crypto/dpaa_sec/Makefile
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -11,7 +11,6 @@ LIB = librte_pmd_dpaa_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
-CFLAGS += -D _GNU_SOURCE
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
@@ -38,5 +37,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec.c
LDLIBS += -lrte_bus_dpaa
LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f571050b..d83e7454 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -23,6 +23,7 @@
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
+#include <rte_spinlock.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -106,6 +107,12 @@ dpaa_mem_vtop(void *vaddr)
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
+ void *va;
+
+ va = (void *)dpaax_iova_table_get_va(paddr);
+ if (likely(va))
+ return va;
+
return rte_mem_iova2virt(paddr);
}
@@ -274,6 +281,9 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
switch (ses->auth_alg) {
case RTE_CRYPTO_AUTH_NULL:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_NULL : 0;
ses->digest_length = 0;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
@@ -322,6 +332,9 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
switch (ses->cipher_alg) {
case RTE_CRYPTO_CIPHER_NULL:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_NULL : 0;
break;
case RTE_CRYPTO_CIPHER_AES_CBC:
alginfo_c->algtype =
@@ -359,6 +372,87 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
}
}
+/* prepare the IPsec protocol command block of the session */
+static int
+dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo cipherdata = {0}, authdata = {0};
+ struct sec_cdb *cdb = &ses->cdb;
+ int32_t shared_desc_len = 0;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ caam_cipher_alg(ses, &cipherdata);
+ if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported cipher alg");
+ return -ENOTSUP;
+ }
+
+ cipherdata.key = (size_t)ses->cipher_key.data;
+ cipherdata.keylen = ses->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &authdata);
+ if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported auth alg");
+ return -ENOTSUP;
+ }
+
+ authdata.key = (size_t)ses->auth_key.data;
+ authdata.keylen = ses->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = cipherdata.keylen;
+ cdb->sh_desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ cipherdata.key_type = RTA_DATA_IMM;
+ else {
+ cipherdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ authdata.key_type = RTA_DATA_IMM;
+ else {
+ authdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &cipherdata, &authdata);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->decap_pdb,
+ &cipherdata, &authdata);
+ }
+ return shared_desc_len;
+}
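Editor's note: the rta_inline_query() step above returns a bit mask (held temporarily in sh_desc[2]) indicating which keys fit inline in the shared descriptor; bit 0 covers the cipher key and bit 1 the authentication key. A condensed sketch of that decision, restating the logic above for clarity (helper name is illustrative):

/* Illustrative only: inline-vs-pointer key placement from the inline mask. */
static void example_key_placement(struct alginfo *alg, uint32_t inl_mask,
				  unsigned int bit)
{
	if (inl_mask & (1u << bit)) {
		/* key is small enough: copy it into the descriptor */
		alg->key_type = RTA_DATA_IMM;
	} else {
		/* otherwise reference it by bus address */
		alg->key = (size_t)dpaa_mem_vtop((void *)(size_t)alg->key);
		alg->key_type = RTA_DATA_PTR;
	}
}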
/* prepare command block of the session */
static int
@@ -376,7 +470,9 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
memset(cdb, 0, sizeof(struct sec_cdb));
- if (is_cipher_only(ses)) {
+ if (is_proto_ipsec(ses)) {
+ shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
+ } else if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("not supported cipher alg");
@@ -484,28 +580,13 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
cdb->sh_desc[0] = 0;
cdb->sh_desc[1] = 0;
cdb->sh_desc[2] = 0;
- if (is_proto_ipsec(ses)) {
- if (ses->dir == DIR_ENC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_encap(
- cdb->sh_desc,
- true, swap, &ses->encap_pdb,
- (uint8_t *)&ses->ip4_hdr,
- &alginfo_c, &alginfo_a);
- } else if (ses->dir == DIR_DEC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_decap(
- cdb->sh_desc,
- true, swap, &ses->decap_pdb,
- &alginfo_c, &alginfo_a);
- }
- } else {
- /* Auth_only_len is set as 0 here and it will be
- * overwritten in fd for each packet.
- */
- shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
- true, swap, &alginfo_c, &alginfo_a,
- ses->iv.length, 0,
- ses->digest_length, ses->dir);
- }
+ /* auth_only_len is set to 0 here; it is overwritten
+ * in the FD for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
}
if (shared_desc_len < 0) {
@@ -1445,20 +1526,26 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
nb_ops = loop;
goto send_pkts;
}
- if (unlikely(!ses->qp || ses->qp != qp)) {
- DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
- ses->qp, qp);
+ if (unlikely(!ses->qp)) {
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
}
+ } else if (unlikely(ses->qp != qp)) {
+ DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+ " New qp = %p\n", ses->qp, qp);
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
}
auth_only_len = op->sym->auth.data.length -
op->sym->cipher.data.length;
if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- if (is_auth_only(ses)) {
+ if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else if (is_auth_only(ses)) {
cf = build_auth_only(op, ses);
} else if (is_cipher_only(ses)) {
cf = build_cipher_only(op, ses);
@@ -1467,8 +1554,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
auth_only_len = ses->auth_only_len;
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth(op, ses);
- } else if (is_proto_ipsec(ses)) {
- cf = build_proto(op, ses);
} else {
DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
@@ -1760,6 +1845,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
DPAA_SEC_ERR("invalid session struct");
return -EINVAL;
}
+ memset(session, 0, sizeof(dpaa_sec_session));
/* Default IV length = 0 */
session->iv.length = 0;
@@ -1807,7 +1893,9 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
+ rte_spinlock_lock(&internals->lock);
session->inq = dpaa_sec_attach_rxq(internals);
+ rte_spinlock_unlock(&internals->lock);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
goto err1;
@@ -1888,111 +1976,86 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
{
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
- struct rte_crypto_auth_xform *auth_xform;
- struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
dpaa_sec_session *session = (dpaa_sec_session *)sess;
PMD_INIT_FUNC_TRACE();
+ memset(session, 0, sizeof(dpaa_sec_session));
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
cipher_xform = &conf->crypto_xform->cipher;
- auth_xform = &conf->crypto_xform->next->auth;
+ if (conf->crypto_xform->next)
+ auth_xform = &conf->crypto_xform->next->auth;
} else {
auth_xform = &conf->crypto_xform->auth;
- cipher_xform = &conf->crypto_xform->next->cipher;
+ if (conf->crypto_xform->next)
+ cipher_xform = &conf->crypto_xform->next->cipher;
}
session->proto_alg = conf->protocol;
- session->cipher_key.data = rte_zmalloc(NULL,
- cipher_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->cipher_key.data == NULL &&
- cipher_xform->key.length > 0) {
- DPAA_SEC_ERR("No Memory for cipher key");
- return -ENOMEM;
- }
- session->cipher_key.length = cipher_xform->key.length;
- session->auth_key.data = rte_zmalloc(NULL,
- auth_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->auth_key.data == NULL &&
- auth_xform->key.length > 0) {
- DPAA_SEC_ERR("No Memory for auth key");
- rte_free(session->cipher_key.data);
- return -ENOMEM;
+ if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->cipher_key.length = cipher_xform->key.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto out;
+ }
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
}
- session->auth_key.length = auth_xform->key.length;
- memcpy(session->cipher_key.data, cipher_xform->key.data,
- cipher_xform->key.length);
- memcpy(session->auth_key.data, auth_xform->key.data,
- auth_xform->key.length);
- switch (auth_xform->algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
- break;
- case RTE_CRYPTO_AUTH_MD5_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
- break;
- case RTE_CRYPTO_AUTH_AES_CMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
- break;
- case RTE_CRYPTO_AUTH_NULL:
+ if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_key.length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto out;
+ }
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
session->auth_alg = RTE_CRYPTO_AUTH_NULL;
- break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- case RTE_CRYPTO_AUTH_SHA1:
- case RTE_CRYPTO_AUTH_SHA256:
- case RTE_CRYPTO_AUTH_SHA512:
- case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_GMAC:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
- auth_xform->algo);
- goto out;
- default:
- DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
- auth_xform->algo);
- goto out;
- }
-
- switch (cipher_xform->algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
- break;
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
- case RTE_CRYPTO_CIPHER_3DES_ECB:
- case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
- cipher_xform->algo);
- goto out;
- default:
- DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
- cipher_xform->algo);
- goto out;
}
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
@@ -2020,7 +2083,8 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBOPTS_ESP_IVSRC |
- PDBHMO_ESP_ENCAP_DTTL;
+ PDBHMO_ESP_ENCAP_DTTL |
+ PDBHMO_ESP_SNR;
session->encap_pdb.spi = ipsec_xform->spi;
session->encap_pdb.ip_hdr_len = sizeof(struct ip);
@@ -2033,7 +2097,9 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
} else
goto out;
session->ctx_pool = internals->ctx_pool;
+ rte_spinlock_lock(&internals->lock);
session->inq = dpaa_sec_attach_rxq(internals);
+ rte_spinlock_unlock(&internals->lock);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
goto out;
@@ -2204,7 +2270,7 @@ dpaa_sec_capabilities_get(void *device __rte_unused)
return dpaa_sec_security_cap;
}
-struct rte_security_ops dpaa_sec_security_ops = {
+static const struct rte_security_ops dpaa_sec_security_ops = {
.session_create = dpaa_sec_security_session_create,
.session_update = NULL,
.session_stats_get = NULL,
@@ -2284,6 +2350,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
security_instance->sess_cnt = 0;
cryptodev->security_ctx = security_instance;
+ rte_spinlock_init(&internals->lock);
for (i = 0; i < internals->max_nb_queue_pairs; i++) {
/* init qman fq for queue pair */
qp = &internals->qps[i];
@@ -2316,7 +2383,7 @@ init_error:
}
static int
-cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
+cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
struct rte_dpaa_device *dpaa_dev)
{
struct rte_cryptodev *cryptodev;
@@ -2344,7 +2411,6 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
dpaa_dev->crypto_dev = cryptodev;
cryptodev->device = &dpaa_dev->device;
- cryptodev->device->driver = &dpaa_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index ac6c00a6..f4b87844 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -137,7 +137,7 @@ struct dpaa_sec_qp {
int tx_errs;
};
-#define RTE_DPAA_MAX_NB_SEC_QPS 8
+#define RTE_DPAA_MAX_NB_SEC_QPS 2
#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
@@ -150,6 +150,7 @@ struct dpaa_sec_dev_private {
unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
unsigned int max_nb_queue_pairs;
unsigned int max_nb_sessions;
+ rte_spinlock_t lock;
};
#define MAX_SG_ENTRIES 16
diff --git a/drivers/crypto/kasumi/meson.build b/drivers/crypto/kasumi/meson.build
new file mode 100644
index 00000000..a09b0e25
--- /dev/null
+++ b/drivers/crypto/kasumi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('libsso_kasumi', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('rte_kasumi_pmd.c', 'rte_kasumi_pmd_ops.c')
+deps += ['bus_vdev']
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index d64ca418..bf1bd928 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam',
- 'null', 'openssl', 'qat', 'virtio']
+drivers = ['aesni_gcm', 'aesni_mb', 'caam_jr', 'ccp', 'dpaa_sec', 'dpaa2_sec',
+ 'kasumi', 'mvsam', 'null', 'octeontx', 'openssl', 'qat', 'scheduler',
+ 'virtio', 'zuc']
std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
diff --git a/drivers/crypto/mvsam/Makefile b/drivers/crypto/mvsam/Makefile
index c3dc72c1..2b4d036c 100644
--- a/drivers/crypto/mvsam/Makefile
+++ b/drivers/crypto/mvsam/Makefile
@@ -19,6 +19,7 @@ LIB = librte_pmd_mvsam_crypto.a
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/mvep
CFLAGS += -I$(LIBMUSDK_PATH)/include
CFLAGS += -DMVCONF_TYPES_PUBLIC
CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
@@ -31,9 +32,9 @@ EXPORT_MAP := rte_pmd_mvsam_version.map
# external library dependencies
LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_kvargs
LDLIBS += -lrte_cryptodev
-LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_bus_vdev -lrte_common_mvep
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c
diff --git a/drivers/crypto/mvsam/meson.build b/drivers/crypto/mvsam/meson.build
index 3c8ea3cf..f1c87966 100644
--- a/drivers/crypto/mvsam/meson.build
+++ b/drivers/crypto/mvsam/meson.build
@@ -18,4 +18,4 @@ endif
sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c')
-deps += ['bus_vdev']
+deps += ['bus_vdev', 'common_mvep']
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c
index 73eff757..c2ae82a2 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -11,11 +11,11 @@
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
+#include <rte_kvargs.h>
+#include <rte_mvep_common.h>
#include "rte_mrvl_pmd_private.h"
-#define MRVL_MUSDK_DMA_MEMSIZE 41943040
-
#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
@@ -68,6 +68,9 @@ __rte_aligned(32);
*/
static const
struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
+ [RTE_CRYPTO_CIPHER_NULL] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_NONE },
[RTE_CRYPTO_CIPHER_3DES_CBC] = {
.supported = ALGO_SUPPORTED,
.cipher_alg = SAM_CIPHER_3DES,
@@ -93,6 +96,11 @@ struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
.cipher_alg = SAM_CIPHER_AES,
.cipher_mode = SAM_CIPHER_CTR,
.max_key_len = BITS2BYTES(256) },
+ [RTE_CRYPTO_CIPHER_AES_ECB] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_ECB,
+ .max_key_len = BITS2BYTES(256) },
};
/**
@@ -100,6 +108,9 @@ struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
*/
static const
struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
+ [RTE_CRYPTO_AUTH_NULL] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_NONE },
[RTE_CRYPTO_AUTH_MD5_HMAC] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HMAC_MD5 },
@@ -112,6 +123,9 @@ struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
[RTE_CRYPTO_AUTH_SHA1] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HASH_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
[RTE_CRYPTO_AUTH_SHA224] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HASH_SHA2_224 },
@@ -210,7 +224,7 @@ mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
{
/* Make sure we've got proper struct */
if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
@@ -218,7 +232,7 @@ mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
(cipher_map[cipher_xform->cipher.algo].supported
!= ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ MRVL_LOG(ERR, "Cipher algorithm not supported!");
return -EINVAL;
}
@@ -238,7 +252,7 @@ mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
/* Get max key length. */
if (cipher_xform->cipher.key.length >
cipher_map[cipher_xform->cipher.algo].max_key_len) {
- MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ MRVL_LOG(ERR, "Wrong key length!");
return -EINVAL;
}
@@ -261,14 +275,14 @@ mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
{
/* Make sure we've got proper struct */
if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
/* See if map data is present and valid */
if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
(auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ MRVL_LOG(ERR, "Auth algorithm not supported!");
return -EINVAL;
}
@@ -300,7 +314,7 @@ mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
{
/* Make sure we've got proper struct */
if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
@@ -308,7 +322,7 @@ mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
(aead_map[aead_xform->aead.algo].supported
!= ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ MRVL_LOG(ERR, "AEAD algorithm not supported!");
return -EINVAL;
}
@@ -326,7 +340,7 @@ mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
/* Get max key length. */
if (aead_xform->aead.key.length >
aead_map[aead_xform->aead.algo].max_key_len) {
- MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ MRVL_LOG(ERR, "Wrong key length!");
return -EINVAL;
}
@@ -391,21 +405,21 @@ mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
if ((cipher_xform != NULL) &&
(mrvl_crypto_set_cipher_session_parameters(
sess, cipher_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported cipher parameters!");
return -EINVAL;
}
if ((auth_xform != NULL) &&
(mrvl_crypto_set_auth_session_parameters(
sess, auth_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported auth parameters!");
return -EINVAL;
}
if ((aead_xform != NULL) &&
(mrvl_crypto_set_aead_session_parameters(
sess, aead_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported aead parameters!");
return -EINVAL;
}
@@ -437,12 +451,14 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
struct rte_crypto_op *op)
{
struct mrvl_crypto_session *sess;
- struct rte_mbuf *dst_mbuf;
+ struct rte_mbuf *src_mbuf, *dst_mbuf;
+ uint16_t segments_nb;
uint8_t *digest;
+ int i;
if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
- "oriented requests, op (%p) is sessionless.",
+ MRVL_LOG(ERR, "MRVL CRYPTO PMD only supports session "
+ "oriented requests, op (%p) is sessionless!",
op);
return -EINVAL;
}
@@ -450,39 +466,56 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
op->sym->session, cryptodev_driver_id);
if (unlikely(sess == NULL)) {
- MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ MRVL_LOG(ERR, "Session was not created for this device!");
return -EINVAL;
}
- /*
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+ src_mbuf = op->sym->m_src;
+ segments_nb = src_mbuf->nb_segs;
+ /* The following conditions must be met:
+ * - a destination buffer is required when the source buffer is segmented
+ * - a segmented destination buffer is not supported
+ */
+ if ((segments_nb > 1) && (!op->sym->m_dst)) {
+ MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
+ return -1;
+ }
+ /* For the non-SG case:
* If application delivered us null dst buffer, it means it expects
* us to deliver the result in src buffer.
*/
dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
- request->sa = sess->sam_sess;
- request->cookie = op;
-
- /* Single buffers only, sorry. */
- request->num_bufs = 1;
- request->src = src_bd;
- src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
- src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
- src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
-
- /* Empty source. */
- if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
- /* EIP does not support 0 length buffers. */
- MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
+ MRVL_LOG(ERR, "Segmented destination buffer not supported!");
return -1;
}
+ request->num_bufs = segments_nb;
+ for (i = 0; i < segments_nb; i++) {
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(src_mbuf) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_LOG(ERR, "Buffer length == 0 not supported!");
+ return -1;
+ }
+ src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
+ src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
+ src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);
+
+ src_mbuf = src_mbuf->next;
+ }
+ request->src = src_bd;
+
/* Empty destination. */
if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
/* Make dst buffer fit at least source data. */
if (rte_pktmbuf_append(dst_mbuf,
rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
- MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+ MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
return -1;
}
}
@@ -527,7 +560,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
/*
* EIP supports only scenarios where ICV(digest buffer) is placed at
- * auth_icv_offset. Any other placement means risking errors.
+ * auth_icv_offset.
*/
if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
/*
@@ -536,17 +569,36 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
*/
if (rte_pktmbuf_mtod_offset(
dst_mbuf, uint8_t *,
- request->auth_icv_offset) == digest) {
+ request->auth_icv_offset) == digest)
return 0;
- }
} else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
/*
* EIP will look for digest at auth_icv_offset
- * offset in SRC buffer.
+ * offset in SRC buffer. The digest must reside in the
+ * last segment, and the offset must be adjusted to point
+ * at the digest within that segment.
*/
- if (rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- request->auth_icv_offset) == digest) {
+ struct rte_mbuf *last_seg = op->sym->m_src;
+ uint32_t d_offset = request->auth_icv_offset;
+ u32 d_size = sess->sam_sess_params.u.basic.auth_icv_len;
+ unsigned char *d_ptr;
+
+ /* Find the last segment and the digest offset within it */
+ while ((last_seg->next != NULL) &&
+ (d_offset >= last_seg->data_len)) {
+ d_offset -= last_seg->data_len;
+ last_seg = last_seg->next;
+ }
+
+ if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
+ d_offset) == digest)
+ return 0;
+
+ /* copy digest to last segment */
+ if (last_seg->buf_len >= (d_size + d_offset)) {
+ d_ptr = (unsigned char *)last_seg->buf_addr +
+ d_offset;
+ rte_memcpy(d_ptr, digest, d_size);
return 0;
}
}
@@ -582,11 +634,10 @@ mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
int ret;
struct sam_cio_op_params requests[nb_ops];
/*
- * DPDK uses single fragment buffers, so we can KISS descriptors.
* SAM does not store bd pointers, so on-stack scope will be enough.
*/
- struct sam_buf_info src_bd[nb_ops];
- struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_src_table src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
if (nb_ops == 0)
@@ -594,15 +645,17 @@ mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
/* Prepare the burst. */
memset(&requests, 0, sizeof(requests));
+ memset(&src_bd, 0, sizeof(src_bd));
/* Iterate through */
for (; iter_ops < nb_ops; ++iter_ops) {
+ /* store the op id for debug */
+ src_bd[iter_ops].iter_ops = iter_ops;
if (mrvl_request_prepare(&requests[iter_ops],
- &src_bd[iter_ops],
+ src_bd[iter_ops].src_bd,
&dst_bd[iter_ops],
ops[iter_ops]) < 0) {
- MRVL_CRYPTO_LOG_ERR(
- "Error while parameters preparation!");
+ MRVL_LOG(ERR, "Error while preparing parameters!");
qp->stats.enqueue_err_count++;
ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -680,12 +733,12 @@ mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
break;
case SAM_CIO_ERR_ICV:
- MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ MRVL_LOG(DEBUG, "CIO returned SAM_CIO_ERR_ICV.");
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
break;
default:
- MRVL_CRYPTO_LOG_DBG(
- "CIO returned Error: %d", results[i].status);
+ MRVL_LOG(DEBUG,
+ "CIO returned Error: %d.", results[i].status);
ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
break;
}
@@ -711,12 +764,12 @@ cryptodev_mrvl_crypto_create(const char *name,
struct rte_cryptodev *dev;
struct mrvl_crypto_private *internals;
struct sam_init_params sam_params;
- int ret;
+ int ret = -EINVAL;
dev = rte_cryptodev_pmd_create(name, &vdev->device,
&init_params->common);
if (dev == NULL) {
- MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ MRVL_LOG(ERR, "Failed to create cryptodev vdev!");
goto init_error;
}
@@ -729,7 +782,9 @@ cryptodev_mrvl_crypto_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_HW_ACCELERATED;
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
@@ -737,29 +792,26 @@ cryptodev_mrvl_crypto_create(const char *name,
internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
- /*
- * ret == -EEXIST is correct, it means DMA
- * has been already initialized.
- */
- ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
- if (ret < 0) {
- if (ret != -EEXIST)
- return ret;
-
- MRVL_CRYPTO_LOG_INFO(
- "DMA memory has been already initialized by a different driver.");
- }
+ ret = rte_mvep_init(MVEP_MOD_T_SAM, NULL);
+ if (ret)
+ goto init_error;
sam_params.max_num_sessions = internals->max_nb_sessions;
- return sam_init(&sam_params);
+ /* sam_set_debug_flags(3); */
+
+ ret = sam_init(&sam_params);
+ if (ret)
+ goto init_error;
+
+ return 0;
init_error:
- MRVL_CRYPTO_LOG_ERR(
- "driver %s: %s failed", init_params->common.name, __func__);
+ MRVL_LOG(ERR,
+ "Driver %s: %s failed!", init_params->common.name, __func__);
cryptodev_mrvl_crypto_uninit(vdev);
- return -EFAULT;
+ return ret;
}
/** Parse integer from integer argument */
@@ -771,7 +823,7 @@ parse_integer_arg(const char *key __rte_unused,
*i = atoi(value);
if (*i < 0) {
- MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
+ MRVL_LOG(ERR, "Argument has to be positive!");
return -EINVAL;
}
@@ -786,9 +838,8 @@ parse_name_arg(const char *key __rte_unused,
struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", value,
- RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ MRVL_LOG(ERR, "Invalid name %s, should be less than %u bytes!",
+ value, RTE_CRYPTODEV_NAME_MAX_LEN - 1);
return -EINVAL;
}
@@ -864,7 +915,7 @@ cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
.private_data_size =
sizeof(struct mrvl_crypto_private),
.max_nb_queue_pairs =
- sam_get_num_inst() * SAM_HW_RING_NUM,
+ sam_get_num_inst() * sam_get_num_cios(0),
.socket_id = rte_socket_id()
},
.max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
@@ -880,9 +931,8 @@ cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
ret = mrvl_pmd_parse_input_args(&init_params, args);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Failed to parse initialisation arguments[%s]\n",
- args);
+ MRVL_LOG(ERR, "Failed to parse initialisation arguments[%s]!",
+ args);
return -EINVAL;
}
@@ -904,11 +954,11 @@ cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD,
- "Closing Marvell crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ MRVL_LOG(INFO, "Closing Marvell crypto device %s on numa socket %u.",
+ name, rte_socket_id());
sam_deinit();
+ rte_mvep_deinit(MVEP_MOD_T_SAM);
cryptodev = rte_cryptodev_pmd_get_named_dev(name);
if (cryptodev == NULL)
@@ -935,3 +985,8 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(crypto_mrvl_init_log)
+{
+ mrvl_logtype_driver = rte_log_register("pmd.crypto.mvsam");
+}
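Editor's note: for context, a caller-side sketch of an operation the new scatter-gather path accepts: a chained source mbuf paired with a linear destination, matching the SGL-in/LB-out capability flag added above. Pool, session and length handling are assumptions, and error cleanup is omitted.

/* Illustrative only: build a 2-segment source with a contiguous destination. */
static struct rte_crypto_op *
example_sg_op(struct rte_mempool *mbuf_pool, struct rte_mempool *op_pool,
	      struct rte_cryptodev_sym_session *sess, uint16_t len)
{
	struct rte_mbuf *seg0 = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_mbuf *seg1 = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_mbuf *dst = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_crypto_op *op;

	if (seg0 == NULL || seg1 == NULL || dst == NULL)
		return NULL;

	rte_pktmbuf_append(seg0, len / 2);
	rte_pktmbuf_append(seg1, len - len / 2);
	if (rte_pktmbuf_chain(seg0, seg1) != 0)	/* seg0 is now a 2-seg chain */
		return NULL;
	rte_pktmbuf_append(dst, len);		/* destination stays linear */

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return NULL;

	op->sym->m_src = seg0;
	op->sym->m_dst = dst;	/* required: segmented src needs explicit dst */
	rte_crypto_op_attach_sym_session(op, sess);

	return op;
}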
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
index c045562c..9956f051 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -30,9 +30,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -50,9 +50,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -70,9 +70,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 12,
.max = 20,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -90,8 +90,29 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 20,
+ .min = 12,
.max = 20,
+ .increment = 4
+ },
+ }, }
+ }, }
+ },
+ {
+ /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
.increment = 0
},
}, }
@@ -110,9 +131,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 28,
+ .min = 12,
.max = 28,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -130,9 +151,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 12,
.max = 32,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -150,9 +171,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 32,
+ .min = 12,
.max = 32,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -170,9 +191,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 12,
.max = 48,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -190,9 +211,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 48,
+ .min = 12,
.max = 48,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -210,9 +231,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 64,
- .max = 64,
- .increment = 0
+ .min = 12,
+ .max = 48,
+ .increment = 4
},
}, }
}, }
@@ -230,8 +251,8 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 64,
- .max = 64,
+ .min = 12,
+ .max = 48,
.increment = 0
},
}, }
@@ -277,6 +298,26 @@ static const struct rte_cryptodev_capabilities
}, }
}, }
},
+ { /* AES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -372,6 +413,71 @@ static const struct rte_cryptodev_capabilities
}, }
}, }
},
+ { /* 3DES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_ECB,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -551,7 +657,7 @@ mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
*/
int num = sam_get_num_inst();
if (num == 0) {
- MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n");
+ MRVL_LOG(ERR, "No crypto engines detected!");
return -1;
}
@@ -635,7 +741,7 @@ mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
int ret;
if (sess == NULL) {
- MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+ MRVL_LOG(ERR, "Invalid session struct!");
return -EINVAL;
}
@@ -646,7 +752,7 @@ mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+ MRVL_LOG(ERR, "Failed to configure session parameters!");
/* Return session to mempool */
rte_mempool_put(mp, sess_private_data);
@@ -658,7 +764,7 @@ mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
if (sam_session_create(&mrvl_sess->sam_sess_params,
&mrvl_sess->sam_sess) < 0) {
- MRVL_CRYPTO_LOG_DBG("Failed to create session!");
+ MRVL_LOG(DEBUG, "Failed to create session!");
return -EIO;
}
@@ -686,7 +792,7 @@ mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
if (mrvl_sess->sam_sess &&
sam_session_destroy(mrvl_sess->sam_sess) < 0) {
- MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+ MRVL_LOG(ERR, "Error while destroying session!");
}
memset(sess, 0, sizeof(struct mrvl_crypto_session));
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_private.h b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
index c16d95b4..6f8cf562 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -12,32 +12,21 @@
#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
/**< Marvell PMD device name */
-#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
-#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
- __func__, __LINE__, ## args)
-
-#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
- __func__, __LINE__, ## args)
-
-#else
-#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
-#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
-#endif
+/** MRVL PMD LOGTYPE DRIVER */
+int mrvl_logtype_driver;
+
+#define MRVL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, mrvl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/**
* Handy bits->bytes conversion macro.
*/
#define BITS2BYTES(x) ((x) >> 3)
+#define MRVL_MAX_SEGMENTS 16
+
/** The operation order mode enumerator. */
enum mrvl_crypto_chain_order {
MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
@@ -84,6 +73,11 @@ struct mrvl_crypto_session {
uint16_t cipher_iv_offset;
} __rte_cache_aligned;
+struct mrvl_crypto_src_table {
+ uint16_t iter_ops;
+ struct sam_buf_info src_bd[MRVL_MAX_SEGMENTS];
+} __rte_cache_aligned;
+
/** Set and validate MRVL crypto session parameters */
extern int
mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index bb2b6e14..2bdcd019 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -308,7 +308,7 @@ null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
}
}
-struct rte_cryptodev_ops pmd_ops = {
+static struct rte_cryptodev_ops pmd_ops = {
.dev_configure = null_crypto_pmd_config,
.dev_start = null_crypto_pmd_start,
.dev_stop = null_crypto_pmd_stop,
diff --git a/drivers/crypto/octeontx/Makefile b/drivers/crypto/octeontx/Makefile
new file mode 100644
index 00000000..2e78e69b
--- /dev/null
+++ b/drivers/crypto/octeontx/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_octeontx_crypto.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_common_cpt
+
+VPATH += $(RTE_SDK)/drivers/crypto/octeontx
+
+CFLAGS += -O3 -DCPT_MODEL=CRYPTO_OCTEONTX
+CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
+
+# PMD code
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_capabilities.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_hw_access.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# versioning export map
+EXPORT_MAP := rte_pmd_octeontx_crypto_version.map
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_cryptodev
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/octeontx/meson.build b/drivers/crypto/octeontx/meson.build
new file mode 100644
index 00000000..6511b402
--- /dev/null
+++ b/drivers/crypto/octeontx/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_pci']
+deps += ['common_cpt']
+name = 'octeontx_crypto'
+
+sources = files('otx_cryptodev.c',
+ 'otx_cryptodev_capabilities.c',
+ 'otx_cryptodev_hw_access.c',
+ 'otx_cryptodev_mbox.c',
+ 'otx_cryptodev_ops.c')
+
+includes += include_directories('../../common/cpt')
+cflags += '-DCPT_MODEL=CRYPTO_OCTEONTX'
diff --git a/drivers/crypto/octeontx/otx_cryptodev.c b/drivers/crypto/octeontx/otx_cryptodev.c
new file mode 100644
index 00000000..269f0456
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev.c
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+
+/* CPT common headers */
+#include "cpt_pmd_logs.h"
+
+#include "otx_cryptodev.h"
+#include "otx_cryptodev_ops.h"
+
+static int otx_cryptodev_logtype;
+
+static struct rte_pci_id pci_id_cpt_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID),
+ },
+ /* sentinel */
+ {
+ .device_id = 0
+ },
+};
+
+static void
+otx_cpt_logtype_init(void)
+{
+ cpt_logtype = otx_cryptodev_logtype;
+}
+
+static int
+otx_cpt_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ int retval;
+
+ if (pci_drv == NULL)
+ return -ENODEV;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ cryptodev = rte_cryptodev_pmd_allocate(name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ cryptodev->device = &pci_dev->device;
+ cryptodev->device->driver = &pci_drv->driver;
+ cryptodev->driver_id = otx_cryptodev_driver_id;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* init logtype used in common */
+ otx_cpt_logtype_init();
+
+ /* Invoke PMD device initialization function */
+ retval = otx_cpt_dev_create(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ CPT_LOG_ERR("[DRV %s]: Failed to create device "
+ "(vendor_id: 0x%x device_id: 0x%x",
+ pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+
+ return -ENXIO;
+}
+
+static int
+otx_cpt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ if (pci_dev->driver == NULL)
+ return -ENODEV;
+
+ /* free crypto device */
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->device->driver = NULL;
+ cryptodev->device = NULL;
+ cryptodev->data = NULL;
+
+ /* free metapool memory */
+ cleanup_global_resources();
+
+ return 0;
+}
+
+static struct rte_pci_driver otx_cryptodev_pmd = {
+ .id_table = pci_id_cpt_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = otx_cpt_pci_probe,
+ .remove = otx_cpt_pci_remove,
+};
+
+static struct cryptodev_driver otx_cryptodev_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_OCTEONTX_PMD, otx_cryptodev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_OCTEONTX_PMD, pci_id_cpt_table);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(otx_cryptodev_drv, otx_cryptodev_pmd.driver,
+ otx_cryptodev_driver_id);
+
+RTE_INIT(otx_cpt_init_log)
+{
+ /* Bus level logs */
+ otx_cryptodev_logtype = rte_log_register("pmd.crypto.octeontx");
+ if (otx_cryptodev_logtype >= 0)
+ rte_log_set_level(otx_cryptodev_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/octeontx/otx_cryptodev.h b/drivers/crypto/octeontx/otx_cryptodev.h
new file mode 100644
index 00000000..6c2871d7
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_H_
+#define _OTX_CRYPTODEV_H_
+
+/* Cavium OCTEON TX crypto PMD device name */
+#define CRYPTODEV_NAME_OCTEONTX_PMD crypto_octeontx
+
+/* Device ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+#define CPT_81XX_PCI_VF_DEVICE_ID 0xa041
+
+/*
+ * Crypto device driver ID
+ */
+uint8_t otx_cryptodev_driver_id;
+
+#endif /* _OTX_CRYPTODEV_H_ */
diff --git a/drivers/crypto/octeontx/otx_cryptodev_capabilities.c b/drivers/crypto/octeontx/otx_cryptodev_capabilities.c
new file mode 100644
index 00000000..946571cf
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_capabilities.c
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_cryptodev.h>
+
+#include "otx_cryptodev_capabilities.h"
+
+static const struct rte_cryptodev_capabilities otx_capabilities[] = {
+ /* Symmetric capabilities */
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ }, },
+ }, },
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 8,
+ .max = 64,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 8
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_ECB,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .block_size = 16,
+ .key_size = {
+ .min = 32,
+ .max = 64,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 1024,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ /* End of symmetric capabilities */
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+const struct rte_cryptodev_capabilities *
+otx_get_capabilities(void)
+{
+ return otx_capabilities;
+}
diff --git a/drivers/crypto/octeontx/otx_cryptodev_capabilities.h b/drivers/crypto/octeontx/otx_cryptodev_capabilities.h
new file mode 100644
index 00000000..fc62821b
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_capabilities.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_CAPABILITIES_H_
+#define _OTX_CRYPTODEV_CAPABILITIES_H_
+
+#include <rte_cryptodev.h>
+
+/*
+ * Get capabilities list for the device
+ *
+ */
+const struct rte_cryptodev_capabilities *
+otx_get_capabilities(void);
+
+#endif /* _OTX_CRYPTODEV_CAPABILITIES_H_ */
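
The table in otx_cryptodev_capabilities.c is terminated by RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST(), so consumers of otx_get_capabilities() walk it until the RTE_CRYPTO_OP_TYPE_UNDEFINED sentinel. A minimal sketch of such a lookup (the helper name find_auth_cap is illustrative, not part of the driver):

#include <rte_cryptodev.h>

/* Hypothetical helper: scan the sentinel-terminated capability table
 * for a given symmetric auth algorithm; returns NULL if unsupported. */
static const struct rte_cryptodev_capabilities *
find_auth_cap(const struct rte_cryptodev_capabilities *caps,
	      enum rte_crypto_auth_algorithm algo)
{
	for (; caps->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; caps++) {
		if (caps->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    caps->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    caps->sym.auth.algo == algo)
			return caps;
	}
	return NULL;
}
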
diff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.c b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
new file mode 100644
index 00000000..5e705a83
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_memzone.h>
+
+#include "otx_cryptodev_hw_access.h"
+#include "otx_cryptodev_mbox.h"
+
+#include "cpt_pmd_logs.h"
+#include "cpt_hw_types.h"
+
+/*
+ * VF HAL functions
+ * Access its own BAR0/4 registers by passing VF number as 0.
+ * OS/PCI maps them accordingly.
+ */
+
+static int
+otx_cpt_vf_init(struct cpt_vf *cptvf)
+{
+ int ret = 0;
+
+ /* Check ready with PF */
+	/* Gets chip ID / device ID from PF if ready */
+ ret = otx_cpt_check_pf_ready(cptvf);
+ if (ret) {
+ CPT_LOG_ERR("%s: PF not responding to READY msg",
+ cptvf->dev_name);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
+
+exit:
+ return ret;
+}
+
+/*
+ * Read Interrupt status of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static uint64_t
+otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf)
+{
+ return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0));
+}
+
+/*
+ * Clear mailbox interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.mbox = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear instruction NCB read error interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_irde_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.irde = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear NCB result write response error interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.nwrp = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear swerr interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.swerr = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear hwerr interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.hwerr = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear translation fault interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_fault_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.fault = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear doorbell overflow interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.dovf = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/* Write to VQX_CTL register
+ */
+static void
+otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val)
+{
+ cptx_vqx_ctl_t vqx_ctl;
+
+ vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_CTL(0, 0));
+ vqx_ctl.s.ena = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_CTL(0, 0), vqx_ctl.u);
+}
+
+/* Write to VQX_INPROG register
+ */
+static void
+otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val)
+{
+ cptx_vqx_inprog_t vqx_inprg;
+
+ vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_INPROG(0, 0));
+ vqx_inprg.s.inflight = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
+}
+
+/* Write to VQX_DONE_WAIT NUMWAIT register
+ */
+static void
+otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val)
+{
+ cptx_vqx_done_wait_t vqx_dwait;
+
+ vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0));
+ vqx_dwait.s.num_wait = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
+}
+
+/* Write to VQX_DONE_WAIT TIME_WAIT register
+ */
+static void
+otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val)
+{
+ cptx_vqx_done_wait_t vqx_dwait;
+
+ vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0));
+ vqx_dwait.s.time_wait = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
+}
+
+/* Write to VQX_SADDR register
+ */
+static void
+otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val)
+{
+ cptx_vqx_saddr_t vqx_saddr;
+
+ vqx_saddr.u = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
+}
+
+static void
+otx_cpt_vfvq_init(struct cpt_vf *cptvf)
+{
+ uint64_t base_addr = 0;
+
+ /* Disable the VQ */
+ otx_cpt_write_vq_ctl(cptvf, 0);
+
+ /* Reset the doorbell */
+ otx_cpt_write_vq_doorbell(cptvf, 0);
+ /* Clear inflight */
+ otx_cpt_write_vq_inprog(cptvf, 0);
+
+ /* Write VQ SADDR */
+ base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr);
+ otx_cpt_write_vq_saddr(cptvf, base_addr);
+
+	/* Configure completion timer and count thresholds (coalescing) */
+ otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
+ otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD);
+
+ /* Enable the VQ */
+ otx_cpt_write_vq_ctl(cptvf, 1);
+}
+
+static int
+cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
+{
+ int err;
+
+ /* Convey VQ LEN to PF */
+ err = otx_cpt_send_vq_size_msg(cptvf);
+ if (err) {
+ CPT_LOG_ERR("%s: PF not responding to QLEN msg",
+ cptvf->dev_name);
+ err = -EBUSY;
+ goto cleanup;
+ }
+
+ /* CPT VF device initialization */
+ otx_cpt_vfvq_init(cptvf);
+
+	/* Send msg to PF to assign current Q to required group */
+ cptvf->vfgrp = group;
+ err = otx_cpt_send_vf_grp_msg(cptvf, group);
+ if (err) {
+ CPT_LOG_ERR("%s: PF not responding to VF_GRP msg",
+ cptvf->dev_name);
+ err = -EBUSY;
+ goto cleanup;
+ }
+
+ CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
+ return 0;
+
+cleanup:
+ return err;
+}
+
+void
+otx_cpt_poll_misc(struct cpt_vf *cptvf)
+{
+ uint64_t intr;
+
+ intr = otx_cpt_read_vf_misc_intr_status(cptvf);
+
+ if (!intr)
+ return;
+
+ /* Check for MISC interrupt types */
+ if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
+ CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d",
+ cptvf->dev_name, (unsigned int long)intr, cptvf->vfid);
+ otx_cpt_handle_mbox_intr(cptvf);
+ otx_cpt_clear_mbox_intr(cptvf);
+ } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
+ otx_cpt_clear_irde_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt "
+ "0x%lx on CPT VF %d", cptvf->dev_name,
+ (unsigned int long)intr, cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
+ otx_cpt_clear_nwrp_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx"
+ " on CPT VF %d", cptvf->dev_name,
+ (unsigned int long)intr, cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) {
+ otx_cpt_clear_swerr_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) {
+ otx_cpt_clear_hwerr_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) {
+ otx_cpt_clear_fault_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
+ otx_cpt_clear_dovf_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else
+ CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx in CPT VF %d",
+ cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+}
+
+int
+otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name)
+{
+ memset(cptvf, 0, sizeof(struct cpt_vf));
+
+ /* Bar0 base address */
+ cptvf->reg_base = reg_base;
+ strncpy(cptvf->dev_name, name, 32);
+
+ cptvf->pdev = pdev;
+
+ /* To clear if there are any pending mbox msgs */
+ otx_cpt_poll_misc(cptvf);
+
+ if (otx_cpt_vf_init(cptvf)) {
+ CPT_LOG_ERR("Failed to initialize CPT VF device");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+otx_cpt_deinit_device(void *dev)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+
+ /* Do misc work one last time */
+ otx_cpt_poll_misc(cptvf);
+
+ return 0;
+}
+
+int
+otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance)
+{
+ int ret = -ENOENT, len, qlen, i;
+ int chunk_len, chunks, chunk_size;
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+ struct cpt_instance *cpt_instance;
+ struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
+ struct command_chunk *chunk = NULL;
+ uint8_t *mem;
+ const struct rte_memzone *rz;
+ uint64_t dma_addr = 0, alloc_len, used_len;
+ uint64_t *next_ptr;
+ uint64_t pg_sz = sysconf(_SC_PAGESIZE);
+
+ CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);
+
+ cpt_instance = &cptvf->instance;
+
+ memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
+ memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));
+
+	/* Chunks are fixed-size buffers */
+ chunks = DEFAULT_CMD_QCHUNKS;
+ chunk_len = DEFAULT_CMD_QCHUNK_SIZE;
+
+ qlen = chunks * chunk_len;
+ /* Chunk size includes 8 bytes of next chunk ptr */
+ chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;
+
+ /* For command chunk structures */
+ len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);
+
+ /* For pending queue */
+ len += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+
+	/* Align so that instruction queues start page-size aligned */
+ len = RTE_ALIGN(len, pg_sz);
+
+ /* For Instruction queues */
+ len += chunks * RTE_ALIGN(chunk_size, 128);
+
+ /* Wastage after instruction queues */
+ len = RTE_ALIGN(len, pg_sz);
+
+ rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_256MB,
+ RTE_CACHE_LINE_SIZE);
+ if (!rz) {
+ ret = rte_errno;
+ goto cleanup;
+ }
+
+ mem = rz->addr;
+ dma_addr = rz->phys_addr;
+ alloc_len = len;
+
+ memset(mem, 0, len);
+
+ cpt_instance->rsvd = (uintptr_t)rz;
+
+ /* Pending queue setup */
+ cptvf->pqueue.rid_queue = (struct rid *)mem;
+ cptvf->pqueue.enq_tail = 0;
+ cptvf->pqueue.deq_head = 0;
+ cptvf->pqueue.pending_count = 0;
+
+ mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+ len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
+ dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+
+ /* Alignment wastage */
+ used_len = alloc_len - len;
+ mem += RTE_ALIGN(used_len, pg_sz) - used_len;
+ len -= RTE_ALIGN(used_len, pg_sz) - used_len;
+ dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;
+
+ /* Init instruction queues */
+ chunk_head = &cptvf->cqueue.chead[0];
+ i = qlen;
+
+ chunk_prev = NULL;
+ for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
+ int csize;
+
+ chunk = &cptvf->cqueue.chead[i];
+ chunk->head = mem;
+ chunk->dma_addr = dma_addr;
+
+ csize = RTE_ALIGN(chunk_size, 128);
+ mem += csize;
+ dma_addr += csize;
+ len -= csize;
+
+ if (chunk_prev) {
+ next_ptr = (uint64_t *)(chunk_prev->head +
+ chunk_size - 8);
+ *next_ptr = (uint64_t)chunk->dma_addr;
+ }
+ chunk_prev = chunk;
+ }
+ /* Circular loop */
+ next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
+ *next_ptr = (uint64_t)chunk_head->dma_addr;
+
+ assert(!len);
+
+ /* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
+ cptvf->qsize = chunk_size / 8;
+ cptvf->cqueue.qhead = chunk_head->head;
+ cptvf->cqueue.idx = 0;
+ cptvf->cqueue.cchunk = 0;
+
+ if (cpt_vq_init(cptvf, group)) {
+ CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
+ cptvf->dev_name);
+ ret = -EBUSY;
+ goto cleanup;
+ }
+
+ *instance = cpt_instance;
+
+ CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);
+
+ return 0;
+cleanup:
+ rte_memzone_free(rz);
+ *instance = NULL;
+ return ret;
+}
+
+int
+otx_cpt_put_resource(struct cpt_instance *instance)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct rte_memzone *rz;
+
+ if (!cptvf) {
+ CPT_LOG_ERR("Invalid CPTVF handle");
+ return -EINVAL;
+ }
+
+ CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);
+
+ rz = (struct rte_memzone *)instance->rsvd;
+ rte_memzone_free(rz);
+ return 0;
+}
+
+int
+otx_cpt_start_device(void *dev)
+{
+ int rc;
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+
+ rc = otx_cpt_send_vf_up(cptvf);
+ if (rc) {
+ CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d",
+ cptvf->dev_name, rc);
+ return -EFAULT;
+ }
+
+ if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) {
+ CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF "
+ "device %s", cptvf->vftype, cptvf->dev_name);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+void
+otx_cpt_stop_device(void *dev)
+{
+ int rc;
+ uint32_t pending, retries = 5;
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+
+ /* Wait for pending entries to complete */
+ pending = otx_cpt_read_vq_doorbell(cptvf);
+ while (pending) {
+ CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete",
+ cptvf->dev_name, pending);
+ sleep(1);
+ pending = otx_cpt_read_vq_doorbell(cptvf);
+ retries--;
+ if (!retries)
+ break;
+ }
+
+ if (!retries && pending) {
+ CPT_LOG_ERR("%s: Timeout waiting for commands(%u)",
+ cptvf->dev_name, pending);
+ return;
+ }
+
+ rc = otx_cpt_send_vf_down(cptvf);
+ if (rc) {
+ CPT_LOG_ERR("Failed to bring down vf %s, rc %d",
+ cptvf->dev_name, rc);
+ return;
+ }
+}
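
As a worked example of the queue sizing in otx_cpt_get_resource(): assuming CPT_INST_SIZE is 64 bytes and CPT_NEXT_CHUNK_PTR_SIZE is 8 (both come from the common CPT headers, not shown here), each of the DEFAULT_CMD_QCHUNKS = 2 chunks holds DEFAULT_CMD_QCHUNK_SIZE = 1023 instructions, so chunk_size = 1023 * 64 + 8 = 65480 bytes, which occupies RTE_ALIGN(65480, 128) = 65536 bytes in the memzone, and cptvf->qsize is programmed as chunk_size / 8 = 8185. The 8-byte tail of each chunk stores the DMA address of the next chunk, with the last chunk pointing back to the first to close the ring.
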
diff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.h b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
new file mode 100644
index 00000000..82b15eea
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+#ifndef _OTX_CRYPTODEV_HW_ACCESS_H_
+#define _OTX_CRYPTODEV_HW_ACCESS_H_
+
+#include <stdbool.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_memory.h>
+#include <rte_prefetch.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+#include "cpt_pmd_logs.h"
+
+#define CPT_INTR_POLL_INTERVAL_MS (50)
+
+/* Default command queue length */
+#define DEFAULT_CMD_QCHUNKS 2
+#define DEFAULT_CMD_QCHUNK_SIZE 1023
+#define DEFAULT_CMD_QLEN \
+ (DEFAULT_CMD_QCHUNK_SIZE * DEFAULT_CMD_QCHUNKS)
+
+#define CPT_CSR_REG_BASE(cpt) ((cpt)->reg_base)
+
+/* Read hw register */
+#define CPT_READ_CSR(__hw_addr, __offset) \
+ rte_read64_relaxed((uint8_t *)__hw_addr + __offset)
+
+/* Write hw register */
+#define CPT_WRITE_CSR(__hw_addr, __offset, __val) \
+ rte_write64_relaxed((__val), ((uint8_t *)__hw_addr + __offset))
+
+/* cpt instance */
+struct cpt_instance {
+ uint32_t queue_id;
+ uintptr_t rsvd;
+};
+
+struct command_chunk {
+ /** 128-byte aligned real_vaddr */
+ uint8_t *head;
+ /** 128-byte aligned real_dma_addr */
+ phys_addr_t dma_addr;
+};
+
+/**
+ * Command queue structure
+ */
+struct command_queue {
+ /** Command queue host write idx */
+ uint32_t idx;
+ /** Command queue chunk */
+ uint32_t cchunk;
+ /** Command queue head; instructions are inserted here */
+ uint8_t *qhead;
+ /** Command chunk list head */
+ struct command_chunk chead[DEFAULT_CMD_QCHUNKS];
+};
+
+/**
+ * CPT VF device structure
+ */
+struct cpt_vf {
+ /** CPT instance */
+ struct cpt_instance instance;
+ /** Register start address */
+ uint8_t *reg_base;
+ /** Command queue information */
+ struct command_queue cqueue;
+ /** Pending queue information */
+ struct pending_queue pqueue;
+ /** Meta information per vf */
+ struct cptvf_meta_info meta_info;
+
+ /** Below fields are accessed only in control path */
+
+ /** Env specific pdev representing the pci dev */
+ void *pdev;
+ /** Calculated queue size */
+ uint32_t qsize;
+	/** Device index (0...CPT_MAX_VQ_NUM) */
+	uint8_t vfid;
+	/** VF type of cpt_vf_type_t (SE_TYPE(2) or AE_TYPE(1)) */
+	uint8_t vftype;
+ /** VF group (0 - 8) */
+ uint8_t vfgrp;
+ /** Operating node: Bits (46:44) in BAR0 address */
+ uint8_t node;
+
+ /** VF-PF mailbox communication */
+
+ /** Flag if acked */
+ bool pf_acked;
+ /** Flag if not acked */
+ bool pf_nacked;
+
+ /** Device name */
+ char dev_name[32];
+} __rte_cache_aligned;
+
+/*
+ * CPT Registers map for 81xx
+ */
+
+/* VF registers */
+#define CPTX_VQX_CTL(a, b) (0x0000100ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_SADDR(a, b) (0x0000200ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_WAIT(a, b) (0x0000400ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_INPROG(a, b) (0x0000410ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_DONE(a, b) (0x0000420ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_ACK(a, b) (0x0000440ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_INT_W1S(a, b) (0x0000460ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_INT_W1C(a, b) (0x0000468ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_ENA_W1S(a, b) (0x0000470ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_ENA_W1C(a, b) (0x0000478ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_INT(a, b) (0x0000500ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_INT_W1S(a, b) (0x0000508ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_ENA_W1S(a, b) (0x0000510ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_ENA_W1C(a, b) (0x0000518ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DOORBELL(a, b) (0x0000600ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VFX_PF_MBOXX(a, b, c) (0x0001000ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b) + \
+ 8ll * ((c) & 0x1))
+
+/* VF HAL functions */
+
+void
+otx_cpt_poll_misc(struct cpt_vf *cptvf);
+
+int
+otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name);
+
+int
+otx_cpt_deinit_device(void *dev);
+
+int
+otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance);
+
+int
+otx_cpt_put_resource(struct cpt_instance *instance);
+
+int
+otx_cpt_start_device(void *cptvf);
+
+void
+otx_cpt_stop_device(void *cptvf);
+
+/* Write to VQX_DOORBELL register
+ */
+static __rte_always_inline void
+otx_cpt_write_vq_doorbell(struct cpt_vf *cptvf, uint32_t val)
+{
+ cptx_vqx_doorbell_t vqx_dbell;
+
+ vqx_dbell.u = 0;
+ vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u);
+}
+
+static __rte_always_inline uint32_t
+otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf)
+{
+ cptx_vqx_doorbell_t vqx_dbell;
+
+ vqx_dbell.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DOORBELL(0, 0));
+ return vqx_dbell.s.dbell_cnt;
+}
+
+static __rte_always_inline void
+otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ /* Memory barrier to flush pending writes */
+ rte_smp_wmb();
+ otx_cpt_write_vq_doorbell(cptvf, count);
+}
+
+static __rte_always_inline void *
+get_cpt_inst(struct command_queue *cqueue)
+{
+ CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx);
+ return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE];
+}
+
+static __rte_always_inline void
+fill_cpt_inst(struct cpt_instance *instance, void *req)
+{
+ struct command_queue *cqueue;
+ cpt_inst_s_t *cpt_ist_p;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct cpt_request_info *user_req = (struct cpt_request_info *)req;
+ cqueue = &cptvf->cqueue;
+ cpt_ist_p = get_cpt_inst(cqueue);
+ rte_prefetch_non_temporal(cpt_ist_p);
+
+ /* EI0, EI1, EI2, EI3 are already prepared */
+ /* HW W0 */
+ cpt_ist_p->u[0] = 0;
+ /* HW W1 */
+ cpt_ist_p->s8x.res_addr = user_req->comp_baddr;
+ /* HW W2 */
+ cpt_ist_p->u[2] = 0;
+ /* HW W3 */
+ cpt_ist_p->s8x.wq_ptr = 0;
+
+ /* MC EI0 */
+ cpt_ist_p->s8x.ei0 = user_req->ist.ei0;
+ /* MC EI1 */
+ cpt_ist_p->s8x.ei1 = user_req->ist.ei1;
+ /* MC EI2 */
+ cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
+ /* MC EI3 */
+ cpt_ist_p->s8x.ei3 = user_req->ist.ei3;
+}
+
+static __rte_always_inline void
+mark_cpt_inst(struct cpt_instance *instance)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct command_queue *queue = &cptvf->cqueue;
+ if (unlikely(++queue->idx >= DEFAULT_CMD_QCHUNK_SIZE)) {
+ uint32_t cchunk = queue->cchunk;
+ MOD_INC(cchunk, DEFAULT_CMD_QCHUNKS);
+ queue->qhead = queue->chead[cchunk].head;
+ queue->idx = 0;
+ queue->cchunk = cchunk;
+ }
+}
+
+static __rte_always_inline uint8_t
+check_nb_command_id(struct cpt_request_info *user_req,
+ struct cpt_instance *instance)
+{
+ uint8_t ret = ERR_REQ_PENDING;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ volatile cpt_res_s_t *cptres;
+
+ cptres = (volatile cpt_res_s_t *)user_req->completion_addr;
+
+ if (unlikely(cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE)) {
+ /*
+ * Wait for some time for this command to get completed
+ * before timing out
+ */
+ if (rte_get_timer_cycles() < user_req->time_out)
+ return ret;
+ /*
+ * TODO: See if alternate caddr can be used to not loop
+ * longer than needed.
+ */
+ if ((cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE) &&
+ (user_req->extra_time < TIME_IN_RESET_COUNT)) {
+ user_req->extra_time++;
+ return ret;
+ }
+
+ if (cptres->s8x.compcode != CPT_8X_COMP_E_NOTDONE)
+ goto complete;
+
+ ret = ERR_REQ_TIMEOUT;
+	CPT_LOG_DP_ERR("Request %p timed out", user_req);
+ otx_cpt_poll_misc(cptvf);
+ goto exit;
+ }
+
+complete:
+ if (likely(cptres->s8x.compcode == CPT_8X_COMP_E_GOOD)) {
+ ret = 0; /* success */
+ if (unlikely((uint8_t)*user_req->alternate_caddr)) {
+ ret = (uint8_t)*user_req->alternate_caddr;
+ CPT_LOG_DP_ERR("Request %p : failed with microcode"
+ " error, MC completion code : 0x%x", user_req,
+ ret);
+ }
+ CPT_LOG_DP_DEBUG("MC status %.8x\n",
+ *((volatile uint32_t *)user_req->alternate_caddr));
+ CPT_LOG_DP_DEBUG("HW status %.8x\n",
+ *((volatile uint32_t *)user_req->completion_addr));
+ } else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) ||
+ (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) {
+ ret = (uint8_t)*user_req->alternate_caddr;
+ if (!ret)
+ ret = ERR_BAD_ALT_CCODE;
+ CPT_LOG_DP_DEBUG("Request %p : failed with %s : err code :%x",
+ user_req,
+ (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT) ?
+ "DMA Fault" : "Software error", ret);
+ } else {
+ CPT_LOG_DP_ERR("Request %p : unexpected completion code %d",
+ user_req, cptres->s8x.compcode);
+ ret = (uint8_t)*user_req->alternate_caddr;
+ }
+
+exit:
+ return ret;
+}
+
+#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */
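
Taken together, the inline helpers above form the fast-path submit sequence; a sketch of how they chain, assuming req is a struct cpt_request_info already prepared by the common CPT code (EI0..EI3 and completion address filled in). cpt_submit_one is a hypothetical wrapper; the real enqueue loop lives in the common request manager:

static inline void
cpt_submit_one(struct cpt_instance *instance, struct cpt_request_info *req)
{
	fill_cpt_inst(instance, req);    /* copy the instruction words into the VQ */
	mark_cpt_inst(instance);         /* advance idx, hop to the next chunk on wrap */
	otx_cpt_ring_dbell(instance, 1); /* barrier + doorbell: 1 instruction = 8 words */
}
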
diff --git a/drivers/crypto/octeontx/otx_cryptodev_mbox.c b/drivers/crypto/octeontx/otx_cryptodev_mbox.c
new file mode 100644
index 00000000..a8e51a8e
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_mbox.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <unistd.h>
+
+#include "otx_cryptodev_hw_access.h"
+#include "otx_cryptodev_mbox.h"
+
+void
+otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ /*
+ * MBOX[0] contains msg
+ * MBOX[1] contains data
+ */
+ mbx.msg = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 0));
+ mbx.data = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 1));
+
+ CPT_LOG_DP_DEBUG("%s: Mailbox msg 0x%lx from PF",
+ cptvf->dev_name, (unsigned int long)mbx.msg);
+ switch (mbx.msg) {
+ case OTX_CPT_MSG_READY:
+ {
+ otx_cpt_chipid_vfid_t cid;
+
+ cid.u64 = mbx.data;
+ cptvf->pf_acked = true;
+ cptvf->vfid = cid.s.vfid;
+ CPT_LOG_DP_DEBUG("%s: Received VFID %d chip_id %d",
+ cptvf->dev_name,
+ cptvf->vfid, cid.s.chip_id);
+ }
+ break;
+ case OTX_CPT_MSG_QBIND_GRP:
+ cptvf->pf_acked = true;
+ cptvf->vftype = mbx.data;
+ CPT_LOG_DP_DEBUG("%s: VF %d type %s group %d",
+ cptvf->dev_name, cptvf->vfid,
+ ((mbx.data == SE_TYPE) ? "SE" : "AE"),
+ cptvf->vfgrp);
+ break;
+ case OTX_CPT_MBOX_MSG_TYPE_ACK:
+ cptvf->pf_acked = true;
+ break;
+ case OTX_CPT_MBOX_MSG_TYPE_NACK:
+ cptvf->pf_nacked = true;
+ break;
+ default:
+ CPT_LOG_DP_DEBUG("%s: Invalid msg from PF, msg 0x%lx",
+ cptvf->dev_name, (unsigned int long)mbx.msg);
+ break;
+ }
+}
+
+/* Send a mailbox message to PF
+ * @vf: vf from which this message to be sent
+ * @mbx: Message to be sent
+ */
+static void
+otx_cpt_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
+{
+ /* Writing mbox(1) causes interrupt */
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 0), mbx->msg);
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 1), mbx->data);
+}
+
+static int32_t
+otx_cpt_send_msg_to_pf_timeout(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
+{
+ int timeout = OTX_CPT_MBOX_MSG_TIMEOUT;
+ int sleep_ms = 10;
+
+ cptvf->pf_acked = false;
+ cptvf->pf_nacked = false;
+
+ otx_cpt_send_msg_to_pf(cptvf, mbx);
+
+ /* Wait for previous message to be acked, timeout 2sec */
+ while (!cptvf->pf_acked) {
+ if (cptvf->pf_nacked)
+ return -EINVAL;
+ usleep(sleep_ms * 1000);
+ otx_cpt_poll_misc(cptvf);
+ if (cptvf->pf_acked)
+ break;
+ timeout -= sleep_ms;
+ if (!timeout) {
+ CPT_LOG_ERR("%s: PF didn't ack mbox msg %lx(vfid %u)",
+ cptvf->dev_name,
+ (unsigned int long)(mbx->msg & 0xFF),
+ cptvf->vfid);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+int
+otx_cpt_check_pf_ready(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_READY;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to READY msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vq_size_msg(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_QLEN;
+
+ mbx.data = cptvf->qsize;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to vq_size msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vf_grp_msg(struct cpt_vf *cptvf, uint32_t group)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_QBIND_GRP;
+
+ /* Convey group of the VF */
+ mbx.data = group;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to vf_type msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vf_up(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_VF_UP;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to UP msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vf_down(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_VF_DOWN;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to DOWN msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/octeontx/otx_cryptodev_mbox.h b/drivers/crypto/octeontx/otx_cryptodev_mbox.h
new file mode 100644
index 00000000..b05d1c50
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_mbox.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_MBOX_H_
+#define _OTX_CRYPTODEV_MBOX_H_
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include "cpt_common.h"
+#include "cpt_pmd_logs.h"
+
+#include "otx_cryptodev_hw_access.h"
+
+#define OTX_CPT_MBOX_MSG_TIMEOUT 2000 /* In milliseconds */
+
+#define OTX_CPT_MBOX_MSG_TYPE_REQ 0
+#define OTX_CPT_MBOX_MSG_TYPE_ACK 1
+#define OTX_CPT_MBOX_MSG_TYPE_NACK 2
+#define OTX_CPT_MBOX_MSG_TYPE_NOP 3
+
+/* CPT mailbox structure */
+struct cpt_mbox {
+ /** Message type MBOX[0] */
+ uint64_t msg;
+ /** Data MBOX[1] */
+ uint64_t data;
+};
+
+typedef enum {
+ OTX_CPT_MSG_VF_UP = 1,
+ OTX_CPT_MSG_VF_DOWN,
+ OTX_CPT_MSG_READY,
+ OTX_CPT_MSG_QLEN,
+ OTX_CPT_MSG_QBIND_GRP,
+ OTX_CPT_MSG_VQ_PRIORITY,
+ OTX_CPT_MSG_PF_TYPE,
+} otx_cpt_mbox_opcode_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint32_t chip_id;
+ uint8_t vfid;
+ uint8_t reserved[3];
+#else
+ uint8_t reserved[3];
+ uint8_t vfid;
+ uint32_t chip_id;
+#endif
+ } s;
+} otx_cpt_chipid_vfid_t;
+
+/* Poll handler to handle mailbox messages from PF */
+void
+otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf);
+
+/*
+ * Checks if VF is able to communicate with PF
+ * and also gets the CPT number this VF is associated with.
+ */
+int
+otx_cpt_check_pf_ready(struct cpt_vf *cptvf);
+
+/*
+ * Communicate VQs size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF.
+ * Must be ACKed.
+ */
+int
+otx_cpt_send_vq_size_msg(struct cpt_vf *cptvf);
+
+/*
+ * Communicate VF group required to PF and get the VQ bound to that group
+ */
+int
+otx_cpt_send_vf_grp_msg(struct cpt_vf *cptvf, uint32_t group);
+
+/*
+ * Communicate to PF that VF is UP and running
+ */
+int
+otx_cpt_send_vf_up(struct cpt_vf *cptvf);
+
+/*
+ * Communicate to PF that VF is going DOWN
+ */
+int
+otx_cpt_send_vf_down(struct cpt_vf *cptvf);
+
+#endif /* _OTX_CRYPTODEV_MBOX_H_ */
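
The mailbox API above implies a fixed bring-up order for a VF. A condensed sketch with error handling reduced to early returns (cptvf_bringup is a hypothetical wrapper; the real sequence is split across otx_cpt_vf_init(), cpt_vq_init() and otx_cpt_start_device()):

static int
cptvf_bringup(struct cpt_vf *cptvf, uint32_t group)
{
	if (otx_cpt_check_pf_ready(cptvf))          /* READY: learn vfid/chip id */
		return -EBUSY;
	if (otx_cpt_send_vq_size_msg(cptvf))        /* QLEN: program the VQ size */
		return -EBUSY;
	if (otx_cpt_send_vf_grp_msg(cptvf, group))  /* QBIND_GRP: bind to a group */
		return -EBUSY;
	return otx_cpt_send_vf_up(cptvf) ? -EBUSY : 0;  /* VF_UP */
}
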
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
new file mode 100644
index 00000000..23f96591
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_alarm.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "cpt_pmd_logs.h"
+#include "cpt_pmd_ops_helper.h"
+#include "cpt_ucode.h"
+#include "cpt_request_mgr.h"
+
+#include "otx_cryptodev.h"
+#include "otx_cryptodev_capabilities.h"
+#include "otx_cryptodev_hw_access.h"
+#include "otx_cryptodev_ops.h"
+
+static int otx_cryptodev_probe_count;
+static rte_spinlock_t otx_probe_count_lock = RTE_SPINLOCK_INITIALIZER;
+
+static struct rte_mempool *otx_cpt_meta_pool;
+static int otx_cpt_op_mlen;
+static int otx_cpt_op_sb_mlen;
+
+/* Forward declarations */
+
+static int
+otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
+
+/*
+ * Initializes global variables used by fast-path code
+ *
+ * @return
+ * - 0 on success, errcode on error
+ */
+static int
+init_global_resources(void)
+{
+ /* Get meta len for scatter gather mode */
+ otx_cpt_op_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
+
+	/* Extra 4 x 8 bytes reserved for future use */
+ otx_cpt_op_mlen += 4 * sizeof(uint64_t);
+
+ otx_cpt_meta_pool = rte_mempool_create("cpt_metabuf-pool", 4096 * 16,
+ otx_cpt_op_mlen, 512, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!otx_cpt_meta_pool) {
+ CPT_LOG_ERR("cpt metabuf pool not created");
+ return -ENOMEM;
+ }
+
+ /* Get meta len for direct mode */
+ otx_cpt_op_sb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
+
+	/* Extra 4 x 8 bytes reserved for future use */
+ otx_cpt_op_sb_mlen += 4 * sizeof(uint64_t);
+
+ return 0;
+}
+
+void
+cleanup_global_resources(void)
+{
+ /* Take lock */
+ rte_spinlock_lock(&otx_probe_count_lock);
+
+ /* Decrement the cryptodev count */
+ otx_cryptodev_probe_count--;
+
+ /* Free buffers */
+ if (otx_cpt_meta_pool && otx_cryptodev_probe_count == 0)
+ rte_mempool_free(otx_cpt_meta_pool);
+
+	/* Release lock */
+ rte_spinlock_unlock(&otx_probe_count_lock);
+}
+
+/* Alarm routines */
+
+static void
+otx_cpt_alarm_cb(void *arg)
+{
+ struct cpt_vf *cptvf = arg;
+ otx_cpt_poll_misc(cptvf);
+ rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
+ otx_cpt_alarm_cb, cptvf);
+}
+
+static int
+otx_cpt_periodic_alarm_start(void *arg)
+{
+ return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
+ otx_cpt_alarm_cb, arg);
+}
+
+static int
+otx_cpt_periodic_alarm_stop(void *arg)
+{
+ return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
+}
+
+/* PMD ops */
+
+static int
+otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static int
+otx_cpt_dev_start(struct rte_cryptodev *c_dev)
+{
+ void *cptvf = c_dev->data->dev_private;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ return otx_cpt_start_device(cptvf);
+}
+
+static void
+otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
+{
+ void *cptvf = c_dev->data->dev_private;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ otx_cpt_stop_device(cptvf);
+}
+
+static int
+otx_cpt_dev_close(struct rte_cryptodev *c_dev)
+{
+ void *cptvf = c_dev->data->dev_private;
+ int i, ret;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
+ ret = otx_cpt_que_pair_release(c_dev, i);
+ if (ret)
+ return ret;
+ }
+
+ otx_cpt_periodic_alarm_stop(cptvf);
+ otx_cpt_deinit_device(cptvf);
+
+ return 0;
+}
+
+static void
+otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = otx_get_capabilities();
+ info->sym.max_nb_sessions = 0;
+ info->driver_id = otx_cryptodev_driver_id;
+ info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
+ info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
+ }
+}
+
+static void
+otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_stats *stats __rte_unused)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+}
+
+static void
+otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+}
+
+static int
+otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
+ uint16_t que_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id __rte_unused,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ void *cptvf = dev->data->dev_private;
+ struct cpt_instance *instance = NULL;
+ struct rte_pci_device *pci_dev;
+ int ret = -1;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->queue_pairs[que_pair_id] != NULL) {
+ ret = otx_cpt_que_pair_release(dev, que_pair_id);
+ if (ret)
+ return ret;
+ }
+
+ if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
+		CPT_LOG_INFO("Number of descriptors (%d) too high, using default "
+			     "queue length of %d", qp_conf->nb_descriptors,
+			     DEFAULT_CMD_QLEN);
+ }
+
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ CPT_LOG_ERR("PCI mem address null");
+ return -EIO;
+ }
+
+ ret = otx_cpt_get_resource(cptvf, 0, &instance);
+ if (ret != 0) {
+ CPT_LOG_ERR("Error getting instance handle from device %s : "
+ "ret = %d", dev->data->name, ret);
+ return ret;
+ }
+
+ instance->queue_id = que_pair_id;
+ dev->data->queue_pairs[que_pair_id] = instance;
+
+ return 0;
+}
+
+static int
+otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
+{
+ struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
+ int ret;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ ret = otx_cpt_put_resource(instance);
+ if (ret != 0) {
+ CPT_LOG_ERR("Error putting instance handle of device %s : "
+ "ret = %d", dev->data->name, ret);
+ return ret;
+ }
+
+ dev->data->queue_pairs[que_pair_id] = NULL;
+
+ return 0;
+}
+
+static unsigned int
+otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return cpt_get_session_size();
+}
+
+static void
+otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
+{
+ struct rte_cryptodev_sym_session *sess = sym_sess;
+ struct cpt_sess_misc *cpt_sess =
+ (struct cpt_sess_misc *) get_sym_session_private_data(sess, driver_id);
+
+ CPT_PMD_INIT_FUNC_TRACE();
+ cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
+ sizeof(struct cpt_sess_misc);
+}
+
+static int
+otx_cpt_session_cfg(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ struct rte_crypto_sym_xform *chain;
+ void *sess_private_data = NULL;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (cpt_is_algo_supported(xform))
+ goto err;
+
+ if (unlikely(sess == NULL)) {
+ CPT_LOG_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CPT_LOG_ERR("Could not allocate sess_private_data");
+ return -ENOMEM;
+ }
+
+ chain = xform;
+ while (chain) {
+ switch (chain->type) {
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ if (fill_sess_aead(chain, sess_private_data))
+ goto err;
+ break;
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ if (fill_sess_cipher(chain, sess_private_data))
+ goto err;
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (fill_sess_gmac(chain, sess_private_data))
+ goto err;
+ } else {
+ if (fill_sess_auth(chain, sess_private_data))
+ goto err;
+ }
+ break;
+ default:
+ CPT_LOG_ERR("Invalid crypto xform type");
+ break;
+ }
+ chain = chain->next;
+ }
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+ otx_cpt_session_init(sess, dev->driver_id);
+ return 0;
+
+err:
+ if (sess_private_data)
+ rte_mempool_put(mempool, sess_private_data);
+ return -EPERM;
+}
+
+static void
+otx_cpt_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+
+ CPT_PMD_INIT_FUNC_TRACE();
+ if (sess_priv) {
+ memset(sess_priv, 0, otx_cpt_get_session_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, dev->driver_id, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static uint16_t
+otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cpt_instance *instance = (struct cpt_instance *)qptr;
+ uint16_t count = 0;
+ int ret;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct pending_queue *pqueue = &cptvf->pqueue;
+
+ count = DEFAULT_CMD_QLEN - pqueue->pending_count;
+ if (nb_ops > count)
+ nb_ops = count;
+
+ count = 0;
+ while (likely(count < nb_ops)) {
+ ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
+ otx_cryptodev_driver_id);
+ if (unlikely(ret))
+ break;
+ count++;
+ }
+ otx_cpt_ring_dbell(instance, count);
+ return count;
+}
+
+static uint16_t
+otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cpt_instance *instance = (struct cpt_instance *)qptr;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct pending_queue *pqueue = &cptvf->pqueue;
+ uint16_t nb_completed, i = 0;
+ uint8_t compcode[nb_ops];
+
+ nb_completed = cpt_dequeue_burst(instance, nb_ops,
+ (void **)ops, compcode, pqueue);
+ while (likely(i < nb_completed)) {
+ struct rte_crypto_op *cop;
+ void *metabuf;
+ uintptr_t *rsp;
+ uint8_t status;
+
+ rsp = (void *)ops[i];
+ status = compcode[i];
+ if (likely((i + 1) < nb_completed))
+ rte_prefetch0(ops[i+1]);
+ metabuf = (void *)rsp[0];
+ cop = (void *)rsp[1];
+
+ ops[i] = cop;
+
+ if (likely(status == 0)) {
+ if (likely(!rsp[2]))
+ cop->status =
+ RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ compl_auth_verify(cop, (uint8_t *)rsp[2],
+ rsp[3]);
+ } else if (status == ERR_GC_ICV_MISCOMPARE) {
+			/* auth data mismatch */
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
+ i++;
+ }
+ return nb_completed;
+}
+
+static struct rte_cryptodev_ops cptvf_ops = {
+ /* Device related operations */
+ .dev_configure = otx_cpt_dev_config,
+ .dev_start = otx_cpt_dev_start,
+ .dev_stop = otx_cpt_dev_stop,
+ .dev_close = otx_cpt_dev_close,
+ .dev_infos_get = otx_cpt_dev_info_get,
+
+ .stats_get = otx_cpt_stats_get,
+ .stats_reset = otx_cpt_stats_reset,
+ .queue_pair_setup = otx_cpt_que_pair_setup,
+ .queue_pair_release = otx_cpt_que_pair_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = otx_cpt_get_session_size,
+ .sym_session_configure = otx_cpt_session_cfg,
+ .sym_session_clear = otx_cpt_session_clear
+};
+
+static void
+otx_cpt_common_vars_init(struct cpt_vf *cptvf)
+{
+ cptvf->meta_info.cptvf_meta_pool = otx_cpt_meta_pool;
+ cptvf->meta_info.cptvf_op_mlen = otx_cpt_op_mlen;
+ cptvf->meta_info.cptvf_op_sb_mlen = otx_cpt_op_sb_mlen;
+}
+
+int
+otx_cpt_dev_create(struct rte_cryptodev *c_dev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
+ struct cpt_vf *cptvf = NULL;
+ void *reg_base;
+ char dev_name[32];
+ int ret;
+
+ if (pdev->mem_resource[0].phys_addr == 0ULL)
+ return -EIO;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
+ sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cptvf == NULL) {
+ CPT_LOG_ERR("Cannot allocate memory for device private data");
+ return -ENOMEM;
+ }
+
+ snprintf(dev_name, 32, "%02x:%02x.%x",
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+
+ reg_base = pdev->mem_resource[0].addr;
+ if (!reg_base) {
+ CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
+ if (ret) {
+ CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
+ ret = -EIO;
+ goto fail;
+ }
+
+ /* Start off timer for mailbox interrupts */
+ otx_cpt_periodic_alarm_start(cptvf);
+
+ rte_spinlock_lock(&otx_probe_count_lock);
+ if (!otx_cryptodev_probe_count) {
+ ret = init_global_resources();
+ if (ret) {
+ rte_spinlock_unlock(&otx_probe_count_lock);
+ goto init_fail;
+ }
+ }
+ otx_cryptodev_probe_count++;
+ rte_spinlock_unlock(&otx_probe_count_lock);
+
+ /* Initialize data path variables used by common code */
+ otx_cpt_common_vars_init(cptvf);
+
+ c_dev->dev_ops = &cptvf_ops;
+
+ c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
+ c_dev->dequeue_burst = otx_cpt_pkt_dequeue;
+
+ c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
+
+ /* Save dev private data */
+ c_dev->data->dev_private = cptvf;
+
+ return 0;
+
+init_fail:
+ otx_cpt_periodic_alarm_stop(cptvf);
+ otx_cpt_deinit_device(cptvf);
+
+fail:
+ if (cptvf) {
+ /* Free private data allocated */
+ rte_free(cptvf);
+ }
+
+ return ret;
+}
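
For readability, the four response words that otx_cpt_pkt_dequeue() unpacks from each completed request can be pictured as the struct below (purely illustrative; the words are laid out by the common CPT request code and accessed above as rsp[0..3]):

struct otx_cpt_rsp_view {           /* hypothetical view of rsp[] */
	void *metabuf;              /* rsp[0]: meta buffer returned to the pool */
	struct rte_crypto_op *cop;  /* rsp[1]: crypto op handed back to the app */
	uint8_t *auth_data;         /* rsp[2]: digest to verify, 0 if none */
	uint64_t auth_len;          /* rsp[3]: digest length for compl_auth_verify() */
};
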
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.h b/drivers/crypto/octeontx/otx_cryptodev_ops.h
new file mode 100644
index 00000000..b3efecf0
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_OPS_H_
+#define _OTX_CRYPTODEV_OPS_H_
+
+#define OTX_CPT_MIN_HEADROOM_REQ (24)
+#define OTX_CPT_MIN_TAILROOM_REQ (8)
+#define CPT_NUM_QS_PER_VF (1)
+
+void
+cleanup_global_resources(void);
+
+int
+otx_cpt_dev_create(struct rte_cryptodev *c_dev);
+
+#endif /* _OTX_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map b/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map
new file mode 100644
index 00000000..521e51f4
--- /dev/null
+++ b/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map
@@ -0,0 +1,4 @@
+DPDK_18.11 {
+
+ local: *;
+};
diff --git a/drivers/crypto/openssl/compat.h b/drivers/crypto/openssl/compat.h
index 45f9a33d..eecb7d36 100644
--- a/drivers/crypto/openssl/compat.h
+++ b/drivers/crypto/openssl/compat.h
@@ -7,101 +7,190 @@
#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
-#define set_rsa_params(rsa, p, q, ret) \
- do {rsa->p = p; rsa->q = q; ret = 0; } while (0)
-
-#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
- do { \
- rsa->dmp1 = dmp1; \
- rsa->dmq1 = dmq1; \
- rsa->iqmp = iqmp; \
- ret = 0; \
- } while (0)
-
-#define set_rsa_keys(rsa, n, e, d, ret) \
- do { \
- rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \
- } while (0)
-
-#define set_dh_params(dh, p, g, ret) \
- do { \
- dh->p = p; \
- dh->q = NULL; \
- dh->g = g; \
- ret = 0; \
- } while (0)
-
-#define set_dh_priv_key(dh, priv_key, ret) \
- do { dh->priv_key = priv_key; ret = 0; } while (0)
-
-#define set_dsa_params(dsa, p, q, g, ret) \
- do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0)
-
-#define get_dh_pub_key(dh, pub_key) \
- (pub_key = dh->pub_key)
-
-#define get_dh_priv_key(dh, priv_key) \
- (priv_key = dh->priv_key)
-
-#define set_dsa_sign(sign, r, s) \
- do { sign->r = r; sign->s = s; } while (0)
-
-#define get_dsa_sign(sign, r, s) \
- do { r = sign->r; s = sign->s; } while (0)
-
-#define set_dsa_keys(dsa, pub, priv, ret) \
- do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0)
-
-#define set_dsa_pub_key(dsa, pub_key) \
- (dsa->pub_key = pub_key)
-
-#define get_dsa_priv_key(dsa, priv_key) \
- (priv_key = dsa->priv_key)
+static __rte_always_inline int
+set_rsa_params(RSA *rsa, BIGNUM *p, BIGNUM *q)
+{
+ rsa->p = p;
+ rsa->q = q;
+ return 0;
+}
+
+static __rte_always_inline int
+set_rsa_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp)
+{
+ rsa->dmp1 = dmp1;
+ rsa->dmq1 = dmq1;
+ rsa->iqmp = iqmp;
+ return 0;
+}
+
+static __rte_always_inline int
+set_rsa_keys(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d)
+{
+ rsa->n = n;
+ rsa->e = e;
+ rsa->d = d;
+ return 0;
+}
+
+static __rte_always_inline int
+set_dh_params(DH *dh, BIGNUM *p, BIGNUM *g)
+{
+ dh->p = p;
+ dh->q = NULL;
+ dh->g = g;
+ return 0;
+}
+
+static __rte_always_inline int
+set_dh_priv_key(DH *dh, BIGNUM *priv_key)
+{
+ dh->priv_key = priv_key;
+ return 0;
+}
+
+static __rte_always_inline int
+set_dsa_params(DSA *dsa, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+{
+ dsa->p = p;
+ dsa->q = q;
+ dsa->g = g;
+ return 0;
+}
+
+static __rte_always_inline void
+get_dh_pub_key(DH *dh, const BIGNUM **pub_key)
+{
+ *pub_key = dh->pub_key;
+}
+
+static __rte_always_inline void
+get_dh_priv_key(DH *dh, const BIGNUM **priv_key)
+{
+ *priv_key = dh->priv_key;
+}
+
+static __rte_always_inline void
+set_dsa_sign(DSA_SIG *sign, BIGNUM *r, BIGNUM *s)
+{
+ sign->r = r;
+ sign->s = s;
+}
+
+static __rte_always_inline void
+get_dsa_sign(DSA_SIG *sign, const BIGNUM **r, const BIGNUM **s)
+{
+ *r = sign->r;
+ *s = sign->s;
+}
+
+static __rte_always_inline int
+set_dsa_keys(DSA *dsa, BIGNUM *pub, BIGNUM *priv)
+{
+ dsa->pub_key = pub;
+ dsa->priv_key = priv;
+ return 0;
+}
+
+static __rte_always_inline void
+set_dsa_pub_key(DSA *dsa, BIGNUM *pub)
+{
+ dsa->pub_key = pub;
+}
+
+static __rte_always_inline void
+get_dsa_priv_key(DSA *dsa, BIGNUM **priv_key)
+{
+ *priv_key = dsa->priv_key;
+}
#else
-#define set_rsa_params(rsa, p, q, ret) \
- (ret = !RSA_set0_factors(rsa, p, q))
+static __rte_always_inline int
+set_rsa_params(RSA *rsa, BIGNUM *p, BIGNUM *q)
+{
+ return !(RSA_set0_factors(rsa, p, q));
+}
-#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
- (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))
+static __rte_always_inline int
+set_rsa_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp)
+{
+ return !(RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp));
+}
/* n, e must be non-null, d can be NULL */
-#define set_rsa_keys(rsa, n, e, d, ret) \
- (ret = !RSA_set0_key(rsa, n, e, d))
-
-#define set_dh_params(dh, p, g, ret) \
- (ret = !DH_set0_pqg(dh, p, NULL, g))
-
-#define set_dh_priv_key(dh, priv_key, ret) \
- (ret = !DH_set0_key(dh, NULL, priv_key))
-
-#define get_dh_pub_key(dh, pub_key) \
- (DH_get0_key(dh_key, &pub_key, NULL))
-
-#define get_dh_priv_key(dh, priv_key) \
- (DH_get0_key(dh_key, NULL, &priv_key))
-
-#define set_dsa_params(dsa, p, q, g, ret) \
- (ret = !DSA_set0_pqg(dsa, p, q, g))
-
-#define set_dsa_priv_key(dsa, priv_key) \
- (DSA_set0_key(dsa, NULL, priv_key))
-
-#define set_dsa_sign(sign, r, s) \
- (DSA_SIG_set0(sign, r, s))
-
-#define get_dsa_sign(sign, r, s) \
- (DSA_SIG_get0(sign, &r, &s))
-
-#define set_dsa_keys(dsa, pub, priv, ret) \
- (ret = !DSA_set0_key(dsa, pub, priv))
-
-#define set_dsa_pub_key(dsa, pub_key) \
- (DSA_set0_key(dsa, pub_key, NULL))
-#define get_dsa_priv_key(dsa, priv_key) \
- (DSA_get0_key(dsa, NULL, &priv_key))
+static __rte_always_inline int
+set_rsa_keys(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d)
+{
+ return !(RSA_set0_key(rsa, n, e, d));
+}
+
+static __rte_always_inline int
+set_dh_params(DH *dh, BIGNUM *p, BIGNUM *g)
+{
+ return !(DH_set0_pqg(dh, p, NULL, g));
+}
+
+static __rte_always_inline int
+set_dh_priv_key(DH *dh, BIGNUM *priv_key)
+{
+ return !(DH_set0_key(dh, NULL, priv_key));
+}
+
+static __rte_always_inline void
+get_dh_pub_key(DH *dh_key, const BIGNUM **pub_key)
+{
+ DH_get0_key(dh_key, pub_key, NULL);
+}
+
+static __rte_always_inline void
+get_dh_priv_key(DH *dh_key, const BIGNUM **priv_key)
+{
+ DH_get0_key(dh_key, NULL, priv_key);
+}
+
+static __rte_always_inline int
+set_dsa_params(DSA *dsa, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+{
+ return !(DSA_set0_pqg(dsa, p, q, g));
+}
+
+static __rte_always_inline void
+set_dsa_priv_key(DSA *dsa, BIGNUM *priv_key)
+{
+ DSA_set0_key(dsa, NULL, priv_key);
+}
+
+static __rte_always_inline void
+set_dsa_sign(DSA_SIG *sign, BIGNUM *r, BIGNUM *s)
+{
+ DSA_SIG_set0(sign, r, s);
+}
+
+static __rte_always_inline void
+get_dsa_sign(DSA_SIG *sign, const BIGNUM **r, const BIGNUM **s)
+{
+ DSA_SIG_get0(sign, r, s);
+}
+
+static __rte_always_inline int
+set_dsa_keys(DSA *dsa, BIGNUM *pub, BIGNUM *priv)
+{
+ return !(DSA_set0_key(dsa, pub, priv));
+}
+
+static __rte_always_inline void
+set_dsa_pub_key(DSA *dsa, BIGNUM *pub_key)
+{
+ DSA_set0_key(dsa, pub_key, NULL);
+}
+
+static __rte_always_inline void
+get_dsa_priv_key(DSA *dsa, const BIGNUM **priv_key)
+{
+ DSA_get0_key(dsa, NULL, priv_key);
+}
#endif /* version < 10100000 */
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 7d263aba..003116dc 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1509,15 +1509,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
srclen = op->sym->auth.data.length;
- if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst = qp->temp_digest;
- else {
- dst = op->sym->auth.digest.data;
- if (dst == NULL)
- dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
- op->sym->auth.data.offset +
- op->sym->auth.data.length);
- }
+ dst = qp->temp_digest;
switch (sess->auth.mode) {
case OPENSSL_AUTH_AS_AUTH:
@@ -1540,6 +1532,15 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
+ } else {
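+ /* Auth generate: copy the digest computed into the temp buffer to the user-supplied location */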
+ uint8_t *auth_dst;
+
+ auth_dst = op->sym->auth.digest.data;
+ if (auth_dst == NULL)
+ auth_dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ memcpy(auth_dst, dst, sess->auth.digest_length);
}
if (status != 0)
@@ -1564,7 +1565,7 @@ process_openssl_dsa_sign_op(struct rte_crypto_op *cop,
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
} else {
const BIGNUM *r = NULL, *s = NULL;
- get_dsa_sign(sign, r, s);
+ get_dsa_sign(sign, &r, &s);
op->r.length = BN_bn2bin(r, op->r.data);
op->s.length = BN_bn2bin(s, op->s.data);
@@ -1666,7 +1667,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return -1;
}
- set_dh_priv_key(dh_key, priv_key, ret);
+ ret = set_dh_priv_key(dh_key, priv_key);
if (ret) {
OPENSSL_LOG(ERR, "Failed to set private key\n");
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -1715,7 +1716,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return -1;
}
- set_dh_priv_key(dh_key, priv_key, ret);
+ ret = set_dh_priv_key(dh_key, priv_key);
if (ret) {
OPENSSL_LOG(ERR, "Failed to set private key\n");
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -1743,7 +1744,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
__func__, __LINE__);
/* get the generated keys */
- get_dh_pub_key(dh_key, pub_key);
+ get_dh_pub_key(dh_key, &pub_key);
/* output public key */
op->pub_key.length = BN_bn2bin(pub_key,
@@ -1758,7 +1759,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
__func__, __LINE__);
/* get the generated keys */
- get_dh_priv_key(dh_key, priv_key);
+ get_dh_priv_key(dh_key, &priv_key);
/* provide generated private key back to user */
op->priv_key.length = BN_bn2bin(priv_key,
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index de228439..c2b029ec 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -26,9 +26,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -68,9 +68,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -110,9 +110,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -131,9 +131,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 0
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -152,9 +152,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -194,9 +194,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -236,9 +236,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -875,14 +875,14 @@ static int openssl_set_asym_session_parameters(
RSA_free(rsa);
goto err_rsa;
}
- set_rsa_params(rsa, p, q, ret);
+ ret = set_rsa_params(rsa, p, q);
if (ret) {
OPENSSL_LOG(ERR,
"failed to set rsa params\n");
RSA_free(rsa);
goto err_rsa;
}
- set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret);
+ ret = set_rsa_crt_params(rsa, dmp1, dmq1, iqmp);
if (ret) {
OPENSSL_LOG(ERR,
"failed to set crt params\n");
@@ -896,7 +896,7 @@ static int openssl_set_asym_session_parameters(
}
}
- set_rsa_keys(rsa, n, e, d, ret);
+ ret = set_rsa_keys(rsa, n, e, d);
if (ret) {
OPENSSL_LOG(ERR, "Failed to load rsa keys\n");
RSA_free(rsa);
@@ -1005,7 +1005,7 @@ err_rsa:
"failed to allocate resources\n");
goto err_dh;
}
- set_dh_params(dh, p, g, ret);
+ ret = set_dh_params(dh, p, g);
if (ret) {
DH_free(dh);
goto err_dh;
@@ -1087,7 +1087,7 @@ err_dh:
goto err_dsa;
}
- set_dsa_params(dsa, p, q, g, ret);
+ ret = set_dsa_params(dsa, p, q, g);
if (ret) {
DSA_free(dsa);
OPENSSL_LOG(ERR, "Failed to dsa params\n");
@@ -1101,7 +1101,7 @@ err_dh:
* both versions
*/
/* just set dummy public for very 1st call */
- set_dsa_keys(dsa, pub_key, priv_key, ret);
+ ret = set_dsa_keys(dsa, pub_key, priv_key);
if (ret) {
DSA_free(dsa);
OPENSSL_LOG(ERR, "Failed to set keys\n");
diff --git a/drivers/crypto/qat/qat_sym_capabilities.h b/drivers/crypto/qat/qat_sym_capabilities.h
index eea08bc7..7cba87d6 100644
--- a/drivers/crypto/qat/qat_sym_capabilities.h
+++ b/drivers/crypto/qat/qat_sym_capabilities.h
@@ -154,6 +154,26 @@
}, } \
}, } \
}, \
+ { /* AES CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ } \
+ }, } \
+ }, } \
+ }, \
{ /* AES CCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index 96f442e8..c3f70040 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -290,6 +290,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
break;
case QAT_GEN2:
+ case QAT_GEN3:
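+ /* GEN3 devices expose the same symmetric capability set as GEN2 */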
internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
break;
default:
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
index d3432854..5563d5be 100644
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ b/drivers/crypto/qat/qat_sym_pmd.h
@@ -12,7 +12,7 @@
#include "qat_sym_capabilities.h"
#include "qat_device.h"
-/**< Intel(R) QAT Symmetric Crypto PMD device name */
+/** Intel(R) QAT Symmetric Crypto PMD driver name */
#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
extern uint8_t cryptodev_qat_driver_id;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 1d58220a..8196e233 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -498,6 +498,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
struct qat_sym_dev_private *internals = dev->data->dev_private;
uint8_t *key_data = auth_xform->key.data;
uint8_t key_length = auth_xform->key.length;
+ session->aes_cmac = 0;
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -518,6 +519,10 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
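+ /* CMAC reuses the XCBC-MAC hardware algorithm; the aes_cmac flag selects the CMAC key precompute path */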
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ session->aes_cmac = 1;
+ break;
case RTE_CRYPTO_AUTH_AES_GMAC:
if (qat_sym_validate_aes_key(auth_xform->key.length,
&session->qat_cipher_alg) != 0) {
@@ -555,7 +560,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
auth_xform->algo);
@@ -817,6 +821,8 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
return ICP_QAT_HW_SHA512_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_MD5:
return ICP_QAT_HW_MD5_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum digest size in this case */
return ICP_QAT_HW_SHA512_STATE1_SZ;
@@ -843,6 +849,8 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
return SHA512_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
return 16;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return ICP_QAT_HW_AES_BLK_SZ;
case ICP_QAT_HW_AUTH_ALGO_MD5:
return MD5_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
@@ -991,11 +999,28 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
+static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
+
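+/*
+ * CMAC subkey derivation (RFC 4493): shift the 16-byte block left by one
+ * bit and, if the bit shifted out was set, XOR the constant Rb (0x87)
+ * into the last byte. This is doubling in GF(2^128).
+ */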
+static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
+{
+ int i;
+
+ derived[0] = base[0] << 1;
+ for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
+ derived[i] = base[i] << 1;
+ derived[i - 1] |= base[i] >> 7;
+ }
+
+ if (base[0] & 0x80)
+ derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
+}
+
static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
const uint8_t *auth_key,
uint16_t auth_keylen,
uint8_t *p_state_buf,
- uint16_t *p_state_len)
+ uint16_t *p_state_len,
+ uint8_t aes_cmac)
{
int block_size;
uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
@@ -1003,47 +1028,91 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
int i;
if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
- static uint8_t qat_aes_xcbc_key_seed[
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
- };
- uint8_t *in = NULL;
- uint8_t *out = p_state_buf;
- int x;
- AES_KEY enc_key;
+ /* CMAC */
+ if (aes_cmac) {
+ AES_KEY enc_key;
+ uint8_t *in = NULL;
+ uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
+ uint8_t *k1, *k2;
- in = rte_zmalloc("working mem for key",
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
- if (in == NULL) {
- QAT_LOG(ERR, "Failed to alloc memory");
- return -ENOMEM;
- }
+ auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
+
+ in = rte_zmalloc("AES CMAC K1",
+ ICP_QAT_HW_AES_128_KEY_SZ, 16);
+
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(in, AES_CMAC_SEED,
+ ICP_QAT_HW_AES_128_KEY_SZ);
+ rte_memcpy(p_state_buf, auth_key, auth_keylen);
- rte_memcpy(in, qat_aes_xcbc_key_seed,
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
- for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
&enc_key) != 0) {
- rte_free(in -
- (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
- memset(out -
- (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
- 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ rte_free(in);
return -EFAULT;
}
- AES_encrypt(in, out, &enc_key);
- in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
- out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+
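+ /*
+ * k0 = L = AES-128 encryption of the all-zero block. The CMAC
+ * subkeys K1 = dbl(L) and K2 = dbl(K1) are stored after the raw
+ * key in the state buffer.
+ */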
+ AES_encrypt(in, k0, &enc_key);
+
+ k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+ aes_cmac_key_derive(k0, k1);
+ aes_cmac_key_derive(k1, k2);
+
+ memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in);
+ return 0;
+ } else {
+ static uint8_t qat_aes_xcbc_key_seed[
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ int x;
+ AES_KEY enc_key;
+
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(in, qat_aes_xcbc_key_seed,
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+ if (AES_set_encrypt_key(auth_key,
+ auth_keylen << 3,
+ &enc_key) != 0) {
+ rte_free(in -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+ memset(out -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+ 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ }
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+ return 0;
}
- *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
- rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
- return 0;
+
} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
uint8_t *in = NULL;
@@ -1417,7 +1486,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
- || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
+ )
hash->auth_counter.counter = 0;
else
hash->auth_counter.counter = rte_bswap32(
@@ -1430,40 +1501,45 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
*/
switch (cdesc->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA224:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA384:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
@@ -1471,10 +1547,16 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+ if (cdesc->aes_cmac)
+ memset(cdesc->cd_cur_ptr, 0, state1_size);
if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
- &state2_size)) {
- QAT_LOG(ERR, "(XCBC)precompute failed");
+ &state2_size, cdesc->aes_cmac)) {
+ if (cdesc->aes_cmac)
+ QAT_LOG(ERR, "(CMAC)precompute failed");
+ else
+ QAT_LOG(ERR, "(XCBC)precompute failed");
return -EFAULT;
}
break;
@@ -1482,9 +1564,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
- &state2_size)) {
+ if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
+ authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size, cdesc->aes_cmac)) {
QAT_LOG(ERR, "(GCM)precompute failed");
return -EFAULT;
}
@@ -1542,9 +1624,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
- authkey, authkeylen, cdesc->cd_cur_ptr,
- &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(MD5)precompute failed");
return -EFAULT;
}
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index e8f51e5b..43e25ceb 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -36,6 +36,8 @@
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
+#define QAT_AES_CMAC_CONST_RB 0x87
+
enum qat_sym_proto_flag {
QAT_CRYPTO_PROTO_FLAG_NONE = 0,
QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -75,6 +77,7 @@ struct qat_sym_session {
uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
enum qat_device_gen min_qat_dev_gen;
+ uint8_t aes_cmac;
};
int
diff --git a/drivers/crypto/scheduler/meson.build b/drivers/crypto/scheduler/meson.build
new file mode 100644
index 00000000..c5ba2d68
--- /dev/null
+++ b/drivers/crypto/scheduler/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+deps += ['bus_vdev', 'reorder']
+name = 'crypto_scheduler'
+sources = files(
+ 'rte_cryptodev_scheduler.c',
+ 'scheduler_failover.c',
+ 'scheduler_multicore.c',
+ 'scheduler_pkt_size_distr.c',
+ 'scheduler_pmd.c',
+ 'scheduler_pmd_ops.c',
+ 'scheduler_roundrobin.c',
+)
+
+headers = files(
+ 'rte_cryptodev_scheduler.h',
+ 'rte_cryptodev_scheduler_operations.h',
+)
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 6e4919c4..a2142860 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -174,7 +174,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -233,7 +233,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -290,7 +290,7 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -308,28 +308,28 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
switch (mode) {
case CDEV_SCHED_MODE_ROUNDROBIN:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- roundrobin_scheduler) < 0) {
+ crypto_scheduler_roundrobin) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- pkt_size_based_distr_scheduler) < 0) {
+ crypto_scheduler_pkt_size_based_distr) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_FAILOVER:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- failover_scheduler) < 0) {
+ crypto_scheduler_failover) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_MULTICORE:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- multicore_scheduler) < 0) {
+ crypto_scheduler_multicore) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
@@ -353,7 +353,7 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -375,7 +375,7 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -403,7 +403,7 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -425,7 +425,7 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -498,7 +498,7 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -566,7 +566,7 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
return -EINVAL;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 3faea409..9a72a90a 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -270,13 +270,13 @@ struct rte_cryptodev_scheduler {
};
/** Round-robin mode scheduler */
-extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin;
/** Packet-size based distribution mode scheduler */
-extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr;
/** Fail-over mode scheduler */
-extern struct rte_cryptodev_scheduler *failover_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_failover;
/** multi-core mode scheduler */
-extern struct rte_cryptodev_scheduler *multicore_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_multicore;
#ifdef __cplusplus
}
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index ddfb5b81..3a023b8a 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -197,7 +197,7 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
return 0;
}
-struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -208,7 +208,7 @@ struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
NULL /*option_get */
};
-struct rte_cryptodev_scheduler fo_scheduler = {
+static struct rte_cryptodev_scheduler fo_scheduler = {
.name = "failover-scheduler",
.description = "scheduler which enqueues to the primary slave, "
"and only then enqueues to the secondary slave "
@@ -217,4 +217,4 @@ struct rte_cryptodev_scheduler fo_scheduler = {
.ops = &scheduler_fo_ops
};
-struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_failover = &fo_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index d410e69d..7808e9a3 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -392,7 +392,7 @@ exit:
return -1;
}
-struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -403,11 +403,11 @@ struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
NULL /* option_get */
};
-struct rte_cryptodev_scheduler mc_scheduler = {
+static struct rte_cryptodev_scheduler mc_scheduler = {
.name = "multicore-scheduler",
.description = "scheduler which will run burst across multiple cpu cores",
.mode = CDEV_SCHED_MODE_MULTICORE,
.ops = &scheduler_mc_ops
};
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 74129b66..45c8dceb 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -398,7 +398,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
return 0;
}
-struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -409,7 +409,7 @@ struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
scheduler_option_get
};
-struct rte_cryptodev_scheduler psd_scheduler = {
+static struct rte_cryptodev_scheduler psd_scheduler = {
.name = "packet-size-based-scheduler",
.description = "scheduler which will distribute crypto op "
"burst based on the packet size",
@@ -417,4 +417,4 @@ struct rte_cryptodev_scheduler psd_scheduler = {
.ops = &scheduler_ps_ops
};
-struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr = &psd_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index a9221a94..20198ccb 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -14,7 +14,7 @@
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
-uint8_t cryptodev_driver_id;
+uint8_t cryptodev_scheduler_driver_id;
struct scheduler_init_params {
struct rte_cryptodev_pmd_init_params def_p;
@@ -38,7 +38,7 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
-const char *scheduler_valid_params[] = {
+static const char * const scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
RTE_CRYPTODEV_VDEV_SLAVE,
RTE_CRYPTODEV_VDEV_MODE,
@@ -91,7 +91,7 @@ cryptodev_scheduler_create(const char *name,
return -EFAULT;
}
- dev->driver_id = cryptodev_driver_id;
+ dev->driver_id = cryptodev_scheduler_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
sched_ctx = dev->data->dev_private;
@@ -569,4 +569,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
cryptodev_scheduler_pmd_drv.driver,
- cryptodev_driver_id);
+ cryptodev_scheduler_driver_id);
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 778071ca..939105aa 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -522,7 +522,7 @@ scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
}
}
-struct rte_cryptodev_ops scheduler_pmd_ops = {
+static struct rte_cryptodev_ops scheduler_pmd_ops = {
.dev_configure = scheduler_pmd_config,
.dev_start = scheduler_pmd_start,
.dev_stop = scheduler_pmd_stop,
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index d5e602a2..3ed480c1 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -63,7 +63,7 @@ struct scheduler_qp_ctx {
} __rte_cache_aligned;
-extern uint8_t cryptodev_driver_id;
+extern uint8_t cryptodev_scheduler_driver_id;
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index c7082a64..9b891d97 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -190,7 +190,7 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
return 0;
}
-struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -201,7 +201,7 @@ struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
NULL /* option_get */
};
-struct rte_cryptodev_scheduler scheduler = {
+static struct rte_cryptodev_scheduler scheduler = {
.name = "roundrobin-scheduler",
.description = "scheduler which will round robin burst across "
"slave crypto devices",
@@ -209,4 +209,4 @@ struct rte_cryptodev_scheduler scheduler = {
.ops = &scheduler_rr_ops
};
-struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin = &scheduler;
diff --git a/drivers/crypto/zuc/meson.build b/drivers/crypto/zuc/meson.build
new file mode 100644
index 00000000..b8ca7107
--- /dev/null
+++ b/drivers/crypto/zuc/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('libsso_zuc', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('rte_zuc_pmd.c', 'rte_zuc_pmd_ops.c')
+deps += ['bus_vdev']
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index f301d8dc..03ad1b6c 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -6,6 +6,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV) += dsw
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa
diff --git a/drivers/event/dpaa/Makefile b/drivers/event/dpaa/Makefile
index ddd85522..6f93e7f4 100644
--- a/drivers/event/dpaa/Makefile
+++ b/drivers/event/dpaa/Makefile
@@ -34,5 +34,6 @@ LDLIBS += -lrte_mempool_dpaa
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_eventdev -lrte_pmd_dpaa -lrte_bus_vdev
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 5443ef56..1e247e4f 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -30,6 +30,7 @@
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles.h>
+#include <rte_kvargs.h>
#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
@@ -43,19 +44,31 @@
* 1 Eventdev can have N Eventqueue
*/
+#define DISABLE_INTR_MODE "disable_intr"
+
static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
uint64_t *timeout_ticks)
{
- uint64_t cycles_per_second;
-
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
+ uint64_t cycles_per_second;
+
cycles_per_second = rte_get_timer_hz();
- *timeout_ticks = ns * (cycles_per_second / NS_PER_S);
+ *timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
+
+ return 0;
+}
+
+static int
+dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ RTE_SET_USED(dev);
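+ /* In interrupt mode the dequeue timeout is handled in microseconds */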
+ *timeout_ticks = ns/1000;
return 0;
}
@@ -100,6 +113,56 @@ dpaa_event_enqueue(void *port, const struct rte_event *ev)
return dpaa_event_enqueue_burst(port, ev, 1);
}
+static void drain_4_bytes(int fd, fd_set *fdset)
+{
+ if (FD_ISSET(fd, fdset)) {
+ /* drain 4 bytes */
+ uint32_t junk;
+ ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));
+ if (sjunk != sizeof(junk))
+ DPAA_EVENTDEV_ERR("UIO irq read error");
+ }
+}
+
+static inline int
+dpaa_event_dequeue_wait(uint64_t timeout_ticks)
+{
+ int fd_qman, nfds;
+ int ret;
+ fd_set readset;
+
+ /* Go into (and back out of) IRQ mode for each select,
+ * it simplifies exit-path considerations and other
+ * potential nastiness.
+ */
+ struct timeval tv = {
+ .tv_sec = timeout_ticks / 1000000,
+ .tv_usec = timeout_ticks % 1000000
+ };
+
+ fd_qman = qman_thread_fd();
+ nfds = fd_qman + 1;
+ FD_ZERO(&readset);
+ FD_SET(fd_qman, &readset);
+
+ qman_irqsource_add(QM_PIRQ_DQRI);
+
+ ret = select(nfds, &readset, NULL, NULL, &tv);
+ if (ret < 0)
+ return ret;
+ /* Calling irqsource_remove() prior to thread_irq()
+ * means thread_irq() will not process whatever caused
+ * the interrupts, however it does ensure that, once
+ * thread_irq() re-enables interrupts, they won't fire
+ * again immediately.
+ */
+ qman_irqsource_remove(~0);
+ drain_4_bytes(fd_qman, &readset);
+ qman_thread_irq();
+
+ return ret;
+}
+
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks)
@@ -107,8 +170,8 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
int ret;
u16 ch_id;
void *buffers[8];
- u32 num_frames, i;
- uint64_t wait_time, cur_ticks, start_ticks;
+ u32 num_frames, i, irq = 0;
+ uint64_t cur_ticks = 0, wait_time_ticks = 0;
struct dpaa_port *portal = (struct dpaa_port *)port;
struct rte_mbuf *mbuf;
@@ -147,20 +210,21 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
}
DPAA_PER_LCORE_DQRR_HELD = 0;
- if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
- wait_time = timeout_ticks;
+ if (timeout_ticks)
+ wait_time_ticks = timeout_ticks;
else
- wait_time = portal->timeout;
+ wait_time_ticks = portal->timeout_us;
- /* Lets dequeue the frames */
- start_ticks = rte_get_timer_cycles();
- wait_time += start_ticks;
+ wait_time_ticks += rte_get_timer_cycles();
do {
+ /* Lets dequeue the frames */
num_frames = qman_portal_dequeue(ev, nb_events, buffers);
- if (num_frames != 0)
+ if (irq)
+ irq = 0;
+ if (num_frames)
break;
cur_ticks = rte_get_timer_cycles();
- } while (cur_ticks < wait_time);
+ } while (cur_ticks < wait_time_ticks);
return num_frames;
}
@@ -171,11 +235,91 @@ dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}
+static uint16_t
+dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ int ret;
+ u16 ch_id;
+ void *buffers[8];
+ u32 num_frames, i, irq = 0;
+ uint64_t cur_ticks = 0, wait_time_ticks = 0;
+ struct dpaa_port *portal = (struct dpaa_port *)port;
+ struct rte_mbuf *mbuf;
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ /* Affine current thread context to a qman portal */
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_EVENTDEV_ERR("Unable to initialize portal");
+ return ret;
+ }
+ }
+
+ if (unlikely(!portal->is_port_linked)) {
+ /*
+ * Affine event queue for current thread context
+ * to a qman portal.
+ */
+ for (i = 0; i < portal->num_linked_evq; i++) {
+ ch_id = portal->evq_info[i].ch_id;
+ dpaa_eventq_portal_add(ch_id);
+ }
+ portal->is_port_linked = true;
+ }
+
+ /* Check if there are atomic contexts to be released */
+ i = 0;
+ while (DPAA_PER_LCORE_DQRR_SIZE) {
+ if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
+ qman_dca_index(i, 0);
+ mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+ mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ }
+ i++;
+ }
+ DPAA_PER_LCORE_DQRR_HELD = 0;
+
+ if (timeout_ticks)
+ wait_time_ticks = timeout_ticks;
+ else
+ wait_time_ticks = portal->timeout_us;
+
+ do {
+ /* Lets dequeue the frames */
+ num_frames = qman_portal_dequeue(ev, nb_events, buffers);
+ if (irq)
+ irq = 0;
+ if (num_frames)
+ break;
+ if (wait_time_ticks) { /* wait for time */
+ if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
+ irq = 1;
+ continue;
+ }
+ break; /* no event after waiting */
+ }
+ cur_ticks = rte_get_timer_cycles();
+ } while (cur_ticks < wait_time_ticks);
+
+ return num_frames;
+}
+
+static uint16_t
+dpaa_event_dequeue_intr(void *port,
+ struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
+}
+
static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
struct rte_event_dev_info *dev_info)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
dev_info->driver_name = "event_dpaa";
@@ -184,7 +328,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
dev_info->max_dequeue_timeout_ns =
DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
dev_info->dequeue_timeout_ns =
- DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+ DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
dev_info->max_event_queues =
DPAA_EVENT_MAX_QUEUES;
dev_info->max_event_queue_flows =
@@ -220,8 +364,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
int ret, i;
uint32_t *ch_id;
- EVENTDEV_DRV_FUNC_TRACE();
-
+ EVENTDEV_INIT_FUNC_TRACE();
priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
priv->nb_events_limit = conf->nb_events_limit;
priv->nb_event_queues = conf->nb_event_queues;
@@ -231,26 +374,18 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
priv->event_dev_cfg = conf->event_dev_cfg;
- /* Check dequeue timeout method is per dequeue or global */
- if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
- /*
- * Use timeout value as given in dequeue operation.
- * So invalidating this timetout value.
- */
- priv->dequeue_timeout_ns = 0;
- }
-
ch_id = rte_malloc("dpaa-channels",
sizeof(uint32_t) * priv->nb_event_queues,
RTE_CACHE_LINE_SIZE);
if (ch_id == NULL) {
- EVENTDEV_DRV_ERR("Fail to allocate memory for dpaa channels\n");
+ DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels\n");
return -ENOMEM;
}
/* Create requested event queues within the given event device */
ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
if (ret < 0) {
- EVENTDEV_DRV_ERR("Failed to create internal channel\n");
+ DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
+ priv->nb_event_queues, ret);
rte_free(ch_id);
return ret;
}
@@ -260,30 +395,41 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
/* Lets prepare event ports */
memset(&priv->ports[0], 0,
sizeof(struct dpaa_port) * priv->nb_event_ports);
+
+ /* Check dequeue timeout method is per dequeue or global */
if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
- for (i = 0; i < priv->nb_event_ports; i++) {
- priv->ports[i].timeout =
- DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
- }
- } else if (priv->dequeue_timeout_ns == 0) {
- for (i = 0; i < priv->nb_event_ports; i++) {
- dpaa_event_dequeue_timeout_ticks(NULL,
- DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
- &priv->ports[i].timeout);
- }
+ /*
+ * Use the timeout value given with each dequeue operation,
+ * so invalidate the device-level timeout value here.
+ */
+ priv->dequeue_timeout_ns = 0;
+
+ } else if (conf->dequeue_timeout_ns == 0) {
+ priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
} else {
- for (i = 0; i < priv->nb_event_ports; i++) {
- dpaa_event_dequeue_timeout_ticks(NULL,
- priv->dequeue_timeout_ns,
- &priv->ports[i].timeout);
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ }
+
+ for (i = 0; i < priv->nb_event_ports; i++) {
+ if (priv->intr_mode) {
+ priv->ports[i].timeout_us =
+ priv->dequeue_timeout_ns/1000;
+ } else {
+ uint64_t cycles_per_second;
+
+ cycles_per_second = rte_get_timer_hz();
+ priv->ports[i].timeout_us =
+ (priv->dequeue_timeout_ns * cycles_per_second)
+ / NS_PER_S;
}
}
+
/*
* TODO: Portals are currently affined to threads. The maximum number of
* threads that can be created equals the number of lcores.
*/
rte_free(ch_id);
- EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);
+ DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);
return 0;
}
@@ -291,7 +437,7 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
return 0;
@@ -300,14 +446,14 @@ dpaa_event_dev_start(struct rte_eventdev *dev)
static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
}
static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
return 0;
@@ -317,7 +463,7 @@ static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
struct rte_event_queue_conf *queue_conf)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(queue_id);
@@ -334,14 +480,14 @@ dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
struct dpaa_eventdev *priv = dev->data->dev_private;
struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
switch (queue_conf->schedule_type) {
case RTE_SCHED_TYPE_PARALLEL:
case RTE_SCHED_TYPE_ATOMIC:
break;
case RTE_SCHED_TYPE_ORDERED:
- EVENTDEV_DRV_ERR("Schedule type is not supported.");
+ DPAA_EVENTDEV_ERR("Schedule type is not supported.");
return -1;
}
evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
@@ -353,7 +499,7 @@ dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(queue_id);
@@ -363,7 +509,7 @@ static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
struct rte_event_port_conf *port_conf)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(port_id);
@@ -379,7 +525,7 @@ dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
{
struct dpaa_eventdev *eventdev = dev->data->dev_private;
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(port_conf);
dev->data->ports[port_id] = &eventdev->ports[port_id];
@@ -390,7 +536,7 @@ dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
static void
dpaa_event_port_release(void *port)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(port);
}
@@ -454,7 +600,8 @@ dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
event_queue->event_port = NULL;
}
- event_port->num_linked_evq = event_port->num_linked_evq - i;
+ if (event_port->num_linked_evq)
+ event_port->num_linked_evq = event_port->num_linked_evq - i;
return (int)i;
}
@@ -466,7 +613,7 @@ dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
{
const char *ethdev_driver = eth_dev->device->driver->name;
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
@@ -491,14 +638,14 @@ dpaa_event_eth_rx_adapter_queue_add(
struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
int ret, i;
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
if (rx_queue_id == -1) {
for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
queue_conf);
if (ret) {
- EVENTDEV_DRV_ERR(
+ DPAA_EVENTDEV_ERR(
"Event Queue attach failed:%d\n", ret);
goto detach_configured_queues;
}
@@ -508,7 +655,7 @@ dpaa_event_eth_rx_adapter_queue_add(
ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
if (ret)
- EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
+ DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
return ret;
detach_configured_queues:
@@ -527,14 +674,14 @@ dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
int ret, i;
struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
if (rx_queue_id == -1) {
for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
ret = dpaa_eth_eventq_detach(eth_dev, i);
if (ret)
- EVENTDEV_DRV_ERR(
+ DPAA_EVENTDEV_ERR(
"Event Queue detach failed:%d\n", ret);
}
@@ -543,7 +690,7 @@ dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
if (ret)
- EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
+ DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
return ret;
}
@@ -551,7 +698,7 @@ static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(eth_dev);
@@ -563,7 +710,7 @@ static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- EVENTDEV_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(eth_dev);
@@ -593,8 +740,44 @@ static struct rte_eventdev_ops dpaa_eventdev_ops = {
.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
};
+static int flag_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+dpaa_event_check_flags(const char *params)
+{
+ struct rte_kvargs *kvlist;
+
+ if (params == NULL || params[0] == '\0')
+ return 0;
+
+ kvlist = rte_kvargs_parse(params, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ /* Interrupt mode is disabled when the key-value pair disable_intr=1 is present */
+ if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
+ flag_check_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
static int
-dpaa_event_dev_create(const char *name)
+dpaa_event_dev_create(const char *name, const char *params)
{
struct rte_eventdev *eventdev;
struct dpaa_eventdev *priv;
@@ -603,21 +786,30 @@ dpaa_event_dev_create(const char *name)
sizeof(struct dpaa_eventdev),
rte_socket_id());
if (eventdev == NULL) {
- EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
+ DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
goto fail;
}
+ priv = eventdev->data->dev_private;
eventdev->dev_ops = &dpaa_eventdev_ops;
eventdev->enqueue = dpaa_event_enqueue;
eventdev->enqueue_burst = dpaa_event_enqueue_burst;
- eventdev->dequeue = dpaa_event_dequeue;
- eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+
+ if (dpaa_event_check_flags(params)) {
+ eventdev->dequeue = dpaa_event_dequeue;
+ eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+ } else {
+ priv->intr_mode = 1;
+ eventdev->dev_ops->timeout_ticks =
+ dpaa_event_dequeue_timeout_ticks_intr;
+ eventdev->dequeue = dpaa_event_dequeue_intr;
+ eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
+ }
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- priv = eventdev->data->dev_private;
priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
return 0;
@@ -629,11 +821,14 @@ static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
const char *name;
+ const char *params;
name = rte_vdev_device_name(vdev);
- EVENTDEV_DRV_LOG("Initializing %s", name);
+ DPAA_EVENTDEV_INFO("Initializing %s", name);
+
+ params = rte_vdev_device_args(vdev);
- return dpaa_event_dev_create(name);
+ return dpaa_event_dev_create(name, params);
}
static int
@@ -642,7 +837,7 @@ dpaa_event_dev_remove(struct rte_vdev_device *vdev)
const char *name;
name = rte_vdev_device_name(vdev);
- EVENTDEV_DRV_LOG("Closing %s", name);
+ DPAA_EVENTDEV_INFO("Closing %s", name);
return rte_event_pmd_vdev_uninit(name);
}
@@ -653,3 +848,5 @@ static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
+ DISABLE_INTR_MODE "=<int>");
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 583e46ca..8134e6ba 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -12,15 +12,8 @@
#define EVENTDEV_NAME_DPAA_PMD event_dpaa1
-#define EVENTDEV_DRV_LOG(fmt, args...) \
- DPAA_EVENTDEV_INFO(fmt, ## args)
-#define EVENTDEV_DRV_FUNC_TRACE() \
- DPAA_EVENTDEV_DEBUG("%s() Called:\n", __func__)
-#define EVENTDEV_DRV_ERR(fmt, args...) \
- DPAA_EVENTDEV_ERR("%s(): " fmt "\n", __func__, ## args)
-
-#define DPAA_EVENT_MAX_PORTS 8
-#define DPAA_EVENT_MAX_QUEUES 16
+#define DPAA_EVENT_MAX_PORTS 4
+#define DPAA_EVENT_MAX_QUEUES 8
#define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT 1
#define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
#define DPAA_EVENT_MAX_QUEUE_FLOWS 2048
@@ -28,7 +21,7 @@
#define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
#define DPAA_EVENT_MAX_EVENT_PORT RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
#define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
-#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100000UL
#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID ((uint64_t)-1)
#define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH 1
#define DPAA_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
@@ -61,7 +54,7 @@ struct dpaa_port {
struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
uint8_t num_linked_evq;
uint8_t is_port_linked;
- uint64_t timeout;
+ uint64_t timeout_us;
};
struct dpaa_eventdev {
@@ -72,7 +65,7 @@ struct dpaa_eventdev {
uint8_t max_event_queues;
uint8_t nb_event_queues;
uint8_t nb_event_ports;
- uint8_t resvd;
+ uint8_t intr_mode;
uint32_t nb_event_queue_flows;
uint32_t nb_event_port_dequeue_depth;
uint32_t nb_event_port_enqueue_depth;
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index 5e1a6320..e0134cc4 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -21,13 +21,19 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
LDLIBS += -lrte_eal -lrte_eventdev
LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_common_dpaax
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+LDLIBS += -lrte_pmd_dpaa2_sec
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec
+endif
+
# versioning export map
EXPORT_MAP := rte_pmd_dpaa2_event_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# depends on fslmc bus which uses experimental API
CFLAGS += -DALLOW_EXPERIMENTAL_API
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index ea1e5cc6..8d168b02 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -27,6 +27,7 @@
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
+#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>
#include <fslmc_vfio.h>
@@ -34,6 +35,9 @@
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <dpaa2_sec_event.h>
+#endif
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
@@ -54,34 +58,63 @@ static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
- struct rte_eventdev *ev_dev =
- ((struct dpaa2_io_portal_t *)port)->eventdev;
- struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
+
+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_dpio_dev *dpio_dev;
uint32_t queue_id = ev[0].queue_id;
- struct evq_info_t *evq_info = &priv->evq_info[queue_id];
+ struct dpaa2_eventq *evq_info;
uint32_t fqid;
struct qbman_swp *swp;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
uint32_t loop, frames_to_send;
struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
uint16_t num_tx = 0;
- int ret;
-
- RTE_SET_USED(port);
+ int i, n, ret;
+ uint8_t channel_index;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ /* Affine current thread context to a qman portal */
ret = dpaa2_affine_qbman_swp();
- if (ret) {
+ if (ret < 0) {
DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
-
+ /* TODO: dpaa2_portal should hold its own dpio_dev instead of using the per-thread variable */
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
swp = DPAA2_PER_LCORE_PORTAL;
+ if (likely(dpaa2_portal->is_port_linked))
+ goto skip_linking;
+
+ /* Create mapping between portal and channel to receive packets */
+ for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+ evq_info = &dpaa2_portal->evq_info[i];
+ if (!evq_info->event_port)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+ CMD_PRI_LOW,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id,
+ &channel_index);
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
+ goto err;
+ }
+
+ qbman_swp_push_set(swp, channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ }
+ dpaa2_portal->is_port_linked = true;
+
+skip_linking:
+ evq_info = &dpaa2_portal->evq_info[queue_id];
+
while (nb_events) {
- frames_to_send = (nb_events >> 3) ?
- MAX_TX_RING_SLOTS : nb_events;
+ frames_to_send = (nb_events > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_events;
for (loop = 0; loop < frames_to_send; loop++) {
const struct rte_event *event = &ev[num_tx + loop];
@@ -99,14 +132,14 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
- if (event->mbuf->seqn) {
+ if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
+ && event->mbuf->seqn) {
uint8_t dqrr_index = event->mbuf->seqn - 1;
qbman_eq_desc_set_dca(&eqdesc[loop], 1,
dqrr_index, 0);
DPAA2_PER_LCORE_DQRR_SIZE--;
- DPAA2_PER_LCORE_DQRR_HELD &=
- ~(1 << dqrr_index);
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
}
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
@@ -116,7 +149,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
* to avoid copy
*/
struct rte_event *ev_temp = rte_malloc(NULL,
- sizeof(struct rte_event), 0);
+ sizeof(struct rte_event), 0);
if (!ev_temp) {
if (!loop)
@@ -143,6 +176,18 @@ send_partial:
}
return num_tx;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &dpaa2_portal->evq_info[n];
+ if (!evq_info->event_port)
+ continue;
+ qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ return 0;
+
}
static uint16_t
@@ -197,6 +242,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
ev->mbuf->seqn = dqrr_index + 1;
DPAA2_PER_LCORE_DQRR_SIZE++;
DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
static uint16_t
@@ -204,22 +250,53 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks)
{
const struct qbman_result *dq;
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_eventq *evq_info;
struct qbman_swp *swp;
const struct qbman_fd *fd;
struct dpaa2_queue *rxq;
- int num_pkts = 0, ret, i = 0;
-
- RTE_SET_USED(port);
+ int num_pkts = 0, ret, i = 0, n;
+ uint8_t channel_index;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ /* Affine current thread context to a qman portal */
ret = dpaa2_affine_qbman_swp();
- if (ret) {
+ if (ret < 0) {
DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
+
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
swp = DPAA2_PER_LCORE_PORTAL;
+ if (likely(dpaa2_portal->is_port_linked))
+ goto skip_linking;
+
+ /* Create mapping between portal and channel to receive packets */
+ for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+ evq_info = &dpaa2_portal->evq_info[i];
+ if (!evq_info->event_port)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+ CMD_PRI_LOW,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id,
+ &channel_index);
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
+ goto err;
+ }
+
+ qbman_swp_push_set(swp, channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ }
+ dpaa2_portal->is_port_linked = true;
+
+skip_linking:
/* Check if there are atomic contexts to be released */
while (DPAA2_PER_LCORE_DQRR_SIZE) {
if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
@@ -258,6 +335,18 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
} while (num_pkts < nb_events);
return num_pkts;
+err:
+ for (n = 0; n < i; n++) {
+ evq_info = &dpaa2_portal->evq_info[n];
+ if (!evq_info->event_port)
+ continue;
+
+ qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ return 0;
}
static uint16_t
@@ -283,7 +372,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
dev_info->max_dequeue_timeout_ns =
DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
dev_info->dequeue_timeout_ns =
- DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
dev_info->max_event_queues = priv->max_event_queues;
dev_info->max_event_queue_flows =
DPAA2_EVENT_MAX_QUEUE_FLOWS;
@@ -292,6 +381,9 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
dev_info->max_event_priority_levels =
DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
+	/* we only support dpio up to the number of cores */
+ if (dev_info->max_event_ports > rte_lcore_count())
+ dev_info->max_event_ports = rte_lcore_count();
dev_info->max_event_port_dequeue_depth =
DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
dev_info->max_event_port_enqueue_depth =
@@ -313,7 +405,6 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
EVENTDEV_INIT_FUNC_TRACE();
- priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
priv->nb_event_queues = conf->nb_event_queues;
priv->nb_event_ports = conf->nb_event_ports;
priv->nb_event_queue_flows = conf->nb_event_queue_flows;
@@ -321,6 +412,20 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
priv->event_dev_cfg = conf->event_dev_cfg;
+ /* Check dequeue timeout method is per dequeue or global */
+ if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ /*
+		 * Use the timeout value given in each dequeue operation,
+		 * so invalidate the device-level timeout value here.
+ */
+ priv->dequeue_timeout_ns = 0;
+
+ } else if (conf->dequeue_timeout_ns == 0) {
+ priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+ } else {
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ }
+
DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
dev->data->dev_id);
return 0;
@@ -370,31 +475,39 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
-static void
-dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
-{
- EVENTDEV_INIT_FUNC_TRACE();
-
- RTE_SET_USED(dev);
- RTE_SET_USED(queue_id);
-}
-
static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
const struct rte_event_queue_conf *queue_conf)
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
- struct evq_info_t *evq_info =
- &priv->evq_info[queue_id];
+ struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
EVENTDEV_INIT_FUNC_TRACE();
+ switch (queue_conf->schedule_type) {
+ case RTE_SCHED_TYPE_PARALLEL:
+ case RTE_SCHED_TYPE_ATOMIC:
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
+ return -1;
+ }
evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+ evq_info->event_queue_id = queue_id;
return 0;
}
static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
struct rte_event_port_conf *port_conf)
{
@@ -402,7 +515,6 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
RTE_SET_USED(dev);
RTE_SET_USED(port_id);
- RTE_SET_USED(port_conf);
port_conf->new_event_threshold =
DPAA2_EVENT_MAX_NUM_EVENTS;
@@ -413,56 +525,44 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
port_conf->disable_implicit_release = 0;
}
-static void
-dpaa2_eventdev_port_release(void *port)
-{
- EVENTDEV_INIT_FUNC_TRACE();
-
- RTE_SET_USED(port);
-}
-
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
const struct rte_event_port_conf *port_conf)
{
+ char event_port_name[32];
+ struct dpaa2_port *portal;
+
EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(port_conf);
- if (!dpaa2_io_portal[port_id].dpio_dev) {
- dpaa2_io_portal[port_id].dpio_dev =
- dpaa2_get_qbman_swp(port_id);
- rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
- if (!dpaa2_io_portal[port_id].dpio_dev)
- return -1;
+ sprintf(event_port_name, "event-port-%d", port_id);
+ portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
+ if (!portal) {
+ DPAA2_EVENTDEV_ERR("Memory allocation failure");
+ return -ENOMEM;
}
- dpaa2_io_portal[port_id].eventdev = dev;
- dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
+ memset(portal, 0, sizeof(struct dpaa2_port));
+ dev->data->ports[port_id] = portal;
return 0;
}
-static int
-dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
- uint8_t queues[], uint16_t nb_unlinks)
+static void
+dpaa2_eventdev_port_release(void *port)
{
- struct dpaa2_eventdev *priv = dev->data->dev_private;
- struct dpaa2_io_portal_t *dpaa2_portal = port;
- struct evq_info_t *evq_info;
- int i;
+ struct dpaa2_port *portal = port;
EVENTDEV_INIT_FUNC_TRACE();
- for (i = 0; i < nb_unlinks; i++) {
- evq_info = &priv->evq_info[queues[i]];
- qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
- evq_info->dpcon->channel_index, 0);
- dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
- 0, dpaa2_portal->dpio_dev->token,
- evq_info->dpcon->dpcon_id);
- }
+ /* TODO: Cleanup is required when ports are in linked state. */
+ if (portal->is_port_linked)
+ DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
- return (int)nb_unlinks;
+ if (portal)
+ rte_free(portal);
+
+ portal = NULL;
}
static int
@@ -471,51 +571,71 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
uint16_t nb_links)
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
- struct dpaa2_io_portal_t *dpaa2_portal = port;
- struct evq_info_t *evq_info;
- uint8_t channel_index;
- int ret, i, n;
+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_eventq *evq_info;
+ uint16_t i;
EVENTDEV_INIT_FUNC_TRACE();
+ RTE_SET_USED(priorities);
+
for (i = 0; i < nb_links; i++) {
evq_info = &priv->evq_info[queues[i]];
+ memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
+ sizeof(struct dpaa2_eventq));
+ dpaa2_portal->evq_info[queues[i]].event_port = port;
+ dpaa2_portal->num_linked_evq++;
+ }
- ret = dpio_add_static_dequeue_channel(
- dpaa2_portal->dpio_dev->dpio,
- CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
- evq_info->dpcon->dpcon_id, &channel_index);
- if (ret < 0) {
- DPAA2_EVENTDEV_ERR(
- "Static dequeue config failed: err(%d)", ret);
- goto err;
- }
+ return (int)nb_links;
+}
- qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
- channel_index, 1);
- evq_info->dpcon->channel_index = channel_index;
- }
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dpaa2_port *dpaa2_portal = port;
+ int i;
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ struct dpaa2_eventq *evq_info;
+ struct qbman_swp *swp;
- RTE_SET_USED(priorities);
+ EVENTDEV_INIT_FUNC_TRACE();
- return (int)nb_links;
-err:
- for (n = 0; n < i; n++) {
- evq_info = &priv->evq_info[queues[n]];
- qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
- evq_info->dpcon->channel_index, 0);
- dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
- 0, dpaa2_portal->dpio_dev->token,
- evq_info->dpcon->dpcon_id);
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queues);
+
+ for (i = 0; i < nb_unlinks; i++) {
+ evq_info = &dpaa2_portal->evq_info[queues[i]];
+
+ if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
+			/* TODO: dpaa2_portal shall have dpio_dev - no per-lcore variable */
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ qbman_swp_push_set(swp,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ memset(evq_info, 0, sizeof(struct dpaa2_eventq));
+ if (dpaa2_portal->num_linked_evq)
+ dpaa2_portal->num_linked_evq--;
}
- return ret;
+
+ if (!dpaa2_portal->num_linked_evq)
+ dpaa2_portal->is_port_linked = false;
+
+ return (int)nb_unlinks;
}
+
static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
uint64_t *timeout_ticks)
{
- uint32_t scale = 1;
+ uint32_t scale = 1000*1000;
EVENTDEV_INIT_FUNC_TRACE();
@@ -677,6 +797,151 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
return 0;
}
+#ifdef RTE_LIBRTE_SECURITY
+static int
+dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ const char *name = cdev->data->name;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
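+	/* Only DPAA2 SEC devices (cryptodevs named "dpsec-<n>") are supported. */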
+ if (!strncmp(name, "dpsec-", 6))
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
+ else
+ return -1;
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ const struct rte_event *ev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
+ ret = dpaa2_sec_eventq_attach(cryptodev, i,
+ dpcon_id, ev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
+ ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ for (i = (i - 1); i >= 0 ; i--)
+ dpaa2_sec_eventq_detach(cryptodev, i);
+
+ return ret;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id,
+ const struct rte_event *ev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_crypto_queue_add_all(dev,
+ cryptodev, ev);
+
+ ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
+ dpcon_id, ev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev)
+{
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
+ ret = dpaa2_sec_eventq_detach(cdev, i);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_detach failed:ret %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);
+
+ ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+#endif
+
static struct rte_eventdev_ops dpaa2_eventdev_ops = {
.dev_infos_get = dpaa2_eventdev_info_get,
.dev_configure = dpaa2_eventdev_configure,
@@ -698,6 +963,13 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+#ifdef RTE_LIBRTE_SECURITY
+ .crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
+ .crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
+ .crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
+ .crypto_adapter_start = dpaa2_eventdev_crypto_start,
+ .crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
+#endif
};
static int
@@ -789,6 +1061,8 @@ dpaa2_eventdev_create(const char *name)
priv->max_event_queues++;
} while (dpcon_dev && dpci_dev);
+ RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
+
return 0;
fail:
return -EFAULT;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 229f66af..c847b3ea 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -21,6 +21,7 @@
#define DPAA2_EVENT_MAX_QUEUES 16
#define DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT 1
#define DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
#define DPAA2_EVENT_MAX_QUEUE_FLOWS 2048
#define DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
#define DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
@@ -41,6 +42,15 @@ enum {
(RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID)
+
+/**< Crypto adapter capabilities to return when packets are transferred
+ * from the cryptodev to the eventdev with DPAA2 devices.
+ */
+#define RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP \
+ (RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW | \
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND | \
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)
+
/**< Ethernet Rx adapter cap to return If the packet transfers from
* the ethdev to eventdev with DPAA2 devices.
*/
@@ -56,17 +66,27 @@ struct dpaa2_dpcon_dev {
uint8_t channel_index;
};
-struct evq_info_t {
+struct dpaa2_eventq {
/* DPcon device */
struct dpaa2_dpcon_dev *dpcon;
/* Attached DPCI device */
struct dpaa2_dpci_dev *dpci;
+ /* Mapped event port */
+ struct dpaa2_io_portal_t *event_port;
/* Configuration provided by the user */
uint32_t event_queue_cfg;
+ uint32_t event_queue_id;
+};
+
+struct dpaa2_port {
+ struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
+ uint8_t num_linked_evq;
+ uint8_t is_port_linked;
+ uint64_t timeout_us;
};
struct dpaa2_eventdev {
- struct evq_info_t evq_info[DPAA2_EVENT_MAX_QUEUES];
+ struct dpaa2_eventq evq_info[DPAA2_EVENT_MAX_QUEUES];
uint32_t dequeue_timeout_ns;
uint8_t max_event_queues;
uint8_t nb_event_queues;
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
index de7a4615..a0db6fc2 100644
--- a/drivers/event/dpaa2/meson.build
+++ b/drivers/event/dpaa2/meson.build
@@ -1,11 +1,14 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
-deps += ['bus_vdev', 'pmd_dpaa2']
+deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
sources = files('dpaa2_hw_dpcon.c',
'dpaa2_eventdev.c')
allow_experimental_apis = true
+includes += include_directories('../../crypto/dpaa2_sec/')
diff --git a/drivers/event/dsw/Makefile b/drivers/event/dsw/Makefile
new file mode 100644
index 00000000..490ed0b9
--- /dev/null
+++ b/drivers/event/dsw/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Ericsson AB
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_pmd_dsw_event.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -Wno-format-nonliteral
+endif
+
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_mbuf
+LDLIBS += -lrte_mempool
+LDLIBS += -lrte_ring
+LDLIBS += -lrte_eventdev
+LDLIBS += -lrte_bus_vdev
+
+LIBABIVER := 1
+
+EXPORT_MAP := rte_pmd_dsw_event_version.map
+
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV) += \
+ dsw_evdev.c dsw_event.c dsw_xstats.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
new file mode 100644
index 00000000..33ba1364
--- /dev/null
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -0,0 +1,435 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Ericsson AB
+ */
+
+#include <stdbool.h>
+
+#include <rte_cycles.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_random.h>
+
+#include "dsw_evdev.h"
+
+#define EVENTDEV_NAME_DSW_PMD event_dsw
+
+static int
+dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *conf)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ struct dsw_port *port;
+ struct rte_event_ring *in_ring;
+ struct rte_ring *ctl_in_ring;
+ char ring_name[RTE_RING_NAMESIZE];
+
+ port = &dsw->ports[port_id];
+
+ *port = (struct dsw_port) {
+ .id = port_id,
+ .dsw = dsw,
+ .dequeue_depth = conf->dequeue_depth,
+ .enqueue_depth = conf->enqueue_depth,
+ .new_event_threshold = conf->new_event_threshold
+ };
+
+ snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
+ port_id);
+
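+	/* Single-consumer event ring, sized to hold all in-flight events (see DSW_IN_RING_SIZE). */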
+ in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
+ dev->data->socket_id,
+ RING_F_SC_DEQ|RING_F_EXACT_SZ);
+
+ if (in_ring == NULL)
+ return -ENOMEM;
+
+ snprintf(ring_name, sizeof(ring_name), "dswctl%d_p%u",
+ dev->data->dev_id, port_id);
+
+ ctl_in_ring = rte_ring_create(ring_name, DSW_CTL_IN_RING_SIZE,
+ dev->data->socket_id,
+ RING_F_SC_DEQ|RING_F_EXACT_SZ);
+
+ if (ctl_in_ring == NULL) {
+ rte_event_ring_free(in_ring);
+ return -ENOMEM;
+ }
+
+ port->in_ring = in_ring;
+ port->ctl_in_ring = ctl_in_ring;
+
+ rte_atomic16_init(&port->load);
+
+ port->load_update_interval =
+ (DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;
+
+ port->migration_interval =
+ (DSW_MIGRATION_INTERVAL * rte_get_timer_hz()) / US_PER_S;
+
+ dev->data->ports[port_id] = port;
+
+ return 0;
+}
+
+static void
+dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
+ uint8_t port_id __rte_unused,
+ struct rte_event_port_conf *port_conf)
+{
+ *port_conf = (struct rte_event_port_conf) {
+ .new_event_threshold = 1024,
+ .dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
+ .enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
+ };
+}
+
+static void
+dsw_port_release(void *p)
+{
+ struct dsw_port *port = p;
+
+ rte_event_ring_free(port->in_ring);
+ rte_ring_free(port->ctl_in_ring);
+}
+
+static int
+dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *conf)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ struct dsw_queue *queue = &dsw->queues[queue_id];
+
+ if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
+ return -ENOTSUP;
+
+ if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
+ return -ENOTSUP;
+
+ /* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
+	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
+ * the queue will only have a single serving port, no
+ * migration will ever happen, so the extra TYPE_ATOMIC
+ * migration overhead is avoided.
+ */
+ if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
+ queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ else /* atomic or parallel */
+ queue->schedule_type = conf->schedule_type;
+
+ queue->num_serving_ports = 0;
+
+ return 0;
+}
+
+static void
+dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
+ uint8_t queue_id __rte_unused,
+ struct rte_event_queue_conf *queue_conf)
+{
+ *queue_conf = (struct rte_event_queue_conf) {
+ .nb_atomic_flows = 4096,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+ };
+}
+
+static void
+dsw_queue_release(struct rte_eventdev *dev __rte_unused,
+ uint8_t queue_id __rte_unused)
+{
+}
+
+static void
+queue_add_port(struct dsw_queue *queue, uint16_t port_id)
+{
+ queue->serving_ports[queue->num_serving_ports] = port_id;
+ queue->num_serving_ports++;
+}
+
+static bool
+queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
+{
+ uint16_t i;
+
+ for (i = 0; i < queue->num_serving_ports; i++)
+ if (queue->serving_ports[i] == port_id) {
+ uint16_t last_idx = queue->num_serving_ports - 1;
+ if (i != last_idx)
+ queue->serving_ports[i] =
+ queue->serving_ports[last_idx];
+ queue->num_serving_ports--;
+ return true;
+ }
+ return false;
+}
+
+static int
+dsw_port_link_unlink(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], uint16_t num, bool link)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ struct dsw_port *p = port;
+ uint16_t i;
+ uint16_t count = 0;
+
+ for (i = 0; i < num; i++) {
+ uint8_t qid = queues[i];
+ struct dsw_queue *q = &dsw->queues[qid];
+ if (link) {
+ queue_add_port(q, p->id);
+ count++;
+ } else {
+ bool removed = queue_remove_port(q, p->id);
+ if (removed)
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+dsw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[] __rte_unused, uint16_t num)
+{
+ return dsw_port_link_unlink(dev, port, queues, num, true);
+}
+
+static int
+dsw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t num)
+{
+ return dsw_port_link_unlink(dev, port, queues, num, false);
+}
+
+static void
+dsw_info_get(struct rte_eventdev *dev __rte_unused,
+ struct rte_event_dev_info *info)
+{
+ *info = (struct rte_event_dev_info) {
+ .driver_name = DSW_PMD_NAME,
+ .max_event_queues = DSW_MAX_QUEUES,
+ .max_event_queue_flows = DSW_MAX_FLOWS,
+ .max_event_queue_priority_levels = 1,
+ .max_event_priority_levels = 1,
+ .max_event_ports = DSW_MAX_PORTS,
+ .max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
+ .max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
+ .max_num_events = DSW_MAX_EVENTS,
+ .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
+ };
+}
+
+static int
+dsw_configure(const struct rte_eventdev *dev)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ const struct rte_event_dev_config *conf = &dev->data->dev_conf;
+ int32_t min_max_in_flight;
+
+ dsw->num_ports = conf->nb_event_ports;
+ dsw->num_queues = conf->nb_event_queues;
+
+ /* Avoid a situation where consumer ports are holding all the
+ * credits, without making use of them.
+ */
+ min_max_in_flight = conf->nb_event_ports * DSW_PORT_MAX_CREDITS;
+
+ dsw->max_inflight = RTE_MAX(conf->nb_events_limit, min_max_in_flight);
+
+ return 0;
+}
+
+
+static void
+initial_flow_to_port_assignment(struct dsw_evdev *dsw)
+{
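+	/* Randomly spread every flow hash of each queue across the queue's serving ports. */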
+ uint8_t queue_id;
+ for (queue_id = 0; queue_id < dsw->num_queues; queue_id++) {
+ struct dsw_queue *queue = &dsw->queues[queue_id];
+ uint16_t flow_hash;
+ for (flow_hash = 0; flow_hash < DSW_MAX_FLOWS; flow_hash++) {
+ uint8_t port_idx =
+ rte_rand() % queue->num_serving_ports;
+ uint8_t port_id =
+ queue->serving_ports[port_idx];
+ dsw->queues[queue_id].flow_to_port_map[flow_hash] =
+ port_id;
+ }
+ }
+}
+
+static int
+dsw_start(struct rte_eventdev *dev)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ uint16_t i;
+ uint64_t now;
+
+ rte_atomic32_init(&dsw->credits_on_loan);
+
+ initial_flow_to_port_assignment(dsw);
+
+ now = rte_get_timer_cycles();
+ for (i = 0; i < dsw->num_ports; i++) {
+ dsw->ports[i].measurement_start = now;
+ dsw->ports[i].busy_start = now;
+ }
+
+ return 0;
+}
+
+static void
+dsw_port_drain_buf(uint8_t dev_id, struct rte_event *buf, uint16_t buf_len,
+ eventdev_stop_flush_t flush, void *flush_arg)
+{
+ uint16_t i;
+
+ for (i = 0; i < buf_len; i++)
+ flush(dev_id, buf[i], flush_arg);
+}
+
+static void
+dsw_port_drain_paused(uint8_t dev_id, struct dsw_port *port,
+ eventdev_stop_flush_t flush, void *flush_arg)
+{
+ dsw_port_drain_buf(dev_id, port->paused_events, port->paused_events_len,
+ flush, flush_arg);
+}
+
+static void
+dsw_port_drain_out(uint8_t dev_id, struct dsw_evdev *dsw, struct dsw_port *port,
+ eventdev_stop_flush_t flush, void *flush_arg)
+{
+ uint16_t dport_id;
+
+ for (dport_id = 0; dport_id < dsw->num_ports; dport_id++)
+ if (dport_id != port->id)
+ dsw_port_drain_buf(dev_id, port->out_buffer[dport_id],
+ port->out_buffer_len[dport_id],
+ flush, flush_arg);
+}
+
+static void
+dsw_port_drain_in_ring(uint8_t dev_id, struct dsw_port *port,
+ eventdev_stop_flush_t flush, void *flush_arg)
+{
+ struct rte_event ev;
+
+ while (rte_event_ring_dequeue_burst(port->in_ring, &ev, 1, NULL))
+ flush(dev_id, ev, flush_arg);
+}
+
+static void
+dsw_drain(uint8_t dev_id, struct dsw_evdev *dsw,
+ eventdev_stop_flush_t flush, void *flush_arg)
+{
+ uint16_t port_id;
+
+ if (flush == NULL)
+ return;
+
+ for (port_id = 0; port_id < dsw->num_ports; port_id++) {
+ struct dsw_port *port = &dsw->ports[port_id];
+
+ dsw_port_drain_out(dev_id, dsw, port, flush, flush_arg);
+ dsw_port_drain_paused(dev_id, port, flush, flush_arg);
+ dsw_port_drain_in_ring(dev_id, port, flush, flush_arg);
+ }
+}
+
+static void
+dsw_stop(struct rte_eventdev *dev)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ uint8_t dev_id;
+ eventdev_stop_flush_t flush;
+ void *flush_arg;
+
+ dev_id = dev->data->dev_id;
+ flush = dev->dev_ops->dev_stop_flush;
+ flush_arg = dev->data->dev_stop_flush_arg;
+
+ dsw_drain(dev_id, dsw, flush, flush_arg);
+}
+
+static int
+dsw_close(struct rte_eventdev *dev)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+
+ dsw->num_ports = 0;
+ dsw->num_queues = 0;
+
+ return 0;
+}
+
+static struct rte_eventdev_ops dsw_evdev_ops = {
+ .port_setup = dsw_port_setup,
+ .port_def_conf = dsw_port_def_conf,
+ .port_release = dsw_port_release,
+ .queue_setup = dsw_queue_setup,
+ .queue_def_conf = dsw_queue_def_conf,
+ .queue_release = dsw_queue_release,
+ .port_link = dsw_port_link,
+ .port_unlink = dsw_port_unlink,
+ .dev_infos_get = dsw_info_get,
+ .dev_configure = dsw_configure,
+ .dev_start = dsw_start,
+ .dev_stop = dsw_stop,
+ .dev_close = dsw_close,
+ .xstats_get = dsw_xstats_get,
+ .xstats_get_names = dsw_xstats_get_names,
+ .xstats_get_by_name = dsw_xstats_get_by_name
+};
+
+static int
+dsw_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_eventdev *dev;
+ struct dsw_evdev *dsw;
+
+ name = rte_vdev_device_name(vdev);
+
+ dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
+ rte_socket_id());
+ if (dev == NULL)
+ return -EFAULT;
+
+ dev->dev_ops = &dsw_evdev_ops;
+ dev->enqueue = dsw_event_enqueue;
+ dev->enqueue_burst = dsw_event_enqueue_burst;
+ dev->enqueue_new_burst = dsw_event_enqueue_new_burst;
+ dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
+ dev->dequeue = dsw_event_dequeue;
+ dev->dequeue_burst = dsw_event_dequeue_burst;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dsw = dev->data->dev_private;
+ dsw->data = dev->data;
+
+ return 0;
+}
+
+static int
+dsw_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_dsw_pmd_drv = {
+ .probe = dsw_probe,
+ .remove = dsw_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
new file mode 100644
index 00000000..dc28ab12
--- /dev/null
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Ericsson AB
+ */
+
+#ifndef _DSW_EVDEV_H_
+#define _DSW_EVDEV_H_
+
+#include <rte_event_ring.h>
+#include <rte_eventdev.h>
+
+#define DSW_PMD_NAME RTE_STR(event_dsw)
+
+/* Code changes are required to allow more ports. */
+#define DSW_MAX_PORTS (64)
+#define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
+#define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
+#define DSW_MAX_PORT_OUT_BUFFER (32)
+
+#define DSW_MAX_QUEUES (16)
+
+#define DSW_MAX_EVENTS (16384)
+
+/* Code changes are required to allow more flows than 32k. */
+#define DSW_MAX_FLOWS_BITS (15)
+#define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
+#define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
+
+/* Eventdev RTE_SCHED_TYPE_PARALLEL doesn't have a concept of flows,
+ * but the 'dsw' scheduler (more or less) randomly assigns a flow id to
+ * events on parallel queues, to be able to reuse some of the
+ * migration mechanism and scheduling logic from
+ * RTE_SCHED_TYPE_ATOMIC. By moving one of the parallel "flows" from a
+ * particular port, the likelihood of events being scheduled to this
+ * port is reduced, and thus a kind of statistical load balancing is
+ * achieved.
+ */
+#define DSW_PARALLEL_FLOWS (1024)
+
+/* 'Background tasks' poll the control rings for migration-related
+ * messages and flush the output buffers (so buffered events don't
+ * linger too long). This value shouldn't be too low, since the
+ * system won't benefit from the 'batching' effects of the output
+ * buffer, and it shouldn't be too high, since that would make
+ * buffered events linger too long in case the port goes idle.
+ */
+#define DSW_MAX_PORT_OPS_PER_BG_TASK (128)
+
+/* Avoid making small 'loans' from the central in-flight event credit
+ * pool, to improve efficiency.
+ */
+#define DSW_MIN_CREDIT_LOAN (64)
+#define DSW_PORT_MAX_CREDITS (2*DSW_MIN_CREDIT_LOAN)
+#define DSW_PORT_MIN_CREDITS (DSW_MIN_CREDIT_LOAN)
+
+/* The rings are dimensioned so that all in-flight events can reside
+ * on any one of the port rings, to avoid the trouble of having to
+ * care about the case where there's no room on the destination port's
+ * input ring.
+ */
+#define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)
+
+#define DSW_MAX_LOAD (INT16_MAX)
+#define DSW_LOAD_FROM_PERCENT(x) ((int16_t)(((x)*DSW_MAX_LOAD)/100))
+#define DSW_LOAD_TO_PERCENT(x) ((100*x)/DSW_MAX_LOAD)
+
+/* The thought behind keeping the load update interval shorter than
+ * the migration interval is that the load from newly migrated flows
+ * should 'show up' on the load measurement before new migrations are
+ * considered. This is to avoid having too many flows, from too many
+ * source ports, being migrated too quickly to a lightly loaded port -
+ * in particular since this might cause the system to oscillate.
+ */
+#define DSW_LOAD_UPDATE_INTERVAL (DSW_MIGRATION_INTERVAL/4)
+#define DSW_OLD_LOAD_WEIGHT (1)
+
+/* The minimum time (in us) between two flow migrations. What puts an
+ * upper limit on the actual migration rate is primarily the pace at
+ * which the ports send and receive control messages, which in turn is
+ * largely a function of how many cycles are spent processing an
+ * event burst.
+ */
+#define DSW_MIGRATION_INTERVAL (1000)
+#define DSW_MIN_SOURCE_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(70))
+#define DSW_MAX_TARGET_LOAD_FOR_MIGRATION (DSW_LOAD_FROM_PERCENT(95))
+
+#define DSW_MAX_EVENTS_RECORDED (128)
+
+/* Only one outstanding migration per port is allowed */
+#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS)
+
+/* Enough room for pause request/confirm and unpause request/confirm for
+ * all possible senders.
+ */
+#define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)
+
+/* With DSW_SORT_DEQUEUED enabled, the scheduler will, at the point of
+ * dequeue(), arrange events so that events with the same flow id on
+ * the same queue form a back-to-back "burst", and also so that such
+ * bursts of different flow ids, but on the same queue, also come
+ * consecutively. All this in an attempt to improve data and
+ * instruction cache usage for the application, at the cost of a
+ * scheduler overhead increase.
+ */
+
+/* #define DSW_SORT_DEQUEUED */
+
+struct dsw_queue_flow {
+ uint8_t queue_id;
+ uint16_t flow_hash;
+};
+
+enum dsw_migration_state {
+ DSW_MIGRATION_STATE_IDLE,
+ DSW_MIGRATION_STATE_PAUSING,
+ DSW_MIGRATION_STATE_FORWARDING,
+ DSW_MIGRATION_STATE_UNPAUSING
+};
+
+struct dsw_port {
+ uint16_t id;
+
+ /* Keeping a pointer here to avoid container_of() calls, which
+ * are expensive since they are very frequent and will result
+ * in an integer multiplication (since the port id is an index
+ * into the dsw_evdev port array).
+ */
+ struct dsw_evdev *dsw;
+
+ uint16_t dequeue_depth;
+ uint16_t enqueue_depth;
+
+ int32_t inflight_credits;
+
+ int32_t new_event_threshold;
+
+ uint16_t pending_releases;
+
+ uint16_t next_parallel_flow_id;
+
+ uint16_t ops_since_bg_task;
+
+ /* most recent 'background' processing */
+ uint64_t last_bg;
+
+ /* For port load measurement. */
+ uint64_t next_load_update;
+ uint64_t load_update_interval;
+ uint64_t measurement_start;
+ uint64_t busy_start;
+ uint64_t busy_cycles;
+ uint64_t total_busy_cycles;
+
+ /* For the ctl interface and flow migration mechanism. */
+ uint64_t next_migration;
+ uint64_t migration_interval;
+ enum dsw_migration_state migration_state;
+
+ uint64_t migration_start;
+ uint64_t migrations;
+ uint64_t migration_latency;
+
+ uint8_t migration_target_port_id;
+ struct dsw_queue_flow migration_target_qf;
+ uint8_t cfm_cnt;
+
+ uint16_t paused_flows_len;
+ struct dsw_queue_flow paused_flows[DSW_MAX_PAUSED_FLOWS];
+
+ /* In a very contrived worst case all inflight events can be
+	 * lying around paused here.
+ */
+ uint16_t paused_events_len;
+ struct rte_event paused_events[DSW_MAX_EVENTS];
+
+ uint16_t seen_events_len;
+ uint16_t seen_events_idx;
+ struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED];
+
+ uint64_t new_enqueued;
+ uint64_t forward_enqueued;
+ uint64_t release_enqueued;
+ uint64_t queue_enqueued[DSW_MAX_QUEUES];
+
+ uint64_t dequeued;
+ uint64_t queue_dequeued[DSW_MAX_QUEUES];
+
+ uint16_t out_buffer_len[DSW_MAX_PORTS];
+ struct rte_event out_buffer[DSW_MAX_PORTS][DSW_MAX_PORT_OUT_BUFFER];
+
+ uint16_t in_buffer_len;
+ uint16_t in_buffer_start;
+ /* This buffer may contain events that were read up from the
+ * in_ring during the flow migration process.
+ */
+ struct rte_event in_buffer[DSW_MAX_EVENTS];
+
+ struct rte_event_ring *in_ring __rte_cache_aligned;
+
+ struct rte_ring *ctl_in_ring __rte_cache_aligned;
+
+ /* Estimate of current port load. */
+ rte_atomic16_t load __rte_cache_aligned;
+} __rte_cache_aligned;
+
+struct dsw_queue {
+ uint8_t schedule_type;
+ uint8_t serving_ports[DSW_MAX_PORTS];
+ uint16_t num_serving_ports;
+
+ uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned;
+};
+
+struct dsw_evdev {
+ struct rte_eventdev_data *data;
+
+ struct dsw_port ports[DSW_MAX_PORTS];
+ uint16_t num_ports;
+ struct dsw_queue queues[DSW_MAX_QUEUES];
+ uint8_t num_queues;
+ int32_t max_inflight;
+
+ rte_atomic32_t credits_on_loan __rte_cache_aligned;
+};
+
+#define DSW_CTL_PAUS_REQ (0)
+#define DSW_CTL_UNPAUS_REQ (1)
+#define DSW_CTL_CFM (2)
+
+/* sizeof(struct dsw_ctl_msg) must be less than or equal to
+ * sizeof(void *), to fit on the control ring.
+ */
+struct dsw_ctl_msg {
+ uint8_t type:2;
+ uint8_t originating_port_id:6;
+ uint8_t queue_id;
+ uint16_t flow_hash;
+} __rte_packed;
+
+uint16_t dsw_event_enqueue(void *port, const struct rte_event *event);
+uint16_t dsw_event_enqueue_burst(void *port,
+ const struct rte_event events[],
+ uint16_t events_len);
+uint16_t dsw_event_enqueue_new_burst(void *port,
+ const struct rte_event events[],
+ uint16_t events_len);
+uint16_t dsw_event_enqueue_forward_burst(void *port,
+ const struct rte_event events[],
+ uint16_t events_len);
+
+uint16_t dsw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
+uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
+ uint16_t num, uint64_t wait);
+
+int dsw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+int dsw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t dsw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+
+static inline struct dsw_evdev *
+dsw_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+#define DSW_LOG_DP(level, fmt, args...) \
+ RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt, \
+ DSW_PMD_NAME, \
+ __func__, __LINE__, ## args)
+
+#define DSW_LOG_DP_PORT(level, port_id, fmt, args...) \
+ DSW_LOG_DP(level, "<Port %d> " fmt, port_id, ## args)
+
+#endif
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
new file mode 100644
index 00000000..61a66fab
--- /dev/null
+++ b/drivers/event/dsw/dsw_event.c
@@ -0,0 +1,1253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Ericsson AB
+ */
+
+#include "dsw_evdev.h"
+
+#ifdef DSW_SORT_DEQUEUED
+#include "dsw_sort.h"
+#endif
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_memcpy.h>
+#include <rte_random.h>
+
+static bool
+dsw_port_acquire_credits(struct dsw_evdev *dsw, struct dsw_port *port,
+ int32_t credits)
+{
+ int32_t inflight_credits = port->inflight_credits;
+ int32_t missing_credits = credits - inflight_credits;
+ int32_t total_on_loan;
+ int32_t available;
+ int32_t acquired_credits;
+ int32_t new_total_on_loan;
+
+ if (likely(missing_credits <= 0)) {
+ port->inflight_credits -= credits;
+ return true;
+ }
+
+ total_on_loan = rte_atomic32_read(&dsw->credits_on_loan);
+ available = dsw->max_inflight - total_on_loan;
+ acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
+
+ if (available < acquired_credits)
+ return false;
+
+	/* This is racy - no locks are involved, and thus some other
+ * thread can allocate tokens in between the check and the
+ * allocation.
+ */
+ new_total_on_loan = rte_atomic32_add_return(&dsw->credits_on_loan,
+ acquired_credits);
+
+ if (unlikely(new_total_on_loan > dsw->max_inflight)) {
+ /* Some other port took the last credits */
+ rte_atomic32_sub(&dsw->credits_on_loan, acquired_credits);
+ return false;
+ }
+
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Acquired %d tokens from pool.\n",
+ acquired_credits);
+
+ port->inflight_credits += acquired_credits;
+ port->inflight_credits -= credits;
+
+ return true;
+}
+
+static void
+dsw_port_return_credits(struct dsw_evdev *dsw, struct dsw_port *port,
+ int32_t credits)
+{
+ port->inflight_credits += credits;
+
+ if (unlikely(port->inflight_credits > DSW_PORT_MAX_CREDITS)) {
+ int32_t leave_credits = DSW_PORT_MIN_CREDITS;
+ int32_t return_credits =
+ port->inflight_credits - leave_credits;
+
+ port->inflight_credits = leave_credits;
+
+ rte_atomic32_sub(&dsw->credits_on_loan, return_credits);
+
+ DSW_LOG_DP_PORT(DEBUG, port->id,
+ "Returned %d tokens to pool.\n",
+ return_credits);
+ }
+}
+
+static void
+dsw_port_enqueue_stats(struct dsw_port *port, uint16_t num_new,
+ uint16_t num_forward, uint16_t num_release)
+{
+ port->new_enqueued += num_new;
+ port->forward_enqueued += num_forward;
+ port->release_enqueued += num_release;
+}
+
+static void
+dsw_port_queue_enqueue_stats(struct dsw_port *source_port, uint8_t queue_id)
+{
+ source_port->queue_enqueued[queue_id]++;
+}
+
+static void
+dsw_port_dequeue_stats(struct dsw_port *port, uint16_t num)
+{
+ port->dequeued += num;
+}
+
+static void
+dsw_port_queue_dequeued_stats(struct dsw_port *source_port, uint8_t queue_id)
+{
+ source_port->queue_dequeued[queue_id]++;
+}
+
+static void
+dsw_port_load_record(struct dsw_port *port, unsigned int dequeued)
+{
+ if (dequeued > 0 && port->busy_start == 0)
+ /* work period begins */
+ port->busy_start = rte_get_timer_cycles();
+ else if (dequeued == 0 && port->busy_start > 0) {
+ /* work period ends */
+ uint64_t work_period =
+ rte_get_timer_cycles() - port->busy_start;
+ port->busy_cycles += work_period;
+ port->busy_start = 0;
+ }
+}
+
+static int16_t
+dsw_port_load_close_period(struct dsw_port *port, uint64_t now)
+{
+ uint64_t passed = now - port->measurement_start;
+ uint64_t busy_cycles = port->busy_cycles;
+
+ if (port->busy_start > 0) {
+ busy_cycles += (now - port->busy_start);
+ port->busy_start = now;
+ }
+
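+	/* Load is the busy fraction of the measurement period, scaled to DSW_MAX_LOAD. */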
+ int16_t load = (DSW_MAX_LOAD * busy_cycles) / passed;
+
+ port->measurement_start = now;
+ port->busy_cycles = 0;
+
+ port->total_busy_cycles += busy_cycles;
+
+ return load;
+}
+
+static void
+dsw_port_load_update(struct dsw_port *port, uint64_t now)
+{
+ int16_t old_load;
+ int16_t period_load;
+ int16_t new_load;
+
+ old_load = rte_atomic16_read(&port->load);
+
+ period_load = dsw_port_load_close_period(port, now);
+
+ new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
+ (DSW_OLD_LOAD_WEIGHT+1);
+
+ rte_atomic16_set(&port->load, new_load);
+}
+
+static void
+dsw_port_consider_load_update(struct dsw_port *port, uint64_t now)
+{
+ if (now < port->next_load_update)
+ return;
+
+ port->next_load_update = now + port->load_update_interval;
+
+ dsw_port_load_update(port, now);
+}
+
+static void
+dsw_port_ctl_enqueue(struct dsw_port *port, struct dsw_ctl_msg *msg)
+{
+ void *raw_msg;
+
+ memcpy(&raw_msg, msg, sizeof(*msg));
+
+ /* there's always room on the ring */
+ while (rte_ring_enqueue(port->ctl_in_ring, raw_msg) != 0)
+ rte_pause();
+}
+
+static int
+dsw_port_ctl_dequeue(struct dsw_port *port, struct dsw_ctl_msg *msg)
+{
+ void *raw_msg;
+ int rc;
+
+ rc = rte_ring_dequeue(port->ctl_in_ring, &raw_msg);
+
+ if (rc == 0)
+ memcpy(msg, &raw_msg, sizeof(*msg));
+
+ return rc;
+}
+
+static void
+dsw_port_ctl_broadcast(struct dsw_evdev *dsw, struct dsw_port *source_port,
+ uint8_t type, uint8_t queue_id, uint16_t flow_hash)
+{
+ uint16_t port_id;
+ struct dsw_ctl_msg msg = {
+ .type = type,
+ .originating_port_id = source_port->id,
+ .queue_id = queue_id,
+ .flow_hash = flow_hash
+ };
+
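+	/* Deliver the control message to every port except the originating one. */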
+ for (port_id = 0; port_id < dsw->num_ports; port_id++)
+ if (port_id != source_port->id)
+ dsw_port_ctl_enqueue(&dsw->ports[port_id], &msg);
+}
+
+static bool
+dsw_port_is_flow_paused(struct dsw_port *port, uint8_t queue_id,
+ uint16_t flow_hash)
+{
+ uint16_t i;
+
+ for (i = 0; i < port->paused_flows_len; i++) {
+ struct dsw_queue_flow *qf = &port->paused_flows[i];
+ if (qf->queue_id == queue_id &&
+ qf->flow_hash == flow_hash)
+ return true;
+ }
+ return false;
+}
+
+static void
+dsw_port_add_paused_flow(struct dsw_port *port, uint8_t queue_id,
+ uint16_t paused_flow_hash)
+{
+ port->paused_flows[port->paused_flows_len] = (struct dsw_queue_flow) {
+ .queue_id = queue_id,
+ .flow_hash = paused_flow_hash
+ };
+ port->paused_flows_len++;
+}
+
+static void
+dsw_port_remove_paused_flow(struct dsw_port *port, uint8_t queue_id,
+ uint16_t paused_flow_hash)
+{
+ uint16_t i;
+
+ for (i = 0; i < port->paused_flows_len; i++) {
+ struct dsw_queue_flow *qf = &port->paused_flows[i];
+
+ if (qf->queue_id == queue_id &&
+ qf->flow_hash == paused_flow_hash) {
+ uint16_t last_idx = port->paused_flows_len-1;
+ if (i != last_idx)
+ port->paused_flows[i] =
+ port->paused_flows[last_idx];
+ port->paused_flows_len--;
+ break;
+ }
+ }
+}
+
+static void
+dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port);
+
+static void
+dsw_port_handle_pause_flow(struct dsw_evdev *dsw, struct dsw_port *port,
+ uint8_t originating_port_id, uint8_t queue_id,
+ uint16_t paused_flow_hash)
+{
+ struct dsw_ctl_msg cfm = {
+ .type = DSW_CTL_CFM,
+ .originating_port_id = port->id,
+ .queue_id = queue_id,
+ .flow_hash = paused_flow_hash
+ };
+
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Pausing queue_id %d flow_hash %d.\n",
+ queue_id, paused_flow_hash);
+
+ /* There might be already-scheduled events belonging to the
+ * paused flow in the output buffers.
+ */
+ dsw_port_flush_out_buffers(dsw, port);
+
+ dsw_port_add_paused_flow(port, queue_id, paused_flow_hash);
+
+	/* Make sure any stores to the original port's in_ring are seen
+ * before the ctl message.
+ */
+ rte_smp_wmb();
+
+ dsw_port_ctl_enqueue(&dsw->ports[originating_port_id], &cfm);
+}
+
+static void
+dsw_find_lowest_load_port(uint8_t *port_ids, uint16_t num_port_ids,
+ uint8_t exclude_port_id, int16_t *port_loads,
+ uint8_t *target_port_id, int16_t *target_load)
+{
+ int16_t candidate_port_id = -1;
+ int16_t candidate_load = DSW_MAX_LOAD;
+ uint16_t i;
+
+ for (i = 0; i < num_port_ids; i++) {
+ uint8_t port_id = port_ids[i];
+ if (port_id != exclude_port_id) {
+ int16_t load = port_loads[port_id];
+ if (candidate_port_id == -1 ||
+ load < candidate_load) {
+ candidate_port_id = port_id;
+ candidate_load = load;
+ }
+ }
+ }
+ *target_port_id = candidate_port_id;
+ *target_load = candidate_load;
+}
+
+struct dsw_queue_flow_burst {
+ struct dsw_queue_flow queue_flow;
+ uint16_t count;
+};
+
+static inline int
+dsw_cmp_burst(const void *v_burst_a, const void *v_burst_b)
+{
+ const struct dsw_queue_flow_burst *burst_a = v_burst_a;
+ const struct dsw_queue_flow_burst *burst_b = v_burst_b;
+
+ int a_count = burst_a->count;
+ int b_count = burst_b->count;
+
+ return a_count - b_count;
+}
+
+#define DSW_QF_TO_INT(_qf) \
+ ((int)((((_qf)->queue_id)<<16)|((_qf)->flow_hash)))
+
+static inline int
+dsw_cmp_qf(const void *v_qf_a, const void *v_qf_b)
+{
+ const struct dsw_queue_flow *qf_a = v_qf_a;
+ const struct dsw_queue_flow *qf_b = v_qf_b;
+
+ return DSW_QF_TO_INT(qf_a) - DSW_QF_TO_INT(qf_b);
+}
+
+static uint16_t
+dsw_sort_qfs_to_bursts(struct dsw_queue_flow *qfs, uint16_t qfs_len,
+ struct dsw_queue_flow_burst *bursts)
+{
+ uint16_t i;
+ struct dsw_queue_flow_burst *current_burst = NULL;
+ uint16_t num_bursts = 0;
+
+ /* We don't need the stable property, and the list is likely
+ * large enough for qsort() to outperform dsw_stable_sort(),
+ * so we use qsort() here.
+ */
+ qsort(qfs, qfs_len, sizeof(qfs[0]), dsw_cmp_qf);
+
+ /* arrange the (now-consecutive) events into bursts */
+ for (i = 0; i < qfs_len; i++) {
+ if (i == 0 ||
+ dsw_cmp_qf(&qfs[i], &current_burst->queue_flow) != 0) {
+ current_burst = &bursts[num_bursts];
+ current_burst->queue_flow = qfs[i];
+ current_burst->count = 0;
+ num_bursts++;
+ }
+ current_burst->count++;
+ }
+
+ qsort(bursts, num_bursts, sizeof(bursts[0]), dsw_cmp_burst);
+
+ return num_bursts;
+}
+
+static bool
+dsw_retrieve_port_loads(struct dsw_evdev *dsw, int16_t *port_loads,
+ int16_t load_limit)
+{
+ bool below_limit = false;
+ uint16_t i;
+
+ for (i = 0; i < dsw->num_ports; i++) {
+ int16_t load = rte_atomic16_read(&dsw->ports[i].load);
+ if (load < load_limit)
+ below_limit = true;
+ port_loads[i] = load;
+ }
+ return below_limit;
+}
+
+static bool
+dsw_select_migration_target(struct dsw_evdev *dsw,
+ struct dsw_port *source_port,
+ struct dsw_queue_flow_burst *bursts,
+ uint16_t num_bursts, int16_t *port_loads,
+ int16_t max_load, struct dsw_queue_flow *target_qf,
+ uint8_t *target_port_id)
+{
+ uint16_t source_load = port_loads[source_port->id];
+ uint16_t i;
+
+ for (i = 0; i < num_bursts; i++) {
+ struct dsw_queue_flow *qf = &bursts[i].queue_flow;
+
+ if (dsw_port_is_flow_paused(source_port, qf->queue_id,
+ qf->flow_hash))
+ continue;
+
+ struct dsw_queue *queue = &dsw->queues[qf->queue_id];
+ int16_t target_load;
+
+ dsw_find_lowest_load_port(queue->serving_ports,
+ queue->num_serving_ports,
+ source_port->id, port_loads,
+ target_port_id, &target_load);
+
+ if (target_load < source_load &&
+ target_load < max_load) {
+ *target_qf = *qf;
+ return true;
+ }
+ }
+
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "For the %d flows considered, "
+ "no target port found with load less than %d.\n",
+ num_bursts, DSW_LOAD_TO_PERCENT(max_load));
+
+ return false;
+}
+
+static uint8_t
+dsw_schedule(struct dsw_evdev *dsw, uint8_t queue_id, uint16_t flow_hash)
+{
+ struct dsw_queue *queue = &dsw->queues[queue_id];
+ uint8_t port_id;
+
+ if (queue->num_serving_ports > 1)
+ port_id = queue->flow_to_port_map[flow_hash];
+ else
+ /* A single-link queue, or atomic/ordered/parallel but
+ * with just a single serving port.
+ */
+ port_id = queue->serving_ports[0];
+
+ DSW_LOG_DP(DEBUG, "Event with queue_id %d flow_hash %d is scheduled "
+ "to port %d.\n", queue_id, flow_hash, port_id);
+
+ return port_id;
+}
+
+static void
+dsw_port_transmit_buffered(struct dsw_evdev *dsw, struct dsw_port *source_port,
+ uint8_t dest_port_id)
+{
+ struct dsw_port *dest_port = &(dsw->ports[dest_port_id]);
+ uint16_t *buffer_len = &source_port->out_buffer_len[dest_port_id];
+ struct rte_event *buffer = source_port->out_buffer[dest_port_id];
+ uint16_t enqueued = 0;
+
+ if (*buffer_len == 0)
+ return;
+
+ /* The rings are dimensioned to fit all in-flight events (even
+ * on a single ring), so looping will work.
+ */
+ do {
+ enqueued +=
+ rte_event_ring_enqueue_burst(dest_port->in_ring,
+ buffer+enqueued,
+ *buffer_len-enqueued,
+ NULL);
+ } while (unlikely(enqueued != *buffer_len));
+
+ (*buffer_len) = 0;
+}
+
+static uint16_t
+dsw_port_get_parallel_flow_id(struct dsw_port *port)
+{
+ uint16_t flow_id = port->next_parallel_flow_id;
+
+ port->next_parallel_flow_id =
+ (port->next_parallel_flow_id + 1) % DSW_PARALLEL_FLOWS;
+
+ return flow_id;
+}
+
+static void
+dsw_port_buffer_paused(struct dsw_port *port,
+ const struct rte_event *paused_event)
+{
+ port->paused_events[port->paused_events_len] = *paused_event;
+ port->paused_events_len++;
+}
+
+static void
+dsw_port_buffer_non_paused(struct dsw_evdev *dsw, struct dsw_port *source_port,
+ uint8_t dest_port_id, const struct rte_event *event)
+{
+ struct rte_event *buffer = source_port->out_buffer[dest_port_id];
+ uint16_t *buffer_len = &source_port->out_buffer_len[dest_port_id];
+
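+	/* If the per-destination buffer is full, flush it to the destination port's in_ring first. */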
+ if (*buffer_len == DSW_MAX_PORT_OUT_BUFFER)
+ dsw_port_transmit_buffered(dsw, source_port, dest_port_id);
+
+ buffer[*buffer_len] = *event;
+
+ (*buffer_len)++;
+}
+
+#define DSW_FLOW_ID_BITS (24)
+static uint16_t
+dsw_flow_id_hash(uint32_t flow_id)
+{
+ uint16_t hash = 0;
+ uint16_t offset = 0;
+
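+	/* Fold the 24-bit flow id into DSW_MAX_FLOWS_BITS-wide chunks, XORed together. */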
+ do {
+ hash ^= ((flow_id >> offset) & DSW_MAX_FLOWS_MASK);
+ offset += DSW_MAX_FLOWS_BITS;
+ } while (offset < DSW_FLOW_ID_BITS);
+
+ return hash;
+}
+
+static void
+dsw_port_buffer_parallel(struct dsw_evdev *dsw, struct dsw_port *source_port,
+ struct rte_event event)
+{
+ uint8_t dest_port_id;
+
+ event.flow_id = dsw_port_get_parallel_flow_id(source_port);
+
+ dest_port_id = dsw_schedule(dsw, event.queue_id,
+ dsw_flow_id_hash(event.flow_id));
+
+ dsw_port_buffer_non_paused(dsw, source_port, dest_port_id, &event);
+}
+
+static void
+dsw_port_buffer_event(struct dsw_evdev *dsw, struct dsw_port *source_port,
+ const struct rte_event *event)
+{
+ uint16_t flow_hash;
+ uint8_t dest_port_id;
+
+ if (unlikely(dsw->queues[event->queue_id].schedule_type ==
+ RTE_SCHED_TYPE_PARALLEL)) {
+ dsw_port_buffer_parallel(dsw, source_port, *event);
+ return;
+ }
+
+ flow_hash = dsw_flow_id_hash(event->flow_id);
+
+ if (unlikely(dsw_port_is_flow_paused(source_port, event->queue_id,
+ flow_hash))) {
+ dsw_port_buffer_paused(source_port, event);
+ return;
+ }
+
+ dest_port_id = dsw_schedule(dsw, event->queue_id, flow_hash);
+
+ dsw_port_buffer_non_paused(dsw, source_port, dest_port_id, event);
+}
+
+static void
+dsw_port_flush_paused_events(struct dsw_evdev *dsw,
+ struct dsw_port *source_port,
+ uint8_t queue_id, uint16_t paused_flow_hash)
+{
+ uint16_t paused_events_len = source_port->paused_events_len;
+ struct rte_event paused_events[paused_events_len];
+ uint8_t dest_port_id;
+ uint16_t i;
+
+ if (paused_events_len == 0)
+ return;
+
+ if (dsw_port_is_flow_paused(source_port, queue_id, paused_flow_hash))
+ return;
+
+ rte_memcpy(paused_events, source_port->paused_events,
+ paused_events_len * sizeof(struct rte_event));
+
+ source_port->paused_events_len = 0;
+
+ dest_port_id = dsw_schedule(dsw, queue_id, paused_flow_hash);
+
+ for (i = 0; i < paused_events_len; i++) {
+ struct rte_event *event = &paused_events[i];
+ uint16_t flow_hash;
+
+ flow_hash = dsw_flow_id_hash(event->flow_id);
+
+ if (event->queue_id == queue_id &&
+ flow_hash == paused_flow_hash)
+ dsw_port_buffer_non_paused(dsw, source_port,
+ dest_port_id, event);
+ else
+ dsw_port_buffer_paused(source_port, event);
+ }
+}
+
+static void
+dsw_port_migration_stats(struct dsw_port *port)
+{
+ uint64_t migration_latency;
+
+ migration_latency = (rte_get_timer_cycles() - port->migration_start);
+ port->migration_latency += migration_latency;
+ port->migrations++;
+}
+
+static void
+dsw_port_end_migration(struct dsw_evdev *dsw, struct dsw_port *port)
+{
+ uint8_t queue_id = port->migration_target_qf.queue_id;
+ uint16_t flow_hash = port->migration_target_qf.flow_hash;
+
+ port->migration_state = DSW_MIGRATION_STATE_IDLE;
+ port->seen_events_len = 0;
+
+ dsw_port_migration_stats(port);
+
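+	/* Parallel queues skip the pause protocol, so only non-parallel
+	 * queues need to be unpaused and have their buffered events flushed.
+	 */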
+ if (dsw->queues[queue_id].schedule_type != RTE_SCHED_TYPE_PARALLEL) {
+ dsw_port_remove_paused_flow(port, queue_id, flow_hash);
+ dsw_port_flush_paused_events(dsw, port, queue_id, flow_hash);
+ }
+
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for queue_id "
+ "%d flow_hash %d.\n", queue_id, flow_hash);
+}
+
+static void
+dsw_port_consider_migration(struct dsw_evdev *dsw,
+ struct dsw_port *source_port,
+ uint64_t now)
+{
+ bool any_port_below_limit;
+ struct dsw_queue_flow *seen_events = source_port->seen_events;
+ uint16_t seen_events_len = source_port->seen_events_len;
+ struct dsw_queue_flow_burst bursts[DSW_MAX_EVENTS_RECORDED];
+ uint16_t num_bursts;
+ int16_t source_port_load;
+ int16_t port_loads[dsw->num_ports];
+
+ if (now < source_port->next_migration)
+ return;
+
+ if (dsw->num_ports == 1)
+ return;
+
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering migration.\n");
+
+ /* Randomize interval to avoid having all threads considering
+	 * migration at the same point in time, which might lead to
+ * all choosing the same target port.
+ */
+ source_port->next_migration = now +
+ source_port->migration_interval / 2 +
+ rte_rand() % source_port->migration_interval;
+
+ if (source_port->migration_state != DSW_MIGRATION_STATE_IDLE) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ "Migration already in progress.\n");
+ return;
+ }
+
+ /* For simplicity, avoid migration in the unlikely case there
+	 * are still events to consume in the in_buffer (from the last
+ * migration).
+ */
+ if (source_port->in_buffer_len > 0) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are still "
+ "events in the input buffer.\n");
+ return;
+ }
+
+ source_port_load = rte_atomic16_read(&source_port->load);
+ if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ "Load %d is below threshold level %d.\n",
+ DSW_LOAD_TO_PERCENT(source_port_load),
+ DSW_LOAD_TO_PERCENT(DSW_MIN_SOURCE_LOAD_FOR_MIGRATION));
+ return;
+ }
+
+ /* Avoid starting any expensive operations (sorting etc), in
+ * case of a scenario with all ports above the load limit.
+ */
+ any_port_below_limit =
+ dsw_retrieve_port_loads(dsw, port_loads,
+ DSW_MAX_TARGET_LOAD_FOR_MIGRATION);
+ if (!any_port_below_limit) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ "Candidate target ports are all too highly "
+ "loaded.\n");
+ return;
+ }
+
+ /* Sort flows into 'bursts' to allow attempting to migrate
+ * small (but still active) flows first - this is to avoid
+ * having large flows moving around the worker cores too much
+ * (to avoid cache misses, among other things). Of course, the
+ * number of recorded events (queue+flow ids) is limited, and
+ * provides only a snapshot, so only so many conclusions can
+ * be drawn from this data.
+ */
+ num_bursts = dsw_sort_qfs_to_bursts(seen_events, seen_events_len,
+ bursts);
+ /* For non-big-little systems, there's no point in moving the
+ * only (known) flow.
+ */
+ if (num_bursts < 2) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Only a single flow "
+ "queue_id %d flow_hash %d has been seen.\n",
+ bursts[0].queue_flow.queue_id,
+ bursts[0].queue_flow.flow_hash);
+ return;
+ }
+
+ /* The strategy is to first try to find a flow to move to a
+ * port with low load (below the migration-attempt
+ * threshold). If that fails, we try to find a port which is
+ * below the max threshold, and also less loaded than this
+ * port is.
+ */
+ if (!dsw_select_migration_target(dsw, source_port, bursts, num_bursts,
+ port_loads,
+ DSW_MIN_SOURCE_LOAD_FOR_MIGRATION,
+ &source_port->migration_target_qf,
+ &source_port->migration_target_port_id)
+ &&
+ !dsw_select_migration_target(dsw, source_port, bursts, num_bursts,
+ port_loads,
+ DSW_MAX_TARGET_LOAD_FOR_MIGRATION,
+ &source_port->migration_target_qf,
+ &source_port->migration_target_port_id))
+ return;
+
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Migrating queue_id %d "
+ "flow_hash %d from port %d to port %d.\n",
+ source_port->migration_target_qf.queue_id,
+ source_port->migration_target_qf.flow_hash,
+ source_port->id, source_port->migration_target_port_id);
+
+ /* We have a winner. */
+
+ source_port->migration_state = DSW_MIGRATION_STATE_PAUSING;
+ source_port->migration_start = rte_get_timer_cycles();
+
+ /* No need to go through the whole pause procedure for
+ * parallel queues, since atomic/ordered semantics need not
+ * be maintained.
+ */
+
+ if (dsw->queues[source_port->migration_target_qf.queue_id].schedule_type
+ == RTE_SCHED_TYPE_PARALLEL) {
+ uint8_t queue_id = source_port->migration_target_qf.queue_id;
+ uint16_t flow_hash = source_port->migration_target_qf.flow_hash;
+ uint8_t dest_port_id = source_port->migration_target_port_id;
+
+ /* Single byte-sized stores are always atomic. */
+ dsw->queues[queue_id].flow_to_port_map[flow_hash] =
+ dest_port_id;
+ rte_smp_wmb();
+
+ dsw_port_end_migration(dsw, source_port);
+
+ return;
+ }
+
+ /* There might be 'loopback' events already scheduled in the
+ * output buffers.
+ */
+ dsw_port_flush_out_buffers(dsw, source_port);
+
+ dsw_port_add_paused_flow(source_port,
+ source_port->migration_target_qf.queue_id,
+ source_port->migration_target_qf.flow_hash);
+
+ dsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_PAUS_REQ,
+ source_port->migration_target_qf.queue_id,
+ source_port->migration_target_qf.flow_hash);
+ source_port->cfm_cnt = 0;
+}
+
+static void
+dsw_port_flush_paused_events(struct dsw_evdev *dsw,
+ struct dsw_port *source_port,
+ uint8_t queue_id, uint16_t paused_flow_hash);
+
+static void
+dsw_port_handle_unpause_flow(struct dsw_evdev *dsw, struct dsw_port *port,
+ uint8_t originating_port_id, uint8_t queue_id,
+ uint16_t paused_flow_hash)
+{
+ struct dsw_ctl_msg cfm = {
+ .type = DSW_CTL_CFM,
+ .originating_port_id = port->id,
+ .queue_id = queue_id,
+ .flow_hash = paused_flow_hash
+ };
+
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Un-pausing queue_id %d flow_hash %d.\n",
+ queue_id, paused_flow_hash);
+
+ dsw_port_remove_paused_flow(port, queue_id, paused_flow_hash);
+
+ rte_smp_rmb();
+
+ dsw_port_ctl_enqueue(&dsw->ports[originating_port_id], &cfm);
+
+ dsw_port_flush_paused_events(dsw, port, queue_id, paused_flow_hash);
+}
+
+#define FORWARD_BURST_SIZE (32)
+
+static void
+dsw_port_forward_migrated_flow(struct dsw_port *source_port,
+ struct rte_event_ring *dest_ring,
+ uint8_t queue_id,
+ uint16_t flow_hash)
+{
+ uint16_t events_left;
+
+ /* The control ring message should be seen before the ring count
+ * is read on the port's in_ring.
+ */
+ rte_smp_rmb();
+
+ events_left = rte_event_ring_count(source_port->in_ring);
+
+ while (events_left > 0) {
+ uint16_t in_burst_size =
+ RTE_MIN(FORWARD_BURST_SIZE, events_left);
+ struct rte_event in_burst[in_burst_size];
+ uint16_t in_len;
+ uint16_t i;
+
+ in_len = rte_event_ring_dequeue_burst(source_port->in_ring,
+ in_burst,
+ in_burst_size, NULL);
+ /* No need to care about bursting forwarded events (to
+ * the destination port's in_ring), since migration
+ * doesn't happen very often, and also the majority of
+ * the dequeued events will likely *not* be forwarded.
+ */
+ for (i = 0; i < in_len; i++) {
+ struct rte_event *e = &in_burst[i];
+ if (e->queue_id == queue_id &&
+ dsw_flow_id_hash(e->flow_id) == flow_hash) {
+ while (rte_event_ring_enqueue_burst(dest_ring,
+ e, 1,
+ NULL) != 1)
+ rte_pause();
+ } else {
+ uint16_t last_idx = source_port->in_buffer_len;
+ source_port->in_buffer[last_idx] = *e;
+ source_port->in_buffer_len++;
+ }
+ }
+
+ events_left -= in_len;
+ }
+}
+
+static void
+dsw_port_move_migrating_flow(struct dsw_evdev *dsw,
+ struct dsw_port *source_port)
+{
+ uint8_t queue_id = source_port->migration_target_qf.queue_id;
+ uint16_t flow_hash = source_port->migration_target_qf.flow_hash;
+ uint8_t dest_port_id = source_port->migration_target_port_id;
+ struct dsw_port *dest_port = &dsw->ports[dest_port_id];
+
+ dsw_port_flush_out_buffers(dsw, source_port);
+
+ rte_smp_wmb();
+
+ dsw->queues[queue_id].flow_to_port_map[flow_hash] =
+ dest_port_id;
+
+ dsw_port_forward_migrated_flow(source_port, dest_port->in_ring,
+ queue_id, flow_hash);
+
+ /* Flow table update and migration destination port's enqueues
+ * must be seen before the control message.
+ */
+ rte_smp_wmb();
+
+ dsw_port_ctl_broadcast(dsw, source_port, DSW_CTL_UNPAUS_REQ, queue_id,
+ flow_hash);
+ source_port->cfm_cnt = 0;
+ source_port->migration_state = DSW_MIGRATION_STATE_UNPAUSING;
+}
+
+static void
+dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port)
+{
+ port->cfm_cnt++;
+
+ if (port->cfm_cnt == (dsw->num_ports-1)) {
+ switch (port->migration_state) {
+ case DSW_MIGRATION_STATE_PAUSING:
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Going into forwarding "
+ "migration state.\n");
+ port->migration_state = DSW_MIGRATION_STATE_FORWARDING;
+ break;
+ case DSW_MIGRATION_STATE_UNPAUSING:
+ dsw_port_end_migration(dsw, port);
+ break;
+ default:
+ RTE_ASSERT(0);
+ break;
+ }
+ }
+}
+
+static void
+dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)
+{
+ struct dsw_ctl_msg msg;
+
+ /* So that any table loads happen before the ring dequeue, in the
+ * case of a 'paus' message.
+ */
+ rte_smp_rmb();
+
+ if (dsw_port_ctl_dequeue(port, &msg) == 0) {
+ switch (msg.type) {
+ case DSW_CTL_PAUS_REQ:
+ dsw_port_handle_pause_flow(dsw, port,
+ msg.originating_port_id,
+ msg.queue_id, msg.flow_hash);
+ break;
+ case DSW_CTL_UNPAUS_REQ:
+ dsw_port_handle_unpause_flow(dsw, port,
+ msg.originating_port_id,
+ msg.queue_id,
+ msg.flow_hash);
+ break;
+ case DSW_CTL_CFM:
+ dsw_port_handle_confirm(dsw, port);
+ break;
+ }
+ }
+}
+
+static void
+dsw_port_note_op(struct dsw_port *port, uint16_t num_events)
+{
+ /* To pull the control ring reasonably often on busy ports,
+ * each dequeued/enqueued event is considered an 'op' too.
+ */
+ port->ops_since_bg_task += (num_events+1);
+}
+
+static void
+dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port)
+{
+ if (unlikely(port->migration_state == DSW_MIGRATION_STATE_FORWARDING &&
+ port->pending_releases == 0))
+ dsw_port_move_migrating_flow(dsw, port);
+
+ /* Polling the control ring is relatively inexpensive, and
+ * polling it often helps bring down migration latency, so
+ * do this for every iteration.
+ */
+ dsw_port_ctl_process(dsw, port);
+
+ /* To avoid considering migration and flushing output buffers
+ * on every dequeue/enqueue call, the scheduler only performs
+ * such 'background' tasks every nth
+ * (i.e. DSW_MAX_PORT_OPS_PER_BG_TASK) operation.
+ */
+ if (unlikely(port->ops_since_bg_task >= DSW_MAX_PORT_OPS_PER_BG_TASK)) {
+ uint64_t now;
+
+ now = rte_get_timer_cycles();
+
+ port->last_bg = now;
+
+ /* Logic to avoid having events linger in the output
+ * buffer too long.
+ */
+ dsw_port_flush_out_buffers(dsw, port);
+
+ dsw_port_consider_load_update(port, now);
+
+ dsw_port_consider_migration(dsw, port, now);
+
+ port->ops_since_bg_task = 0;
+ }
+}
+
+static void
+dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port)
+{
+ uint16_t dest_port_id;
+
+ for (dest_port_id = 0; dest_port_id < dsw->num_ports; dest_port_id++)
+ dsw_port_transmit_buffered(dsw, source_port, dest_port_id);
+}
+
+uint16_t
+dsw_event_enqueue(void *port, const struct rte_event *ev)
+{
+ return dsw_event_enqueue_burst(port, ev, unlikely(ev == NULL) ? 0 : 1);
+}
+
+static __rte_always_inline uint16_t
+dsw_event_enqueue_burst_generic(void *port, const struct rte_event events[],
+ uint16_t events_len, bool op_types_known,
+ uint16_t num_new, uint16_t num_release,
+ uint16_t num_non_release)
+{
+ struct dsw_port *source_port = port;
+ struct dsw_evdev *dsw = source_port->dsw;
+ bool enough_credits;
+ uint16_t i;
+
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Attempting to enqueue %d "
+ "events to port %d.\n", events_len, source_port->id);
+
+ dsw_port_bg_process(dsw, source_port);
+
+ /* XXX: For performance (=ring efficiency) reasons, the
+ * scheduler relies on internal non-ring buffers instead of
+ * immediately sending the event to the destination ring. For
+ * a producer that doesn't intend to produce or consume any
+ * more events, the scheduler provides a way to flush the
+ * buffer, by means of doing an enqueue of zero events. In
+ * addition, a port cannot be left "unattended" (e.g. unused)
+ * for long periods of time, since that would stall
+ * migration. Eventdev API extensions to provide a cleaner way
+ * to achieve both of these functions should be
+ * considered.
+ */
+ if (unlikely(events_len == 0)) {
+ dsw_port_note_op(source_port, DSW_MAX_PORT_OPS_PER_BG_TASK);
+ return 0;
+ }
+
+ if (unlikely(events_len > source_port->enqueue_depth))
+ events_len = source_port->enqueue_depth;
+
+ dsw_port_note_op(source_port, events_len);
+
+ if (!op_types_known)
+ for (i = 0; i < events_len; i++) {
+ switch (events[i].op) {
+ case RTE_EVENT_OP_RELEASE:
+ num_release++;
+ break;
+ case RTE_EVENT_OP_NEW:
+ num_new++;
+ /* Falls through. */
+ default:
+ num_non_release++;
+ break;
+ }
+ }
+
+ /* Technically, we could allow the non-new events up to the
+ * first new event in the array into the system, but for
+ * simplicity reasons, we deny the whole burst if the port is
+ * above the water mark.
+ */
+ if (unlikely(num_new > 0 && rte_atomic32_read(&dsw->credits_on_loan) >
+ source_port->new_event_threshold))
+ return 0;
+
+ enough_credits = dsw_port_acquire_credits(dsw, source_port,
+ num_non_release);
+ if (unlikely(!enough_credits))
+ return 0;
+
+ source_port->pending_releases -= num_release;
+
+ dsw_port_enqueue_stats(source_port, num_new,
+ num_non_release-num_new, num_release);
+
+ for (i = 0; i < events_len; i++) {
+ const struct rte_event *event = &events[i];
+
+ if (likely(num_release == 0 ||
+ event->op != RTE_EVENT_OP_RELEASE))
+ dsw_port_buffer_event(dsw, source_port, event);
+ dsw_port_queue_enqueue_stats(source_port, event->queue_id);
+ }
+
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "%d non-release events "
+ "accepted.\n", num_non_release);
+
+ return num_non_release;
+}
+
+uint16_t
+dsw_event_enqueue_burst(void *port, const struct rte_event events[],
+ uint16_t events_len)
+{
+ return dsw_event_enqueue_burst_generic(port, events, events_len, false,
+ 0, 0, 0);
+}
+
+uint16_t
+dsw_event_enqueue_new_burst(void *port, const struct rte_event events[],
+ uint16_t events_len)
+{
+ return dsw_event_enqueue_burst_generic(port, events, events_len, true,
+ events_len, 0, events_len);
+}
+
+uint16_t
+dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[],
+ uint16_t events_len)
+{
+ return dsw_event_enqueue_burst_generic(port, events, events_len, true,
+ 0, 0, events_len);
+}
+
+uint16_t
+dsw_event_dequeue(void *port, struct rte_event *events, uint64_t wait)
+{
+ return dsw_event_dequeue_burst(port, events, 1, wait);
+}
+
+static void
+dsw_port_record_seen_events(struct dsw_port *port, struct rte_event *events,
+ uint16_t num)
+{
+ uint16_t i;
+
+ dsw_port_dequeue_stats(port, num);
+
+ for (i = 0; i < num; i++) {
+ uint16_t l_idx = port->seen_events_idx;
+ struct dsw_queue_flow *qf = &port->seen_events[l_idx];
+ struct rte_event *event = &events[i];
+ qf->queue_id = event->queue_id;
+ qf->flow_hash = dsw_flow_id_hash(event->flow_id);
+
+ port->seen_events_idx = (l_idx+1) % DSW_MAX_EVENTS_RECORDED;
+
+ dsw_port_queue_dequeued_stats(port, event->queue_id);
+ }
+
+ if (unlikely(port->seen_events_len != DSW_MAX_EVENTS_RECORDED))
+ port->seen_events_len =
+ RTE_MIN(port->seen_events_len + num,
+ DSW_MAX_EVENTS_RECORDED);
+}
+
+#ifdef DSW_SORT_DEQUEUED
+
+#define DSW_EVENT_TO_INT(_event) \
+ ((int)((((_event)->queue_id)<<16)|((_event)->flow_id)))
+
+static inline int
+dsw_cmp_event(const void *v_event_a, const void *v_event_b)
+{
+ const struct rte_event *event_a = v_event_a;
+ const struct rte_event *event_b = v_event_b;
+
+ return DSW_EVENT_TO_INT(event_a) - DSW_EVENT_TO_INT(event_b);
+}
+#endif
+
+static uint16_t
+dsw_port_dequeue_burst(struct dsw_port *port, struct rte_event *events,
+ uint16_t num)
+{
+ struct dsw_port *source_port = port;
+ struct dsw_evdev *dsw = source_port->dsw;
+
+ dsw_port_ctl_process(dsw, source_port);
+
+ if (unlikely(port->in_buffer_len > 0)) {
+ uint16_t dequeued = RTE_MIN(num, port->in_buffer_len);
+
+ rte_memcpy(events, &port->in_buffer[port->in_buffer_start],
+ dequeued * sizeof(struct rte_event));
+
+ port->in_buffer_start += dequeued;
+ port->in_buffer_len -= dequeued;
+
+ if (port->in_buffer_len == 0)
+ port->in_buffer_start = 0;
+
+ return dequeued;
+ }
+
+ return rte_event_ring_dequeue_burst(port->in_ring, events, num, NULL);
+}
+
+uint16_t
+dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,
+ uint64_t wait __rte_unused)
+{
+ struct dsw_port *source_port = port;
+ struct dsw_evdev *dsw = source_port->dsw;
+ uint16_t dequeued;
+
+ source_port->pending_releases = 0;
+
+ dsw_port_bg_process(dsw, source_port);
+
+ if (unlikely(num > source_port->dequeue_depth))
+ num = source_port->dequeue_depth;
+
+ dequeued = dsw_port_dequeue_burst(source_port, events, num);
+
+ source_port->pending_releases = dequeued;
+
+ dsw_port_load_record(source_port, dequeued);
+
+ dsw_port_note_op(source_port, dequeued);
+
+ if (dequeued > 0) {
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Dequeued %d events.\n",
+ dequeued);
+
+ dsw_port_return_credits(dsw, source_port, dequeued);
+
+ /* One potential optimization one might think of is to
+ * add a migration state (prior to 'pausing'), and
+ * only record seen events when the port is in this
+ * state (and transition to 'pausing' when enough events
+ * have been gathered). However, that scheme doesn't
+ * seem to improve performance.
+ */
+ dsw_port_record_seen_events(port, events, dequeued);
+ }
+ /* XXX: Assuming the port can't produce any more work, the
+ * output buffer could be flushed when dequeued == 0.
+ */
+
+#ifdef DSW_SORT_DEQUEUED
+ dsw_stable_sort(events, dequeued, sizeof(events[0]), dsw_cmp_event);
+#endif
+
+ return dequeued;
+}
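
A minimal application-side sketch of the flush convention described in the XXX comment in dsw_event_enqueue_burst_generic() above: an enqueue of zero events submits nothing, but lets the DSW PMD run its background tasks and flush its internal output buffers. The dev_id/port_id values and the worker_drain_and_park() name are placeholders; only the rte_event_dequeue_burst()/rte_event_enqueue_burst() calls are standard eventdev API.

    #include <rte_eventdev.h>

    static void
    worker_drain_and_park(uint8_t dev_id, uint8_t port_id)
    {
        struct rte_event ev;

        /* Consume whatever is immediately available on the port. */
        while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) > 0) {
            /* ... application event processing goes here ... */
        }

        /* Zero-sized enqueue: no events are submitted, but the DSW
         * PMD flushes any events still held in this port's output
         * buffers before the worker goes idle.
         */
        rte_event_enqueue_burst(dev_id, port_id, &ev, 0);
    }
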
diff --git a/drivers/event/dsw/dsw_sort.h b/drivers/event/dsw/dsw_sort.h
new file mode 100644
index 00000000..609767fd
--- /dev/null
+++ b/drivers/event/dsw/dsw_sort.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Ericsson AB
+ */
+
+#ifndef _DSW_SORT_
+#define _DSW_SORT_
+
+#include <string.h>
+
+#include <rte_common.h>
+
+#define DSW_ARY_ELEM_PTR(_ary, _idx, _elem_size) \
+ RTE_PTR_ADD(_ary, (_idx) * (_elem_size))
+
+#define DSW_ARY_ELEM_SWAP(_ary, _a_idx, _b_idx, _elem_size) \
+ do { \
+ char tmp[_elem_size]; \
+ void *_a_ptr = DSW_ARY_ELEM_PTR(_ary, _a_idx, _elem_size); \
+ void *_b_ptr = DSW_ARY_ELEM_PTR(_ary, _b_idx, _elem_size); \
+ memcpy(tmp, _a_ptr, _elem_size); \
+ memcpy(_a_ptr, _b_ptr, _elem_size); \
+ memcpy(_b_ptr, tmp, _elem_size); \
+ } while (0)
+
+static inline void
+dsw_insertion_sort(void *ary, uint16_t len, uint16_t elem_size,
+ int (*cmp_fn)(const void *, const void *))
+{
+ uint16_t i;
+
+ for (i = 1; i < len; i++) {
+ uint16_t j;
+ for (j = i; j > 0 &&
+ cmp_fn(DSW_ARY_ELEM_PTR(ary, j-1, elem_size),
+ DSW_ARY_ELEM_PTR(ary, j, elem_size)) > 0;
+ j--)
+ DSW_ARY_ELEM_SWAP(ary, j, j-1, elem_size);
+ }
+}
+
+static inline void
+dsw_stable_sort(void *ary, uint16_t len, uint16_t elem_size,
+ int (*cmp_fn)(const void *, const void *))
+{
+ dsw_insertion_sort(ary, len, elem_size, cmp_fn);
+}
+
+#endif
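
A usage sketch of the helper above, mirroring the DSW_SORT_DEQUEUED path in dsw_event.c: a dequeued burst is ordered by (queue_id, flow_id), with the same key construction as DSW_EVENT_TO_INT. The cmp_event()/sort_dequeued_burst() names are illustrative only.

    #include <rte_eventdev.h>

    #include "dsw_sort.h"

    /* Same ordering key as DSW_EVENT_TO_INT in dsw_event.c: queue_id in
     * the high bits, flow_id in the low bits.
     */
    static int
    cmp_event(const void *v_a, const void *v_b)
    {
        const struct rte_event *a = v_a;
        const struct rte_event *b = v_b;

        return (int)(((a->queue_id << 16) | a->flow_id) -
                     ((b->queue_id << 16) | b->flow_id));
    }

    static void
    sort_dequeued_burst(struct rte_event *events, uint16_t num)
    {
        dsw_stable_sort(events, num, sizeof(events[0]), cmp_event);
    }
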
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
new file mode 100644
index 00000000..bf2eec52
--- /dev/null
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Ericsson AB
+ */
+
+#include "dsw_evdev.h"
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <rte_debug.h>
+
+/* The high bits in the xstats id are used to store an additional
+ * parameter (beyond the queue or port id already in the xstats
+ * interface).
+ */
+#define DSW_XSTATS_ID_PARAM_BITS (8)
+#define DSW_XSTATS_ID_STAT_BITS \
+ (sizeof(unsigned int)*CHAR_BIT - DSW_XSTATS_ID_PARAM_BITS)
+#define DSW_XSTATS_ID_STAT_MASK ((1 << DSW_XSTATS_ID_STAT_BITS) - 1)
+
+#define DSW_XSTATS_ID_GET_PARAM(id) \
+ ((id)>>DSW_XSTATS_ID_STAT_BITS)
+
+#define DSW_XSTATS_ID_GET_STAT(id) \
+ ((id) & DSW_XSTATS_ID_STAT_MASK)
+
+#define DSW_XSTATS_ID_CREATE(id, param_value) \
+ (((param_value) << DSW_XSTATS_ID_STAT_BITS) | id)
+
+typedef
+uint64_t (*dsw_xstats_dev_get_value_fn)(struct dsw_evdev *dsw);
+
+struct dsw_xstat_dev {
+ const char *name;
+ dsw_xstats_dev_get_value_fn get_value_fn;
+};
+
+typedef
+uint64_t (*dsw_xstats_port_get_value_fn)(struct dsw_evdev *dsw,
+ uint8_t port_id, uint8_t queue_id);
+
+struct dsw_xstats_port {
+ const char *name_fmt;
+ dsw_xstats_port_get_value_fn get_value_fn;
+ bool per_queue;
+};
+
+static uint64_t
+dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
+{
+ return rte_atomic32_read(&dsw->credits_on_loan);
+}
+
+static struct dsw_xstat_dev dsw_dev_xstats[] = {
+ { "dev_credits_on_loan", dsw_xstats_dev_credits_on_loan }
+};
+
+#define DSW_GEN_PORT_ACCESS_FN(_variable) \
+ static uint64_t \
+ dsw_xstats_port_get_ ## _variable(struct dsw_evdev *dsw, \
+ uint8_t port_id, \
+ uint8_t queue_id __rte_unused) \
+ { \
+ return dsw->ports[port_id]._variable; \
+ }
+
+DSW_GEN_PORT_ACCESS_FN(new_enqueued)
+DSW_GEN_PORT_ACCESS_FN(forward_enqueued)
+DSW_GEN_PORT_ACCESS_FN(release_enqueued)
+
+static uint64_t
+dsw_xstats_port_get_queue_enqueued(struct dsw_evdev *dsw, uint8_t port_id,
+ uint8_t queue_id)
+{
+ return dsw->ports[port_id].queue_enqueued[queue_id];
+}
+
+DSW_GEN_PORT_ACCESS_FN(dequeued)
+
+static uint64_t
+dsw_xstats_port_get_queue_dequeued(struct dsw_evdev *dsw, uint8_t port_id,
+ uint8_t queue_id)
+{
+ return dsw->ports[port_id].queue_dequeued[queue_id];
+}
+
+DSW_GEN_PORT_ACCESS_FN(migrations)
+
+static uint64_t
+dsw_xstats_port_get_migration_latency(struct dsw_evdev *dsw, uint8_t port_id,
+ uint8_t queue_id __rte_unused)
+{
+ uint64_t total_latency = dsw->ports[port_id].migration_latency;
+ uint64_t num_migrations = dsw->ports[port_id].migrations;
+
+ return num_migrations > 0 ? total_latency / num_migrations : 0;
+}
+
+static uint64_t
+dsw_xstats_port_get_event_proc_latency(struct dsw_evdev *dsw, uint8_t port_id,
+ uint8_t queue_id __rte_unused)
+{
+ uint64_t total_busy_cycles =
+ dsw->ports[port_id].total_busy_cycles;
+ uint64_t dequeued =
+ dsw->ports[port_id].dequeued;
+
+ return dequeued > 0 ? total_busy_cycles / dequeued : 0;
+}
+
+DSW_GEN_PORT_ACCESS_FN(inflight_credits)
+
+static uint64_t
+dsw_xstats_port_get_load(struct dsw_evdev *dsw, uint8_t port_id,
+ uint8_t queue_id __rte_unused)
+{
+ int16_t load;
+
+ load = rte_atomic16_read(&dsw->ports[port_id].load);
+
+ return DSW_LOAD_TO_PERCENT(load);
+}
+
+DSW_GEN_PORT_ACCESS_FN(last_bg)
+
+static struct dsw_xstats_port dsw_port_xstats[] = {
+ { "port_%u_new_enqueued", dsw_xstats_port_get_new_enqueued,
+ false },
+ { "port_%u_forward_enqueued", dsw_xstats_port_get_forward_enqueued,
+ false },
+ { "port_%u_release_enqueued", dsw_xstats_port_get_release_enqueued,
+ false },
+ { "port_%u_queue_%u_enqueued", dsw_xstats_port_get_queue_enqueued,
+ true },
+ { "port_%u_dequeued", dsw_xstats_port_get_dequeued,
+ false },
+ { "port_%u_queue_%u_dequeued", dsw_xstats_port_get_queue_dequeued,
+ true },
+ { "port_%u_migrations", dsw_xstats_port_get_migrations,
+ false },
+ { "port_%u_migration_latency", dsw_xstats_port_get_migration_latency,
+ false },
+ { "port_%u_event_proc_latency", dsw_xstats_port_get_event_proc_latency,
+ false },
+ { "port_%u_inflight_credits", dsw_xstats_port_get_inflight_credits,
+ false },
+ { "port_%u_load", dsw_xstats_port_get_load,
+ false },
+ { "port_%u_last_bg", dsw_xstats_port_get_last_bg,
+ false }
+};
+
+static int
+dsw_xstats_dev_get_names(struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(dsw_dev_xstats) && i < size; i++) {
+ ids[i] = i;
+ strcpy(xstats_names[i].name, dsw_dev_xstats[i].name);
+ }
+
+ return i;
+}
+
+static int
+dsw_xstats_port_get_names(struct dsw_evdev *dsw, uint8_t port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ uint8_t queue_id = 0;
+ unsigned int id_idx;
+ unsigned int stat_idx;
+
+ for (id_idx = 0, stat_idx = 0;
+ id_idx < size && stat_idx < RTE_DIM(dsw_port_xstats);
+ id_idx++) {
+ struct dsw_xstats_port *xstat = &dsw_port_xstats[stat_idx];
+
+ if (xstat->per_queue) {
+ ids[id_idx] = DSW_XSTATS_ID_CREATE(stat_idx, queue_id);
+ snprintf(xstats_names[id_idx].name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE,
+ dsw_port_xstats[stat_idx].name_fmt, port_id,
+ queue_id);
+ queue_id++;
+ } else {
+ ids[id_idx] = stat_idx;
+ snprintf(xstats_names[id_idx].name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE,
+ dsw_port_xstats[stat_idx].name_fmt, port_id);
+ }
+
+ if (!(xstat->per_queue && queue_id < dsw->num_queues)) {
+ stat_idx++;
+ queue_id = 0;
+ }
+ }
+ return id_idx;
+}
+
+int
+dsw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ return dsw_xstats_dev_get_names(xstats_names, ids, size);
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ return dsw_xstats_port_get_names(dsw, queue_port_id,
+ xstats_names, ids, size);
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ return 0;
+ default:
+ RTE_ASSERT(false);
+ return -1;
+ }
+}
+
+static int
+dsw_xstats_dev_get(const struct rte_eventdev *dev,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ unsigned int id = ids[i];
+ struct dsw_xstat_dev *xstat = &dsw_dev_xstats[id];
+ values[i] = xstat->get_value_fn(dsw);
+ }
+ return n;
+}
+
+static int
+dsw_xstats_port_get(const struct rte_eventdev *dev, uint8_t port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ unsigned int id = ids[i];
+ unsigned int stat_idx = DSW_XSTATS_ID_GET_STAT(id);
+ struct dsw_xstats_port *xstat = &dsw_port_xstats[stat_idx];
+ uint8_t queue_id = 0;
+
+ if (xstat->per_queue)
+ queue_id = DSW_XSTATS_ID_GET_PARAM(id);
+
+ values[i] = xstat->get_value_fn(dsw, port_id, queue_id);
+ }
+ return n;
+}
+
+int
+dsw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ return dsw_xstats_dev_get(dev, ids, values, n);
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ return dsw_xstats_port_get(dev, queue_port_id, ids, values, n);
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ return 0;
+ default:
+ RTE_ASSERT(false);
+ return -1;
+ }
+ return 0;
+}
+
+uint64_t dsw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(name);
+ RTE_SET_USED(id);
+ return 0;
+}
diff --git a/drivers/event/dsw/meson.build b/drivers/event/dsw/meson.build
new file mode 100644
index 00000000..a6b7bfa5
--- /dev/null
+++ b/drivers/event/dsw/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Ericsson AB
+
+allow_experimental_apis = true
+deps += ['bus_vdev']
+sources = files('dsw_evdev.c', 'dsw_event.c', 'dsw_xstats.c')
diff --git a/drivers/event/dsw/rte_pmd_dsw_event_version.map b/drivers/event/dsw/rte_pmd_dsw_event_version.map
new file mode 100644
index 00000000..24bd5cdb
--- /dev/null
+++ b/drivers/event/dsw/rte_pmd_dsw_event_version.map
@@ -0,0 +1,3 @@
+DPDK_18.11 {
+ local: *;
+};
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
index e9511993..836ecbb7 100644
--- a/drivers/event/meson.build
+++ b/drivers/event/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['dpaa', 'dpaa2', 'octeontx', 'skeleton', 'sw']
+drivers = ['dpaa', 'dpaa2', 'octeontx', 'opdl', 'skeleton', 'sw', 'dsw']
std_deps = ['eventdev', 'kvargs']
config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 90ad2217..2e07890b 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -17,7 +17,7 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx
LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
-LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_bus_vdev -lrte_ethdev
EXPORT_MAP := rte_pmd_octeontx_event_version.map
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 16a3a04b..a273d4c9 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -146,6 +146,7 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
dev->enqueue_forward_burst = ssows_enq_fwd_burst;
dev->dequeue = ssows_deq;
dev->dequeue_burst = ssows_deq_burst;
+ dev->txa_enqueue = sso_event_tx_adapter_enqueue;
if (edev->is_timeout_deq) {
dev->dequeue = ssows_deq_timeout;
@@ -454,7 +455,6 @@ ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
const struct octeontx_nic *nic = eth_dev->data->dev_private;
pki_del_qos_t pki_qos;
RTE_SET_USED(dev);
- RTE_SET_USED(rx_queue_id);
ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
if (ret)
@@ -466,7 +466,7 @@ ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
if (ret < 0)
ssovf_log_err("Failed to delete QOS port=%d, q=%d",
- nic->port_id, queue_conf->ev.queue_id);
+ nic->port_id, rx_queue_id);
return ret;
}
@@ -491,6 +491,77 @@ ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
return 0;
}
+static int
+ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int ret;
+ RTE_SET_USED(dev);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ *caps = 0;
+ else
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+
+ return 0;
+}
+
+static int
+ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+ return 0;
+}
+
+static int
+ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+ return 0;
+}
+
+static int
+ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+ RTE_SET_USED(tx_queue_id);
+ return 0;
+}
+
+static int
+ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+ RTE_SET_USED(tx_queue_id);
+ return 0;
+}
+
+static int
+ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+ return 0;
+}
+
+static int
+ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+ return 0;
+}
+
+
static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
@@ -619,6 +690,14 @@ static struct rte_eventdev_ops ssovf_ops = {
.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+ .eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get,
+ .eth_tx_adapter_create = ssovf_eth_tx_adapter_create,
+ .eth_tx_adapter_free = ssovf_eth_tx_adapter_free,
+ .eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add,
+ .eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del,
+ .eth_tx_adapter_start = ssovf_eth_tx_adapter_start,
+ .eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop,
+
.timer_adapter_caps_get = ssovf_timvf_caps_get,
.dev_selftest = test_eventdev_octeontx,
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 18293e96..0e622152 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -5,6 +5,7 @@
#ifndef __SSOVF_EVDEV_H__
#define __SSOVF_EVDEV_H__
+#include <rte_event_eth_tx_adapter.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_io.h>
@@ -83,7 +84,7 @@
#define SSOVF_SELFTEST_ARG ("selftest")
/*
- * In Cavium OcteonTX SoC, all accesses to the device registers are
+ * In Cavium OCTEON TX SoC, all accesses to the device registers are
* implictly strongly ordered. So, The relaxed version of IO operation is
* safe to use with out any IO memory barriers.
*/
@@ -179,6 +180,8 @@ typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
ssows_handle_event_t fn, void *arg);
void ssows_reset(struct ssows *ws);
+uint16_t sso_event_tx_adapter_enqueue(void *port,
+ struct rte_event ev[], uint16_t nb_events);
int ssovf_info(struct ssovf_info *info);
void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
int test_eventdev_octeontx(void);
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index fffa9024..d940b5dd 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -261,3 +261,47 @@ ssows_reset(struct ssows *ws)
ssows_swtag_untag(ws);
}
}
+
+uint16_t
+sso_event_tx_adapter_enqueue(void *port,
+ struct rte_event ev[], uint16_t nb_events)
+{
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct rte_mbuf *m;
+ struct rte_eth_dev *ethdev;
+ struct ssows *ws = port;
+ struct octeontx_txq *txq;
+ octeontx_dq_t *dq;
+
+ RTE_SET_USED(nb_events);
+ switch (ev->sched_type) {
+ case SSO_SYNC_ORDERED:
+ ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
+ rte_cio_wmb();
+ ssows_swtag_wait(ws);
+ break;
+ case SSO_SYNC_UNTAGGED:
+ ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
+ ev->queue_id);
+ rte_cio_wmb();
+ ssows_swtag_wait(ws);
+ break;
+ case SSO_SYNC_ATOMIC:
+ rte_cio_wmb();
+ break;
+ }
+
+ m = ev[0].mbuf;
+ port_id = m->port;
+ queue_id = rte_event_eth_tx_adapter_txq_get(m);
+ ethdev = &rte_eth_devices[port_id];
+ txq = ethdev->data->tx_queues[queue_id];
+ dq = &txq->dq;
+
+ if (__octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va, dq->fc_status_va,
+ m) < 0)
+ return 0;
+
+ return 1;
+}
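
For context on how events reach this adapter path, a hedged sketch of the producing side under the 18.11 Tx adapter API: the application stores the target Tx queue in the mbuf with rte_event_eth_tx_adapter_txq_set(), which sso_event_tx_adapter_enqueue() above reads back via rte_event_eth_tx_adapter_txq_get(). The dev_id/port_id/txq values and the app_send_on_txq() name are placeholders.

    #include <rte_eventdev.h>
    #include <rte_event_eth_tx_adapter.h>
    #include <rte_mbuf.h>

    static uint16_t
    app_send_on_txq(uint8_t dev_id, uint8_t port_id, struct rte_mbuf *m,
                    uint16_t txq)
    {
        struct rte_event ev = { 0 };

        ev.op = RTE_EVENT_OP_FORWARD;
        ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
        ev.mbuf = m;

        /* Stash the target Tx queue in the mbuf; the adapter enqueue
         * path reads it back with rte_event_eth_tx_adapter_txq_get().
         */
        rte_event_eth_tx_adapter_txq_set(m, txq);

        return rte_event_eth_tx_adapter_enqueue(dev_id, port_id, &ev, 1);
    }
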
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 7c7306b5..d1d3a52a 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -42,6 +42,7 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
mbuf->ol_flags = 0;
mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
rte_mbuf_refcnt_set(mbuf, 1);
+
return mbuf;
}
diff --git a/drivers/event/opdl/Makefile b/drivers/event/opdl/Makefile
index cea8118d..bf50a60a 100644
--- a/drivers/event/opdl/Makefile
+++ b/drivers/event/opdl/Makefile
@@ -24,7 +24,7 @@ LDLIBS += -lrte_bus_vdev -lrte_mbuf -lrte_mempool
LIBABIVER := 1
# versioning export map
-EXPORT_MAP := rte_pmd_evdev_opdl_version.map
+EXPORT_MAP := rte_pmd_opdl_event_version.map
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_ring.c
diff --git a/drivers/event/opdl/meson.build b/drivers/event/opdl/meson.build
new file mode 100644
index 00000000..cc6029c6
--- /dev/null
+++ b/drivers/event/opdl/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+sources = files(
+ 'opdl_evdev.c',
+ 'opdl_evdev_init.c',
+ 'opdl_evdev_xstats.c',
+ 'opdl_ring.c',
+ 'opdl_test.c',
+)
+deps += ['bus_vdev']
diff --git a/drivers/event/opdl/rte_pmd_evdev_opdl_version.map b/drivers/event/opdl/rte_pmd_opdl_event_version.map
index 58b94270..58b94270 100644
--- a/drivers/event/opdl/rte_pmd_evdev_opdl_version.map
+++ b/drivers/event/opdl/rte_pmd_opdl_event_version.map
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a6bb9138..1175d6cd 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -113,10 +113,22 @@ sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
}
}
}
+
+ p->unlinks_in_progress += unlinked;
+ rte_smp_mb();
+
return unlinked;
}
static int
+sw_port_unlinks_in_progress(struct rte_eventdev *dev, void *port)
+{
+ RTE_SET_USED(dev);
+ struct sw_port *p = port;
+ return p->unlinks_in_progress;
+}
+
+static int
sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
const struct rte_event_port_conf *conf)
{
@@ -925,6 +937,7 @@ sw_probe(struct rte_vdev_device *vdev)
.port_release = sw_port_release,
.port_link = sw_port_link,
.port_unlink = sw_port_unlink,
+ .port_unlinks_in_progress = sw_port_unlinks_in_progress,
.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index d90b96d4..7c77b249 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -148,6 +148,14 @@ struct sw_port {
/* A numeric ID for the port */
uint8_t id;
+ /* An atomic counter for when the port has been unlinked, and the
+ * scheduler has not yet acked this unlink - hence there may still be
+ * events in the buffers going to the port. When the unlinks-in-progress
+ * count is read by the scheduler, no more events will be pushed to
+ * the port - hence the scheduler core can just assign zero.
+ */
+ uint8_t unlinks_in_progress;
+
int16_t is_directed; /** Takes from a single directed QID */
/**
* For loadbalanced we can optimise pulling packets from
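
The counter added above backs the rte_event_port_unlinks_in_progress() API exercised by the selftest further down. A minimal sketch of how an application might wait for an unlink to take effect, assuming the sw PMD's scheduler service keeps running on another lcore; dev_id/port_id and the function name are placeholders.

    #include <rte_eventdev.h>
    #include <rte_pause.h>

    static int
    app_unlink_and_wait(uint8_t dev_id, uint8_t port_id)
    {
        /* NULL/0 requests unlinking of all queues mapped to the port. */
        int ret = rte_event_port_unlink(dev_id, port_id, NULL, 0);

        if (ret < 0)
            return ret;

        /* Wait until the scheduler core has acked the unlink, i.e. until
         * no more events will be pushed to this port.
         */
        while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
            rte_pause();

        return 0;
    }
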
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index e3a41e02..cff747da 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -51,9 +51,11 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
int cq = fid->cq;
if (cq < 0) {
- uint32_t cq_idx = qid->cq_next_tx++;
- if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
+ uint32_t cq_idx;
+ if (qid->cq_next_tx >= qid->cq_num_mapped_cqs)
qid->cq_next_tx = 0;
+ cq_idx = qid->cq_next_tx++;
+
cq = qid->cq_map[cq_idx];
/* find least used */
@@ -140,9 +142,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
do {
if (++cq_check_count > qid->cq_num_mapped_cqs)
goto exit;
- cq = qid->cq_map[cq_idx];
- if (++cq_idx == qid->cq_num_mapped_cqs)
+ if (cq_idx >= qid->cq_num_mapped_cqs)
cq_idx = 0;
+ cq = qid->cq_map[cq_idx++];
+
} while (rte_event_ring_free_count(
sw->ports[cq].cq_worker_ring) == 0 ||
sw->ports[cq].inflights == SW_PORT_HIST_LIST);
@@ -220,7 +223,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw)
int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask);
/* zero mapped CQs indicates directed */
- if (iq_num >= SW_IQS_MAX)
+ if (iq_num >= SW_IQS_MAX || qid->cq_num_mapped_cqs == 0)
continue;
uint32_t pkts_done = 0;
@@ -517,13 +520,18 @@ sw_event_schedule(struct rte_eventdev *dev)
/* Pull from rx_ring for ports */
do {
in_pkts = 0;
- for (i = 0; i < sw->port_count; i++)
+ for (i = 0; i < sw->port_count; i++) {
+ /* ack the unlinks in progress as done */
+ if (sw->ports[i].unlinks_in_progress)
+ sw->ports[i].unlinks_in_progress = 0;
+
if (sw->ports[i].is_directed)
in_pkts += sw_schedule_pull_port_dir(sw, i);
else if (sw->ports[i].num_ordered_qids > 0)
in_pkts += sw_schedule_pull_port_lb(sw, i);
else
in_pkts += sw_schedule_pull_port_no_reorder(sw, i);
+ }
/* QID scan for re-ordered */
in_pkts += sw_schedule_reorder(sw, 0,
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index c40912db..d00d5de6 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -1904,6 +1904,77 @@ qid_priorities(struct test *t)
}
static int
+unlink_in_progress(struct test *t)
+{
+ /* Test the unlink API, in particular that when an unlink request has
+ * not yet been seen by the scheduler thread,
+ * rte_event_port_unlinks_in_progress() returns the number of unlinks.
+ */
+ unsigned int i;
+ /* Create instance with 1 port and 3 qids */
+ if (init(t, 3, 1) < 0 ||
+ create_ports(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* Create QID */
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ /* increase priority (0 == highest), as we go */
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+ t->nb_qids = i;
+ /* map all QIDs to port */
+ rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* unlink all queues from the port to have outstanding unlink requests */
+ int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
+ if (ret < 0) {
+ printf("%d: Failed to unlink queues\n", __LINE__);
+ return -1;
+ }
+
+ /* get active unlinks here, expect 3 */
+ int unlinks_in_progress =
+ rte_event_port_unlinks_in_progress(evdev, t->port[0]);
+ if (unlinks_in_progress != 3) {
+ printf("%d: Expected num unlinks in progress == 3, got %d\n",
+ __LINE__, unlinks_in_progress);
+ return -1;
+ }
+
+ /* run scheduler service on this thread to ack the unlinks */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* active unlinks expected to be 0 now that the scheduler thread has acked */
+ unlinks_in_progress =
+ rte_event_port_unlinks_in_progress(evdev, t->port[0]);
+ if (unlinks_in_progress != 0) {
+ printf("%d: Expected num unlinks in progress == 0, got %d\n",
+ __LINE__, unlinks_in_progress);
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
load_balancing(struct test *t)
{
const int rx_enq = 0;
@@ -3260,6 +3331,12 @@ test_sw_eventdev(void)
printf("ERROR - QID Priority test FAILED.\n");
goto test_fail;
}
+ printf("*** Running Unlink-in-progress test...\n");
+ ret = unlink_in_progress(t);
+ if (ret != 0) {
+ printf("ERROR - Unlink in progress test FAILED.\n");
+ goto test_fail;
+ }
printf("*** Running Ordered Reconfigure test...\n");
ret = ordered_reconfigure(t);
if (ret != 0) {
diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile
index da8da1e9..ead5029f 100644
--- a/drivers/mempool/dpaa/Makefile
+++ b/drivers/mempool/dpaa/Makefile
@@ -10,7 +10,6 @@ LIB = librte_mempool_dpaa.a
CFLAGS := -I$(SRCDIR) $(CFLAGS)
CFLAGS += -O3 $(WERROR_FLAGS)
-CFLAGS += -D _GNU_SOURCE
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
@@ -31,5 +30,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
LDLIBS += -lrte_bus_dpaa
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index 10c536bf..021b366f 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -26,6 +26,7 @@
#include <rte_ring.h>
#include <dpaa_mempool.h>
+#include <dpaax_iova_table.h>
/* List of all the memseg information locally maintained in dpaa driver. This
* is to optimize the PA_to_VA searches until a better mechanism (algo) is
@@ -122,7 +123,7 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
struct bm_buffer buf;
int ret;
- DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
+ DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
addr, bp_info->bpid);
bm_buffer_set64(&buf, addr);
@@ -285,6 +286,9 @@ dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
return 0;
}
+ /* Update the PA-VA Table */
+ dpaax_iova_table_update(paddr, vaddr, len);
+
bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
@@ -324,7 +328,7 @@ dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
obj_cb, obj_cb_arg);
}
-struct rte_mempool_ops dpaa_mpool_ops = {
+static const struct rte_mempool_ops dpaa_mpool_ops = {
.name = DPAA_MEMPOOL_OPS_NAME,
.alloc = dpaa_mbuf_create_pool,
.free = dpaa_mbuf_free_pool,
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 092f326c..533e1c6e 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -43,10 +43,8 @@ struct dpaa_bp_info {
};
static inline void *
-DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
+DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info __rte_unused, uint64_t addr)
{
- if (bp_info->ptov_off)
- return ((void *) (size_t)(addr + bp_info->ptov_off));
return rte_dpaa_mem_ptov(addr);
}
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index 9e4c87d7..96c0f2b6 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -19,7 +19,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
EXPORT_MAP := rte_mempool_dpaa2_version.map
# Lbrary version
-LIBABIVER := 1
+LIBABIVER := 2
# depends on fslmc bus which uses experimental API
CFLAGS += -DALLOW_EXPERIMENTAL_API
@@ -30,6 +30,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+LDLIBS += -lrte_common_dpaax
SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL)-include := rte_dpaa2_mempool.h
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 7d0435f5..790cded8 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -30,16 +30,11 @@
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"
+#include <dpaax_iova_table.h>
+
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;
-/* List of all the memseg information locally maintained in dpaa2 driver. This
- * is to optimize the PA_to_VA searches until a better mechanism (algo) is
- * available.
- */
-struct dpaa2_memseg_list rte_dpaa2_memsegs
- = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
-
/* Dynamic logging identified for mempool */
int dpaa2_logtype_mempool;
@@ -400,37 +395,14 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
void *vaddr, rte_iova_t paddr, size_t len,
rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
- struct dpaa2_memseg *ms;
-
- /* For each memory chunk pinned to the Mempool, a linked list of the
- * contained memsegs is created for searching when PA to VA
- * conversion is required.
- */
- ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
- if (!ms) {
- DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
- DPAA2_MEMPOOL_WARN("Fast Physical to Virtual Addr translation would not be available.");
- /* If the element is not added, it would only lead to failure
- * in searching for the element and the logic would Fallback
- * to traditional DPDK memseg traversal code. So, this is not
- * a blocking error - but, error would be printed on screen.
- */
- return 0;
- }
-
- ms->vaddr = vaddr;
- ms->iova = paddr;
- ms->len = len;
- /* Head insertions are generally faster than tail insertions as the
- * buffers pinned are picked from rear end.
- */
- TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+ /* Insert entry into the PA->VA Table */
+ dpaax_iova_table_update(paddr, vaddr, len);
return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
obj_cb, obj_cb_arg);
}
-struct rte_mempool_ops dpaa2_mpool_ops = {
+static const struct rte_mempool_ops dpaa2_mpool_ops = {
.name = DPAA2_MEMPOOL_OPS_NAME,
.alloc = rte_hw_mbuf_create_pool,
.free = rte_hw_mbuf_free_pool,
diff --git a/drivers/mempool/dpaa2/meson.build b/drivers/mempool/dpaa2/meson.build
index 90bab606..6b6ead61 100644
--- a/drivers/mempool/dpaa2/meson.build
+++ b/drivers/mempool/dpaa2/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
index b9d996a6..b45e7a9a 100644
--- a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
+++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -3,7 +3,6 @@ DPDK_17.05 {
rte_dpaa2_bpid_info;
rte_dpaa2_mbuf_alloc_bulk;
- rte_dpaa2_memsegs;
local: *;
};
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index b00be137..e27c4377 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -50,7 +50,7 @@
#define OCTEONTX_FPAVF_BUF_OFFSET 128
/*
- * In Cavium OcteonTX SoC, all accesses to the device registers are
+ * In Cavium OCTEON TX SoC, all accesses to the device registers are
* implicitly strongly ordered. So, the relaxed version of IO operation is
* safe to use with out any IO memory barriers.
*/
diff --git a/drivers/meson.build b/drivers/meson.build
index f94e2fe6..c3c66bbc 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -9,12 +9,17 @@ driver_classes = ['common',
'crypto', # depends on common, bus and mempool (net in future).
'compress', # depends on common, bus, mempool.
'event', # depends on common, bus, mempool and net.
+ 'baseband', # depends on common and bus.
'raw'] # depends on common, bus, mempool, net and event.
default_cflags = machine_args
if cc.has_argument('-Wno-format-truncation')
default_cflags += '-Wno-format-truncation'
endif
+
+# specify -D_GNU_SOURCE unconditionally
+default_cflags += '-D_GNU_SOURCE'
+
foreach class:driver_classes
drivers = []
std_deps = []
@@ -24,6 +29,7 @@ foreach class:driver_classes
# version file for linking
subdir(class)
+ class_drivers = []
foreach drv:drivers
drv_path = join_paths(class, drv)
@@ -51,6 +57,8 @@ foreach class:driver_classes
subdir(drv_path)
if build
+ class_drivers += name
+
dpdk_conf.set(config_flag_fmt.format(name.to_upper()),1)
lib_name = driver_name_fmt.format(name)
@@ -94,10 +102,8 @@ foreach class:driver_classes
lib_version = '@0@.1'.format(version)
so_version = '@0@'.format(version)
else
- pver = meson.project_version().split('.')
- lib_version = '@0@.@1@'.format(pver.get(0),
- pver.get(1))
- so_version = lib_version
+ lib_version = major_version
+ so_version = major_version
endif
# now build the static driver
@@ -141,4 +147,9 @@ foreach class:driver_classes
set_variable('static_@0@'.format(lib_name), static_dep)
endif # build
endforeach
+
+ if meson.version().version_compare('>=0.47')
+ # prior to 0.47, set_variable can't take array params
+ set_variable(class + '_drivers', class_drivers)
+ endif
endforeach
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 664398de..c0386feb 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ endif
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+DIRS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atlantic
DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe
@@ -24,6 +25,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
endif
DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
+DIRS-$(CONFIG_RTE_LIBRTE_ENETC_PMD) += enetc
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
DIRS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe
DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
@@ -32,6 +34,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
+DIRS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta
DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
DIRS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += netvsc
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index eb3cce3a..95a98c6b 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -305,7 +305,6 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -927,8 +926,7 @@ rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
PMD_LOG(ERR, "Failed to probe %s", name);
@@ -988,6 +986,12 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
if (eth_dev == NULL)
return -1;
+ /* mac_addrs must not be freed alone because part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
+
internals = eth_dev->data->dev_private;
for (q = 0; q < internals->nb_queues; q++) {
rte_free(internals->rx_queue[q].rd);
@@ -995,8 +999,6 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
}
free(internals->if_name);
- rte_free(eth_dev->data->dev_private);
-
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
index eea388a1..57026f8d 100644
--- a/drivers/net/ark/ark_ddm.c
+++ b/drivers/net/ark/ark_ddm.c
@@ -11,14 +11,22 @@
int
ark_ddm_verify(struct ark_ddm_t *ddm)
{
+ uint32_t hw_const;
if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
PMD_DRV_LOG(ERR, "ARK: DDM structure looks incorrect %d vs %zd\n",
ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t));
return -1;
}
- if (ddm->cfg.const0 != ARK_DDM_CONST) {
- PMD_DRV_LOG(ERR, "ARK: DDM module not found as expected 0x%08x\n",
+ hw_const = ddm->cfg.const0;
+ if (hw_const == ARK_DDM_CONST1) {
+ PMD_DRV_LOG(ERR,
+ "ARK: DDM module is version 1, "
+ "PMD expects version 2\n");
+ return -1;
+ } else if (hw_const != ARK_DDM_CONST2) {
+ PMD_DRV_LOG(ERR,
+ "ARK: DDM module not found as expected 0x%08x\n",
ddm->cfg.const0);
return -1;
}
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
index b37d1e09..5456b4b5 100644
--- a/drivers/net/ark/ark_ddm.h
+++ b/drivers/net/ark/ark_ddm.h
@@ -19,7 +19,7 @@
/* struct defining Tx meta data -- fixed in FPGA -- 16 bytes */
struct ark_tx_meta {
uint64_t physaddr;
- uint32_t delta_ns;
+ uint32_t user1;
uint16_t data_len; /* of this MBUF */
#define ARK_DDM_EOP 0x01
#define ARK_DDM_SOP 0x02
@@ -34,7 +34,10 @@ struct ark_tx_meta {
* structs will never be instantiated in ram memory
*/
#define ARK_DDM_CFG 0x0000
-#define ARK_DDM_CONST 0xfacecafe
+/* Set unique HW ID for hardware version */
+#define ARK_DDM_CONST2 (0x324d4444)
+#define ARK_DDM_CONST1 (0xfacecafe)
+
struct ark_ddm_cfg_t {
uint32_t r0;
volatile uint32_t tlp_stats_clear;
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 552ca01a..4f52e2bd 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -313,6 +313,9 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
/* We are a single function multi-port device. */
ret = ark_config_device(dev);
+ if (ret)
+ return -1;
+
dev->dev_ops = &ark_eth_dev_ops;
dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
@@ -506,7 +509,6 @@ eth_ark_dev_uninit(struct rte_eth_dev *dev)
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
- rte_free(dev->data->mac_addrs);
return 0;
}
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 16f0d11e..300029d6 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -25,6 +25,9 @@ static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
struct rte_mbuf *mbuf0,
uint32_t cons_index);
static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
+static int eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
+ uint32_t *pnb,
+ struct rte_mbuf **mbufs);
/* ************************************************************************* */
struct ark_rx_queue {
@@ -50,7 +53,7 @@ struct ark_rx_queue {
/* The queue Index is used within the dpdk device structures */
uint16_t queue_index;
- uint32_t pad1;
+ uint32_t last_cons;
/* separate cache line */
/* second cache line - fields only used in slow path */
@@ -102,7 +105,10 @@ eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
{
queue->cons_index = cons_index;
eth_ark_rx_seed_mbufs(queue);
- ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ if (((cons_index - queue->last_cons) >= 64U)) {
+ queue->last_cons = cons_index;
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ }
}
/* ************************************************************************* */
@@ -196,20 +202,25 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* populate mbuf reserve */
status = eth_ark_rx_seed_mbufs(queue);
+ if (queue->seed_index != nb_desc) {
+ PMD_DRV_LOG(ERR, "ARK: Failed to allocate %u mbufs for RX queue %d\n",
+ nb_desc, qidx);
+ status = -1;
+ }
/* MPU Setup */
if (status == 0)
status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);
if (unlikely(status != 0)) {
- struct rte_mbuf *mbuf;
+ struct rte_mbuf **mbuf;
PMD_DRV_LOG(ERR, "Failed to initialize RX queue %d %s\n",
qidx,
__func__);
/* Free the mbufs allocated */
- for (i = 0, mbuf = queue->reserve_q[0];
- i < nb_desc; ++i, mbuf++) {
- rte_pktmbuf_free(mbuf);
+ for (i = 0, mbuf = queue->reserve_q;
+ i < queue->seed_index; ++i, mbuf++) {
+ rte_pktmbuf_free(*mbuf);
}
rte_free(queue->reserve_q);
rte_free(queue->paddress_q);
@@ -446,8 +457,13 @@ eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
- if (unlikely(status != 0))
- return -1;
+ if (unlikely(status != 0)) {
+ /* Try to recover from lack of mbufs in pool */
+ status = eth_ark_rx_seed_recovery(queue, &nb, mbufs);
+ if (unlikely(status != 0)) {
+ return -1;
+ }
+ }
if (ARK_RX_DEBUG) { /* DEBUG */
while (count != nb) {
@@ -495,6 +511,29 @@ eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
return 0;
}
+int
+eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
+ uint32_t *pnb,
+ struct rte_mbuf **mbufs)
+{
+ int status = -1;
+
+ /* Ignore small allocation failures */
+ if (*pnb <= 64)
+ return -1;
+
+ *pnb = 64U;
+ status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, *pnb);
+ if (status != 0) {
+ PMD_DRV_LOG(ERR,
+ "ARK: Could not allocate %u mbufs from pool for RX queue %u;"
+ " %u free buffers remaining in queue\n",
+ *pnb, queue->queue_index,
+ queue->seed_index - queue->cons_index);
+ }
+ return status;
+}
+
void
eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
const char *msg)
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 57188c24..94da5f95 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -65,7 +65,7 @@ eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
uint8_t flags)
{
meta->physaddr = rte_mbuf_data_iova(mbuf);
- meta->delta_ns = 0;
+ meta->user1 = (uint32_t)mbuf->udata64;
meta->data_len = rte_pktmbuf_data_len(mbuf);
meta->flags = flags;
}
diff --git a/drivers/net/atlantic/Makefile b/drivers/net/atlantic/Makefile
new file mode 100644
index 00000000..62dcdbff
--- /dev/null
+++ b/drivers/net/atlantic/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Aquantia Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_atlantic.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_atlantic_version.map
+
+LIBABIVER := 1
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net
+LDLIBS += -lrte_bus_pci
+
+VPATH += $(SRCDIR)/hw_atl
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_hw_regs.c
+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_utils.c
+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_llh.c
+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_utils_fw2x.c
+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_b0.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/atlantic/atl_common.h b/drivers/net/atlantic/atl_common.h
new file mode 100644
index 00000000..b3a0aa5c
--- /dev/null
+++ b/drivers/net/atlantic/atl_common.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Aquantia Corporation
+ */
+
+#ifndef AQ_COMMON_H
+#define AQ_COMMON_H
+
+#define ATL_PMD_DRIVER_VERSION "0.4.1"
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+
+#define AQ_DEVICE_ID_0001 0x0001
+#define AQ_DEVICE_ID_D100 0xD100
+#define AQ_DEVICE_ID_D107 0xD107
+#define AQ_DEVICE_ID_D108 0xD108
+#define AQ_DEVICE_ID_D109 0xD109
+
+#define AQ_DEVICE_ID_AQC100 0x00B1
+#define AQ_DEVICE_ID_AQC107 0x07B1
+#define AQ_DEVICE_ID_AQC108 0x08B1
+#define AQ_DEVICE_ID_AQC109 0x09B1
+#define AQ_DEVICE_ID_AQC111 0x11B1
+#define AQ_DEVICE_ID_AQC112 0x12B1
+
+#define AQ_DEVICE_ID_AQC100S 0x80B1
+#define AQ_DEVICE_ID_AQC107S 0x87B1
+#define AQ_DEVICE_ID_AQC108S 0x88B1
+#define AQ_DEVICE_ID_AQC109S 0x89B1
+#define AQ_DEVICE_ID_AQC111S 0x91B1
+#define AQ_DEVICE_ID_AQC112S 0x92B1
+
+#define AQ_DEVICE_ID_AQC111E 0x51B1
+#define AQ_DEVICE_ID_AQC112E 0x52B1
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+
+#define AQ_HWREV_ANY 0
+#define AQ_HWREV_1 1
+#define AQ_HWREV_2 2
+
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5G5R BIT(2)
+#define AQ_NIC_RATE_2G5 BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+
+#define AQ_NIC_RATE_EEE_10G BIT(6)
+#define AQ_NIC_RATE_EEE_5G BIT(7)
+#define AQ_NIC_RATE_EEE_2G5 BIT(8)
+#define AQ_NIC_RATE_EEE_1G BIT(9)
+
+
+#define ATL_MAX_RING_DESC (8 * 1024 - 8)
+#define ATL_MIN_RING_DESC 32
+#define ATL_RXD_ALIGN 8
+#define ATL_TXD_ALIGN 8
+#define ATL_TX_MAX_SEG 16
+
+#define ATL_MAX_INTR_QUEUE_NUM 15
+
+#define ATL_MISC_VEC_ID 10
+#define ATL_RX_VEC_START 0
+
+#define AQ_NIC_WOL_ENABLED BIT(0)
+
+
+#define AQ_NIC_FC_OFF 0U
+#define AQ_NIC_FC_TX 1U
+#define AQ_NIC_FC_RX 2U
+#define AQ_NIC_FC_FULL 3U
+#define AQ_NIC_FC_AUTO 4U
+
+
+#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (2U * 1024U)
+
+#define AQ_HW_MULTICAST_ADDRESS_MAX 32
+#define AQ_HW_MAX_SEGS_SIZE 40
+
+#define AQ_HW_MAX_RX_QUEUES 8
+#define AQ_HW_MAX_TX_QUEUES 8
+#define AQ_HW_MIN_RX_RING_SIZE 512
+#define AQ_HW_MAX_RX_RING_SIZE 8192
+#define AQ_HW_MIN_TX_RING_SIZE 512
+#define AQ_HW_MAX_TX_RING_SIZE 8192
+
+#define ATL_DEFAULT_RX_FREE_THRESH 64
+#define ATL_DEFAULT_TX_FREE_THRESH 64
+
+#define ATL_IRQ_CAUSE_LINK 0x8
+
+#define AQ_HW_LED_BLINK 0x2U
+#define AQ_HW_LED_DEFAULT 0x0U
+
+#endif /* AQ_COMMON_H */
diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
new file mode 100644
index 00000000..5bc04f55
--- /dev/null
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -0,0 +1,1539 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Aquantia Corporation
+ */
+
+#include <rte_ethdev_pci.h>
+
+#include "atl_ethdev.h"
+#include "atl_common.h"
+#include "atl_hw_regs.h"
+#include "atl_logs.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl/hw_atl_b0_internal.h"
+
+static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
+
+static int atl_dev_configure(struct rte_eth_dev *dev);
+static int atl_dev_start(struct rte_eth_dev *dev);
+static void atl_dev_stop(struct rte_eth_dev *dev);
+static int atl_dev_set_link_up(struct rte_eth_dev *dev);
+static int atl_dev_set_link_down(struct rte_eth_dev *dev);
+static void atl_dev_close(struct rte_eth_dev *dev);
+static int atl_dev_reset(struct rte_eth_dev *dev);
+static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
+
+static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size);
+
+static int atl_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+
+static int atl_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats, unsigned int n);
+
+static void atl_dev_stats_reset(struct rte_eth_dev *dev);
+
+static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size);
+
+static void atl_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+/* VLAN stuff */
+static int atl_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+
+static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t queue_id, int on);
+
+static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type, uint16_t tpid);
+
+/* EEPROM */
+static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
+static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+
+/* Regs */
+static int atl_dev_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
+/* Flow control */
+static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+
+static void atl_dev_link_status_print(struct rte_eth_dev *dev);
+
+/* Interrupts */
+static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
+static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void atl_dev_interrupt_handler(void *param);
+
+
+static int atl_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+
+static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
+/* RSS */
+static int atl_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int atl_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int atl_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+
+static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev);
+static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
+
+static void atl_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+int atl_logtype_init;
+int atl_logtype_driver;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_atl_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
+
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
+
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
+
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver rte_atl_pmd = {
+ .id_table = pci_id_atl_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_atl_pci_probe,
+ .remove = eth_atl_pci_remove,
+};
+
+#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
+ | DEV_RX_OFFLOAD_IPV4_CKSUM \
+ | DEV_RX_OFFLOAD_UDP_CKSUM \
+ | DEV_RX_OFFLOAD_TCP_CKSUM \
+ | DEV_RX_OFFLOAD_JUMBO_FRAME)
+
+#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
+ | DEV_TX_OFFLOAD_IPV4_CKSUM \
+ | DEV_TX_OFFLOAD_UDP_CKSUM \
+ | DEV_TX_OFFLOAD_TCP_CKSUM \
+ | DEV_TX_OFFLOAD_TCP_TSO \
+ | DEV_TX_OFFLOAD_MULTI_SEGS)
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = ATL_MAX_RING_DESC,
+ .nb_min = ATL_MIN_RING_DESC,
+ .nb_align = ATL_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = ATL_MAX_RING_DESC,
+ .nb_min = ATL_MIN_RING_DESC,
+ .nb_align = ATL_TXD_ALIGN,
+ .nb_seg_max = ATL_TX_MAX_SEG,
+ .nb_mtu_seg_max = ATL_TX_MAX_SEG,
+};
+
+#define ATL_XSTATS_FIELD(name) { \
+ #name, \
+ offsetof(struct aq_stats_s, name) \
+}
+
+struct atl_xstats_tbl_s {
+ const char *name;
+ unsigned int offset;
+};
+
+static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
+ ATL_XSTATS_FIELD(uprc),
+ ATL_XSTATS_FIELD(mprc),
+ ATL_XSTATS_FIELD(bprc),
+ ATL_XSTATS_FIELD(erpt),
+ ATL_XSTATS_FIELD(uptc),
+ ATL_XSTATS_FIELD(mptc),
+ ATL_XSTATS_FIELD(bptc),
+ ATL_XSTATS_FIELD(erpr),
+ ATL_XSTATS_FIELD(ubrc),
+ ATL_XSTATS_FIELD(ubtc),
+ ATL_XSTATS_FIELD(mbrc),
+ ATL_XSTATS_FIELD(mbtc),
+ ATL_XSTATS_FIELD(bbrc),
+ ATL_XSTATS_FIELD(bbtc),
+};
+
+static const struct eth_dev_ops atl_eth_dev_ops = {
+ .dev_configure = atl_dev_configure,
+ .dev_start = atl_dev_start,
+ .dev_stop = atl_dev_stop,
+ .dev_set_link_up = atl_dev_set_link_up,
+ .dev_set_link_down = atl_dev_set_link_down,
+ .dev_close = atl_dev_close,
+ .dev_reset = atl_dev_reset,
+
+ /* PROMISC */
+ .promiscuous_enable = atl_dev_promiscuous_enable,
+ .promiscuous_disable = atl_dev_promiscuous_disable,
+ .allmulticast_enable = atl_dev_allmulticast_enable,
+ .allmulticast_disable = atl_dev_allmulticast_disable,
+
+ /* Link */
+ .link_update = atl_dev_link_update,
+
+ .get_reg = atl_dev_get_regs,
+
+ /* Stats */
+ .stats_get = atl_dev_stats_get,
+ .xstats_get = atl_dev_xstats_get,
+ .xstats_get_names = atl_dev_xstats_get_names,
+ .stats_reset = atl_dev_stats_reset,
+ .xstats_reset = atl_dev_stats_reset,
+
+ .fw_version_get = atl_fw_version_get,
+ .dev_infos_get = atl_dev_info_get,
+ .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
+
+ .mtu_set = atl_dev_mtu_set,
+
+ /* VLAN */
+ .vlan_filter_set = atl_vlan_filter_set,
+ .vlan_offload_set = atl_vlan_offload_set,
+ .vlan_tpid_set = atl_vlan_tpid_set,
+ .vlan_strip_queue_set = atl_vlan_strip_queue_set,
+
+ /* Queue Control */
+ .rx_queue_start = atl_rx_queue_start,
+ .rx_queue_stop = atl_rx_queue_stop,
+ .rx_queue_setup = atl_rx_queue_setup,
+ .rx_queue_release = atl_rx_queue_release,
+
+ .tx_queue_start = atl_tx_queue_start,
+ .tx_queue_stop = atl_tx_queue_stop,
+ .tx_queue_setup = atl_tx_queue_setup,
+ .tx_queue_release = atl_tx_queue_release,
+
+ .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
+
+ .rx_queue_count = atl_rx_queue_count,
+ .rx_descriptor_status = atl_dev_rx_descriptor_status,
+ .tx_descriptor_status = atl_dev_tx_descriptor_status,
+
+ /* EEPROM */
+ .get_eeprom_length = atl_dev_get_eeprom_length,
+ .get_eeprom = atl_dev_get_eeprom,
+ .set_eeprom = atl_dev_set_eeprom,
+
+ /* Flow Control */
+ .flow_ctrl_get = atl_flow_ctrl_get,
+ .flow_ctrl_set = atl_flow_ctrl_set,
+
+ /* MAC */
+ .mac_addr_add = atl_add_mac_addr,
+ .mac_addr_remove = atl_remove_mac_addr,
+ .mac_addr_set = atl_set_default_mac_addr,
+ .set_mc_addr_list = atl_dev_set_mc_addr_list,
+ .rxq_info_get = atl_rxq_info_get,
+ .txq_info_get = atl_txq_info_get,
+
+ .reta_update = atl_reta_update,
+ .reta_query = atl_reta_query,
+ .rss_hash_update = atl_rss_hash_update,
+ .rss_hash_conf_get = atl_rss_hash_conf_get,
+};
+
+static inline int32_t
+atl_reset_hw(struct aq_hw_s *hw)
+{
+ return hw_atl_b0_hw_reset(hw);
+}
+
+static inline void
+atl_enable_intr(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
+}
+
+static void
+atl_disable_intr(struct aq_hw_s *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+ hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
+}
+
+static int
+eth_atl_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct atl_adapter *adapter =
+ (struct atl_adapter *)eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &atl_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &atl_recv_pkts;
+ eth_dev->tx_pkt_burst = &atl_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &atl_prep_pkts;
+
+ /* For secondary processes, the primary process has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Vendor and Device ID need to be set before init of shared code */
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->mmio = (void *)pci_dev->mem_resource[0].addr;
+
+ /* Hardware configuration - hardcode */
+ adapter->hw_cfg.is_lro = false;
+ adapter->hw_cfg.wol = false;
+ adapter->hw_cfg.is_rss = false;
+ adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
+
+ adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M;
+
+ adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
+ adapter->hw_cfg.aq_rss.indirection_table_size =
+ HW_ATL_B0_RSS_REDIRECTION_MAX;
+
+ hw->aq_nic_cfg = &adapter->hw_cfg;
+
+ /* disable interrupt */
+ atl_disable_intr(hw);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "MAC Malloc failed");
+ return -ENOMEM;
+ }
+
+ err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
+ if (err)
+ return err;
+
+ /* Copy the permanent MAC address */
+ if (hw->aq_fw_ops->get_mac_permanent(hw,
+ eth_dev->data->mac_addrs->addr_bytes) != 0)
+ return -EINVAL;
+
+ /* Reset the hw statistics */
+ atl_dev_stats_reset(eth_dev);
+
+ rte_intr_callback_register(intr_handle,
+ atl_dev_interrupt_handler, eth_dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* enable support intr */
+ atl_enable_intr(eth_dev);
+
+ return err;
+}
+
+static int
+eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct aq_hw_s *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (hw->adapter_stopped == 0)
+ atl_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ atl_dev_interrupt_handler, eth_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int
+eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct atl_adapter), eth_atl_dev_init);
+}
+
+static int
+eth_atl_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
+}
+
+static int
+atl_dev_configure(struct rte_eth_dev *dev)
+{
+ struct atl_interrupt *intr =
+ ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set flag to update link status after init */
+ intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
+
+ return 0;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+atl_dev_start(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+ uint32_t *link_speeds;
+ uint32_t speed = 0;
+ int status;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set adapter started */
+ hw->adapter_stopped = 0;
+
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, fix speed not supported",
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* reinitialize adapter
+ * this calls reset and start
+ */
+ status = atl_reset_hw(hw);
+ if (status != 0)
+ return -EIO;
+
+ err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
+
+ hw_atl_b0_hw_start(hw);
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
+ PMD_INIT_LOG(ERR, "At most %d intr queues supported",
+ ATL_MAX_INTR_QUEUE_NUM);
+ return -ENOTSUP;
+ }
+ if (rte_intr_efd_enable(intr_handle, intr_vector)) {
+ PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
+ return -1;
+ }
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec = rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* initialize transmission unit */
+ atl_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = atl_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ goto error;
+ }
+
+ PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
+ hw->fw_ver_actual >> 24,
+ (hw->fw_ver_actual >> 16) & 0xFF,
+ hw->fw_ver_actual & 0xFFFF);
+ PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
+
+ err = atl_start_queues(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
+ goto error;
+ }
+
+ err = hw->aq_fw_ops->update_link_status(hw);
+
+ if (err)
+ goto error;
+
+ dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
+
+ link_speeds = &dev->data->dev_conf.link_speeds;
+
+ speed = 0x0;
+
+ if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ speed = hw->aq_nic_cfg->link_speed_msk;
+ } else {
+ if (*link_speeds & ETH_LINK_SPEED_10G)
+ speed |= AQ_NIC_RATE_10G;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= AQ_NIC_RATE_5G;
+ if (*link_speeds & ETH_LINK_SPEED_1G)
+ speed |= AQ_NIC_RATE_1G;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= AQ_NIC_RATE_2G5;
+ if (*link_speeds & ETH_LINK_SPEED_100M)
+ speed |= AQ_NIC_RATE_100M;
+ }
+
+ err = hw->aq_fw_ops->set_link_speed(hw, speed);
+ if (err)
+ goto error;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ atl_dev_lsc_interrupt_setup(dev, true);
+ else
+ atl_dev_lsc_interrupt_setup(dev, false);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ atl_dev_interrupt_handler, dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
+ atl_dev_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ atl_enable_intr(dev);
+
+ return 0;
+
+error:
+ atl_stop_queues(dev);
+ return -EIO;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+atl_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct aq_hw_s *hw =
+ ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* disable interrupts */
+ atl_disable_intr(hw);
+
+ /* reset the NIC */
+ atl_reset_hw(hw);
+ hw->adapter_stopped = 1;
+
+ atl_stop_queues(dev);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+ dev->data->lro = 0;
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ atl_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+/*
+ * Set device link up: enable tx.
+ */
+static int
+atl_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return hw->aq_fw_ops->set_link_speed(hw,
+ hw->aq_nic_cfg->link_speed_msk);
+}
+
+/*
+ * Set device link down: disable tx.
+ */
+static int
+atl_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return hw->aq_fw_ops->set_link_speed(hw, 0);
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+atl_dev_close(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ atl_dev_stop(dev);
+
+ atl_free_queues(dev);
+}
+
+static int
+atl_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = eth_atl_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_atl_dev_init(dev);
+
+ return ret;
+}
+
+
+static int
+atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
+ struct aq_hw_s *hw = &adapter->hw;
+ struct atl_sw_stats *swstats = &adapter->sw_stats;
+ unsigned int i;
+
+ hw->aq_fw_ops->update_stats(hw);
+
+ /* Fill out the rte_eth_stats statistics structure */
+ stats->ipackets = hw->curr_stats.dma_pkt_rc;
+ stats->ibytes = hw->curr_stats.dma_oct_rc;
+ stats->imissed = hw->curr_stats.dpc;
+ stats->ierrors = hw->curr_stats.erpt;
+
+ stats->opackets = hw->curr_stats.dma_pkt_tc;
+ stats->obytes = hw->curr_stats.dma_oct_tc;
+ stats->oerrors = 0;
+
+ stats->rx_nombuf = swstats->rx_nombuf;
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ stats->q_ipackets[i] = swstats->q_ipackets[i];
+ stats->q_opackets[i] = swstats->q_opackets[i];
+ stats->q_ibytes[i] = swstats->q_ibytes[i];
+ stats->q_obytes[i] = swstats->q_obytes[i];
+ stats->q_errors[i] = swstats->q_errors[i];
+ }
+ return 0;
+}
+
+static void
+atl_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
+ struct aq_hw_s *hw = &adapter->hw;
+
+ hw->aq_fw_ops->update_stats(hw);
+
+ /* Reset software totals */
+ memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
+
+ memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
+}
+
+static int
+atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (!xstats_names)
+ return RTE_DIM(atl_xstats_tbl);
+
+ for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
+ snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
+ atl_xstats_tbl[i].name);
+
+ return size;
+}
+
+static int
+atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n)
+{
+ struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
+ struct aq_hw_s *hw = &adapter->hw;
+ unsigned int i;
+
+ if (!stats)
+ return 0;
+
+ for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
+ stats[i].id = i;
+ stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
+ atl_xstats_tbl[i].offset);
+ }
+
+ return n;
+}
+
+static int
+atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fw_ver = 0;
+ unsigned int ret = 0;
+
+ ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
+ if (ret)
+ return -EIO;
+
+ ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
+ (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
+
+ ret += 1; /* account for the string null-terminator */
+
+ if (fw_size < ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
+
+ dev_info->min_rx_bufsize = 1024;
+ dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
+ dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
+ dev_info->max_vfs = pci_dev->max_vfs;
+
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vmdq_pools = 0;
+ dev_info->vmdq_queue_num = 0;
+
+ dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
+
+ dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
+
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
+ dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
+ dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+ dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
+ dev_info->speed_capa |= ETH_LINK_SPEED_5G;
+}
+
+static const uint32_t *
+atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == atl_recv_pkts)
+ return ptypes;
+
+ return NULL;
+}
+
+/* Return 0 if the link status changed, -1 if it did not change */
+static int
+atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct atl_interrupt *intr =
+ ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct rte_eth_link link, old;
+ int err = 0;
+
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = 0;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+ memset(&old, 0, sizeof(old));
+
+ /* load old link status */
+ rte_eth_linkstatus_get(dev, &old);
+
+ /* read current link status */
+ err = hw->aq_fw_ops->update_link_status(hw);
+
+ if (err)
+ return 0;
+
+ if (hw->aq_link_status.mbps == 0) {
+ /* write default (down) link status */
+ rte_eth_linkstatus_set(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+ return 0;
+ }
+
+ intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
+
+ link.link_status = ETH_LINK_UP;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = hw->aq_link_status.mbps;
+
+ rte_eth_linkstatus_set(dev, &link);
+
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+static void
+atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
+}
+
+static void
+atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
+}
+
+static void
+atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
+}
+
+static void
+atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+
+ hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+
+static int
+atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
+{
+ atl_dev_link_status_print(dev);
+ return 0;
+}
+
+static int
+atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+
+static int
+atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ struct atl_interrupt *intr =
+ ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u64 cause = 0;
+
+ hw_atl_b0_hw_irq_read(hw, &cause);
+
+ atl_disable_intr(hw);
+ intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
+ ATL_FLAG_NEED_LINK_UPDATE : 0;
+
+ return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static void
+atl_dev_link_status_print(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_get(dev, &link);
+ if (link.link_status) {
+ PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ (int)(dev->data->port_id),
+ (unsigned int)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_DRV_LOG(INFO, " Port %d: Link Down",
+ (int)(dev->data->port_id));
+ }
+
+
+#ifdef DEBUG
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+}
+#endif
+
+ PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
+}
+
+/*
+ * It executes link_update after an interrupt has occurred.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+atl_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct atl_interrupt *intr =
+ ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
+ atl_dev_link_update(dev, 0);
+ intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
+ atl_dev_link_status_print(dev);
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+
+ atl_enable_intr(dev);
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+atl_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ atl_dev_interrupt_get_status(dev);
+ atl_dev_interrupt_action(dev, dev->intr_handle);
+}
+
+#define SFP_EEPROM_SIZE 0xff
+
+static int
+atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
+{
+ return SFP_EEPROM_SIZE;
+}
+
+static int
+atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->aq_fw_ops->get_eeprom == NULL)
+ return -ENOTSUP;
+
+ if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
+ return -EINVAL;
+
+ return hw->aq_fw_ops->get_eeprom(hw, eeprom->data, eeprom->length);
+}
+
+static int
+atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->aq_fw_ops->set_eeprom == NULL)
+ return -ENOTSUP;
+
+ if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
+ return -EINVAL;
+
+ return hw->aq_fw_ops->set_eeprom(hw, eeprom->data, eeprom->length);
+}
+
+static int
+atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 mif_id;
+ int err;
+
+ if (regs->data == NULL) {
+ regs->length = hw_atl_utils_hw_get_reg_length();
+ regs->width = sizeof(u32);
+ return 0;
+ }
+
+ /* Only full register dump is supported */
+ if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
+ return -ENOTSUP;
+
+ err = hw_atl_utils_hw_get_regs(hw, regs->data);
+
+ /* Device version */
+ mif_id = hw_atl_reg_glb_mif_id_get(hw);
+ regs->version = mif_id & 0xFFU;
+
+ return err;
+}
+
+static int
+atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
+ fc_conf->mode = RTE_FC_NONE;
+ else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
+ fc_conf->mode = RTE_FC_FULL;
+ else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+
+ return 0;
+}
+
+static int
+atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
+
+
+ if (hw->aq_fw_ops->set_flow_control == NULL)
+ return -ENOTSUP;
+
+ if (fc_conf->mode == RTE_FC_NONE)
+ hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
+ else if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
+ else if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
+ else if (fc_conf->mode == RTE_FC_FULL)
+ hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
+
+ if (old_flow_control != hw->aq_nic_cfg->flow_control)
+ return hw->aq_fw_ops->set_flow_control(hw);
+
+ return 0;
+}
+
+static int
+atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
+ u8 *mac_addr, bool enable)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+ int err;
+
+ if (mac_addr) {
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ }
+
+ hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
+
+ if (enable)
+ hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
+
+ err = aq_hw_err_from_flags(hw);
+
+ return err;
+}
+
+static int
+atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index __rte_unused, uint32_t pool __rte_unused)
+{
+ if (is_zero_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
+ return -EINVAL;
+ }
+
+ return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
+}
+
+static void
+atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ atl_update_mac_addr(dev, index, NULL, false);
+}
+
+static int
+atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ atl_remove_mac_addr(dev, 0);
+ atl_add_mac_addr(dev, addr, 0, 0);
+ return 0;
+}
+
+static int
+atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ atl_dev_info_get(dev, &dev_info);
+
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return 0;
+}
+
+static int
+atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err = 0;
+ int i = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i] == vlan_id) {
+ if (!on) {
+ /* Disable VLAN filter. */
+ hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
+
+ /* Clear VLAN filter entry */
+ cfg->vlan_filter[i] = 0;
+ }
+ break;
+ }
+ }
+
+ /* VLAN_ID was not found. So, nothing to delete. */
+ if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
+ goto exit;
+
+ /* VLAN_ID already exists, or was already removed above. Nothing to do. */
+ if (i != HW_ATL_B0_MAX_VLAN_IDS)
+ goto exit;
+
+ /* Try to find a free VLAN filter to add the new VLAN_ID */
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i] == 0)
+ break;
+ }
+
+ if (i == HW_ATL_B0_MAX_VLAN_IDS) {
+ /* We have no free VLAN filter to add the new VLAN_ID */
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ cfg->vlan_filter[i] = vlan_id;
+ hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
+ hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
+ hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
+
+exit:
+ /* Enable VLAN promiscuous mode if vlan_filter is empty */
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i] != 0)
+ break;
+ }
+
+ hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
+
+ return err;
+}
+
+static int
+atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
+ if (cfg->vlan_filter[i])
+ hw_atl_rpf_vlan_flr_en_set(hw, en, i);
+ }
+ return 0;
+}
+
+static int
+atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret = 0;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
+
+ cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ ret = -ENOTSUP;
+
+ return ret;
+}
+
+static int
+atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_INNER:
+ hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type");
+ err = -ENOTSUP;
+ }
+
+ return err;
+}
+
+static void
+atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_id >= dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue id");
+ return;
+ }
+
+ hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
+}
+
+static int
+atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 i;
+
+ if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
+ return -EINVAL;
+
+ /* Update whole uc filters table */
+ for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
+ u8 *mac_addr = NULL;
+ u32 l = 0, h = 0;
+
+ if (i < nb_mc_addr) {
+ mac_addr = mc_addr_set[i].addr_bytes;
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ h = (mac_addr[0] << 8) | mac_addr[1];
+ }
+
+ hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
+ HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
+ HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
+ HW_ATL_B0_MAC_MIN + i);
+ }
+
+ return 0;
+}
+
+static int
+atl_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i;
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+ for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
+ cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
+ dev->data->nb_rx_queues - 1);
+
+ hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
+ return 0;
+}
+
+static int
+atl_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i;
+ struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+ for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
+ reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
+ reta_conf->mask = ~0U;
+ return 0;
+}
+
+static int
+atl_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ static u8 def_rss_key[40] = {
+ 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+ 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+ 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+ 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+ 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+ };
+
+ cfg->is_rss = !!rss_conf->rss_hf;
+ if (rss_conf->rss_key) {
+ memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
+ } else {
+ memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
+ sizeof(def_rss_key));
+ cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
+ }
+
+ hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
+ return 0;
+}
+
+static int
+atl_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+
+ rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
+ if (rss_conf->rss_key) {
+ rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
+ memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
+ rss_conf->rss_key_len);
+ }
+
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
+
+RTE_INIT(atl_init_log)
+{
+ atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
+ if (atl_logtype_init >= 0)
+ rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
+ atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
+ if (atl_logtype_driver >= 0)
+ rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
+}
+
diff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h
new file mode 100644
index 00000000..1e29999b
--- /dev/null
+++ b/drivers/net/atlantic/atl_ethdev.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Aquantia Corporation
+ */
+
+#ifndef _ATLANTIC_ETHDEV_H_
+#define _ATLANTIC_ETHDEV_H_
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+
+#include "atl_types.h"
+#include "hw_atl/hw_atl_utils.h"
+
+#define ATL_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+#define ATL_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct atl_adapter *)adapter)->hw)
+
+#define ATL_DEV_TO_ADAPTER(dev) \
+ ((struct atl_adapter *)(dev)->data->dev_private)
+
+#define ATL_DEV_PRIVATE_TO_INTR(adapter) \
+ (&((struct atl_adapter *)adapter)->intr)
+
+#define ATL_DEV_PRIVATE_TO_CFG(adapter) \
+ (&((struct atl_adapter *)adapter)->hw_cfg)
+
+#define ATL_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+#define ATL_FLAG_NEED_LINK_CONFIG (uint32_t)(4 << 0)
+
+struct atl_interrupt {
+ uint32_t flags;
+ uint32_t mask;
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct atl_adapter {
+ struct aq_hw_s hw;
+ struct aq_hw_cfg_s hw_cfg;
+ struct atl_sw_stats sw_stats;
+ struct atl_interrupt intr;
+};
+
+/*
+ * RX/TX function prototypes
+ */
+void atl_rx_queue_release(void *rxq);
+void atl_tx_queue_release(void *txq);
+
+int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+uint32_t atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+int atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id);
+int atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id);
+
+int atl_rx_init(struct rte_eth_dev *dev);
+int atl_tx_init(struct rte_eth_dev *dev);
+
+int atl_start_queues(struct rte_eth_dev *dev);
+int atl_stop_queues(struct rte_eth_dev *dev);
+void atl_free_queues(struct rte_eth_dev *dev);
+
+int atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+void atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+uint16_t atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t atl_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _ATLANTIC_ETHDEV_H_ */
diff --git a/drivers/net/atlantic/atl_hw_regs.c b/drivers/net/atlantic/atl_hw_regs.c
new file mode 100644
index 00000000..bd42c834
--- /dev/null
+++ b/drivers/net/atlantic/atl_hw_regs.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_hw_utils.c: Definitions of helper functions used across
+ * the hardware layer.
+ */
+
+#include "atl_hw_regs.h"
+
+#include <rte_io.h>
+#include <rte_byteorder.h>
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val)
+{
+ if (msk ^ ~0) {
+ u32 reg_old, reg_new;
+
+ reg_old = aq_hw_read_reg(aq_hw, addr);
+ reg_new = (reg_old & (~msk)) | (val << shift);
+
+ if (reg_old != reg_new)
+ aq_hw_write_reg(aq_hw, addr, reg_new);
+ } else {
+ aq_hw_write_reg(aq_hw, addr, val);
+ }
+}
+
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
+{
+ return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift);
+}
+
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
+{
+ return rte_le_to_cpu_32(rte_read32((u8 *)hw->mmio + reg));
+}
+
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
+{
+ rte_write32((rte_cpu_to_le_32(value)), (u8 *)hw->mmio + reg);
+}
+
+int aq_hw_err_from_flags(struct aq_hw_s *hw)
+{
+ int err = 0;
+
+ if (aq_hw_read_reg(hw, 0x10U) == ~0U)
+ return -ENXIO;
+
+ return err;
+}
diff --git a/drivers/net/atlantic/atl_hw_regs.h b/drivers/net/atlantic/atl_hw_regs.h
new file mode 100644
index 00000000..a2d6ca80
--- /dev/null
+++ b/drivers/net/atlantic/atl_hw_regs.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_hw_utils.h: Declarations of helper functions used across the
+ * hardware layer.
+ */
+
+#ifndef AQ_HW_UTILS_H
+#define AQ_HW_UTILS_H
+
+#include <rte_common.h>
+#include <rte_io.h>
+#include <rte_byteorder.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include "atl_common.h"
+#include "atl_types.h"
+
+
+#ifndef HIDWORD
+#define LODWORD(_qw) ((u32)(_qw))
+#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff))
+#endif
+
+#define AQ_HW_SLEEP(_US_) rte_delay_ms(_US_)
+
+#define mdelay rte_delay_ms
+#define udelay rte_delay_us
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define BIT(x) (1UL << (x))
+
+#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
+do { \
+ unsigned int AQ_HW_WAIT_FOR_i; \
+ for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\
+ --AQ_HW_WAIT_FOR_i) {\
+ udelay(_US_); \
+ } \
+ if (!AQ_HW_WAIT_FOR_i) {\
+ err = -ETIMEDOUT; \
+ } \
+} while (0)
+
+#define ATL_WRITE_FLUSH(aq_hw) { (void)aq_hw_read_reg(aq_hw, 0x10); }
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val);
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
+int aq_hw_err_from_flags(struct aq_hw_s *hw);
+
+#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/atlantic/atl_logs.h b/drivers/net/atlantic/atl_logs.h
new file mode 100644
index 00000000..e3dba334
--- /dev/null
+++ b/drivers/net/atlantic/atl_logs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Aquantia Corporation
+ */
+#ifndef ATL_LOGS_H
+#define ATL_LOGS_H
+
+#include <rte_log.h>
+
+extern int atl_logtype_init;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, atl_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+extern int atl_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, atl_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
new file mode 100644
index 00000000..fd909476
--- /dev/null
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -0,0 +1,1357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Aquantia Corporation
+ */
+
+#include <rte_malloc.h>
+#include <rte_ethdev_driver.h>
+#include <rte_net.h>
+
+#include "atl_ethdev.h"
+#include "atl_hw_regs.h"
+
+#include "atl_logs.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl/hw_atl_b0_internal.h"
+
+#define ATL_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define ATL_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define ATL_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct atl_rx_entry {
+ struct rte_mbuf *mbuf;
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct atl_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct atl_rx_queue {
+ struct rte_mempool *mb_pool;
+ struct hw_atl_rxd_s *hw_ring;
+ uint64_t hw_ring_phys_addr;
+ struct atl_rx_entry *sw_ring;
+ uint16_t nb_rx_desc;
+ uint16_t rx_tail;
+ uint16_t nb_rx_hold;
+ uint16_t rx_free_thresh;
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint16_t buff_size;
+ bool l3_csum_enabled;
+ bool l4_csum_enabled;
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct atl_tx_queue {
+ struct hw_atl_txd_s *hw_ring;
+ uint64_t hw_ring_phys_addr;
+ struct atl_tx_entry *sw_ring;
+ uint16_t nb_tx_desc;
+ uint16_t tx_tail;
+ uint16_t tx_head;
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint16_t tx_free_thresh;
+ uint16_t tx_free;
+};
+
+static inline void
+atl_reset_rx_queue(struct atl_rx_queue *rxq)
+{
+ struct hw_atl_rxd_s *rxd = NULL;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
+ rxd->buf_addr = 0;
+ rxd->hdr_addr = 0;
+ }
+
+ rxq->rx_tail = 0;
+}
+
+int
+atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct atl_rx_queue *rxq;
+ const struct rte_memzone *mz;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* make sure a valid number of descriptors have been requested */
+ if (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE ||
+ nb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "Number of Rx descriptors must be "
+ "less than or equal to %d, "
+ "greater than or equal to %d", AQ_HW_MAX_RX_RING_SIZE,
+ AQ_HW_MIN_RX_RING_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * If this queue already exists, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on a
+ * different socket than the one previously used.
+ */
+ if (dev->data->rx_queues[rx_queue_id] != NULL) {
+ atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queues[rx_queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ rxq = rte_zmalloc_socket("atlantic Rx queue", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return -ENOMEM;
+ }
+
+ /* setup queue */
+ rxq->mb_pool = mb_pool;
+ rxq->nb_rx_desc = nb_rx_desc;
+ rxq->port_id = dev->data->port_id;
+ rxq->queue_id = rx_queue_id;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+
+ rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
+ (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
+
+ /* allocate memory for the software ring */
+ rxq->sw_ring = rte_zmalloc_socket("atlantic sw rx ring",
+ nb_rx_desc * sizeof(struct atl_rx_entry),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Port %d: Cannot allocate software ring for queue %d",
+ rxq->port_id, rxq->queue_id);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "rx hw_ring", rx_queue_id,
+ HW_ATL_B0_MAX_RXD *
+ sizeof(struct hw_atl_rxd_s),
+ 128, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Port %d: Cannot allocate hardware ring for queue %d",
+ rxq->port_id, rxq->queue_id);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+ rxq->hw_ring = mz->addr;
+ rxq->hw_ring_phys_addr = mz->iova;
+
+ atl_reset_rx_queue(rxq);
+
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ return 0;
+}
+
+static inline void
+atl_reset_tx_queue(struct atl_tx_queue *txq)
+{
+ struct atl_tx_entry *tx_entry;
+ union hw_atl_txc_s *txc;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
+ return;
+ }
+
+ tx_entry = txq->sw_ring;
+
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txc = (union hw_atl_txc_s *)&txq->hw_ring[i];
+ txc->flags1 = 0;
+ txc->flags2 = 2;
+ }
+
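+ /* Mark every descriptor as completed and drop any attached mbuf */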
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->hw_ring[i].dd = 1;
+ tx_entry[i].mbuf = NULL;
+ }
+
+ txq->tx_tail = 0;
+ txq->tx_head = 0;
+ txq->tx_free = txq->nb_tx_desc - 1;
+}
+
+int
+atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct atl_tx_queue *txq;
+ const struct rte_memzone *mz;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* make sure a valid number of descriptors have been requested */
+ if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE ||
+ nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "Number of Tx descriptors must be "
+ "less than or equal to %d, "
+ "greater than or equal to %d", AQ_HW_MAX_TX_RING_SIZE,
+ AQ_HW_MIN_TX_RING_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * if this queue existed already, free the associated memory. The
+ * queue cannot be reused in case we need to allocate memory on
+ * different socket than was previously used.
+ */
+ if (dev->data->tx_queues[tx_queue_id] != NULL) {
+ atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queues[tx_queue_id] = NULL;
+ }
+
+ /* allocate memory for the queue structure */
+ txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+ return -ENOMEM;
+ }
+
+ /* setup queue */
+ txq->nb_tx_desc = nb_tx_desc;
+ txq->port_id = dev->data->port_id;
+ txq->queue_id = tx_queue_id;
+ txq->tx_free_thresh = tx_conf->tx_free_thresh;
+
+ /* allocate memory for the software ring */
+ txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring",
+ nb_tx_desc * sizeof(struct atl_tx_entry),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Port %d: Cannot allocate software ring for queue %d",
+ txq->port_id, txq->queue_id);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate memory for the hardware descriptor ring. A memzone large
+ * enough to hold the maximum ring size is requested to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id,
+ HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s),
+ 128, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Port %d: Cannot allocate hardware ring for queue %d",
+ txq->port_id, txq->queue_id);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ txq->hw_ring = mz->addr;
+ txq->hw_ring_phys_addr = mz->iova;
+
+ atl_reset_tx_queue(txq);
+
+ dev->data->tx_queues[tx_queue_id] = txq;
+ return 0;
+}
+
+int
+atl_tx_init(struct rte_eth_dev *eth_dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct atl_tx_queue *txq;
+ uint64_t base_addr = 0;
+ int i = 0;
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ base_addr = txq->hw_ring_phys_addr;
+
+ err = hw_atl_b0_hw_ring_tx_init(hw, base_addr,
+ txq->queue_id,
+ txq->nb_tx_desc, 0,
+ txq->port_id);
+
+ if (err) {
+ PMD_INIT_LOG(ERR,
+ "Port %d: Cannot init TX queue %d",
+ txq->port_id, txq->queue_id);
+ break;
+ }
+ }
+
+ return err;
+}
+
+int
+atl_rx_init(struct rte_eth_dev *eth_dev)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;
+ struct atl_rx_queue *rxq;
+ uint64_t base_addr = 0;
+ int i = 0;
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ base_addr = rxq->hw_ring_phys_addr;
+
+ /* Take requested pool mbuf size and adapt
+ * descriptor buffer to best fit
+ */
+ int buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+
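+ /* The hardware expresses the Rx buffer size in 1KB units, so round
+ * the usable mbuf data room down to a 1KB multiple.
+ */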
+ buff_size = RTE_ALIGN_FLOOR(buff_size, 1024);
+ if (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) {
+ PMD_INIT_LOG(WARNING,
+ "Port %d queue %d: mem pool buff size is too big\n",
+ rxq->port_id, rxq->queue_id);
+ buff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX;
+ }
+ if (buff_size < 1024) {
+ PMD_INIT_LOG(ERR,
+ "Port %d queue %d: mem pool buff size is too small\n",
+ rxq->port_id, rxq->queue_id);
+ return -EINVAL;
+ }
+ rxq->buff_size = buff_size;
+
+ err = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id,
+ rxq->nb_rx_desc, buff_size, 0,
+ rxq->port_id);
+
+ if (err) {
+ PMD_INIT_LOG(ERR, "Port %d: Cannot init RX queue %d",
+ rxq->port_id, rxq->queue_id);
+ break;
+ }
+ }
+
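+ /* Spread flows across all configured Rx queues; the mask below
+ * assumes nb_rx_queues is a power of two.
+ */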
+ for (i = rss_params->indirection_table_size; i--;)
+ rss_params->indirection_table[i] = i &
+ (eth_dev->data->nb_rx_queues - 1);
+ hw_atl_b0_hw_rss_set(hw, rss_params);
+ return err;
+}
+
+static int
+atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq)
+{
+ struct atl_rx_entry *rx_entry = rxq->sw_ring;
+ struct hw_atl_rxd_s *rxd;
+ uint64_t dma_addr = 0;
+ uint32_t i = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* fill Rx ring */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Port %d: mbuf alloc failed for rx queue %d",
+ rxq->port_id, rxq->queue_id);
+ return -ENOMEM;
+ }
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->port = rxq->port_id;
+
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
+ rxd->buf_addr = dma_addr;
+ rxd->hdr_addr = 0;
+ rx_entry[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+static void
+atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+int
+atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct atl_rx_queue *rxq = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (atl_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR,
+ "Port %d: Allocate mbufs for queue %d failed",
+ rxq->port_id, rxq->queue_id);
+ return -1;
+ }
+
+ hw_atl_b0_hw_ring_rx_start(hw, rx_queue_id);
+
+ rte_wmb();
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1,
+ rx_queue_id);
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct atl_rx_queue *rxq = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ hw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id);
+
+ atl_rx_queue_release_mbufs(rxq);
+ atl_reset_rx_queue(rxq);
+
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+atl_rx_queue_release(void *rx_queue)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue != NULL) {
+ struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+
+ atl_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
+}
+
+static void
+atl_tx_queue_release_mbufs(struct atl_tx_queue *txq)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+int
+atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);
+
+ rte_wmb();
+ hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);
+ dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct atl_tx_queue *txq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id);
+
+ atl_tx_queue_release_mbufs(txq);
+ atl_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+atl_tx_queue_release(void *tx_queue)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue != NULL) {
+ struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+
+ atl_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
+}
+
+void
+atl_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ atl_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = 0;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ atl_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = 0;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+int
+atl_start_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (atl_tx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR,
+ "Port %d: Start Tx queue %d failed",
+ dev->data->port_id, i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (atl_rx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR,
+ "Port %d: Start Rx queue %d failed",
+ dev->data->port_id, i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int
+atl_stop_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (atl_tx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR,
+ "Port %d: Stop Tx queue %d failed",
+ dev->data->port_id, i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (atl_rx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR,
+ "Port %d: Stop Rx queue %d failed",
+ dev->data->port_id, i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+void
+atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct atl_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+}
+
+void
+atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct atl_tx_queue *txq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+}
+
+/* Return Rx queue avail count */
+
+uint32_t
+atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct atl_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (rxq == NULL)
+ return 0;
+
+ return rxq->nb_rx_desc - rxq->nb_rx_hold;
+}
+
+int
+atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct atl_rx_queue *rxq = rx_queue;
+ struct hw_atl_rxd_wb_s *rxd;
+ uint32_t idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ idx = rxq->rx_tail + offset;
+
+ if (idx >= rxq->nb_rx_desc)
+ idx -= rxq->nb_rx_desc;
+
+ rxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];
+
+ if (rxd->dd)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct atl_tx_queue *txq = tx_queue;
+ struct hw_atl_txd_s *txd;
+ uint32_t idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ idx = txq->tx_tail + offset;
+
+ if (idx >= txq->nb_tx_desc)
+ idx -= txq->nb_tx_desc;
+
+ txd = &txq->hw_ring[idx];
+
+ if (txd->dd)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+static int
+atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
+{
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct atl_rx_queue *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_id >= dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", queue_id);
+ return -EINVAL;
+ }
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ if (rxq == NULL)
+ return 0;
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id);
+
+ return 0;
+}
+
+int
+atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ return atl_rx_enable_intr(eth_dev, queue_id, true);
+}
+
+int
+atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ return atl_rx_enable_intr(eth_dev, queue_id, false);
+}
+
+uint16_t
+atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+static uint64_t
+atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
+ struct hw_atl_rxd_wb_s *rxd_wb)
+{
+ uint64_t mbuf_flags = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* IPv4 ? */
+ if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
+ /* IPv4 csum error ? */
+ if (rxd_wb->rx_stat & BIT(1))
+ mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ } else {
+ mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ }
+
+ /* CSUM calculated ? */
+ if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
+ if (rxd_wb->rx_stat & BIT(2))
+ mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ } else {
+ mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ }
+
+ return mbuf_flags;
+}
+
+static uint32_t
+atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)
+{
+ uint32_t type = RTE_PTYPE_UNKNOWN;
+ uint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;
+ uint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;
+
+ switch (l2_l3_type) {
+ case 0:
+ type = RTE_PTYPE_L3_IPV4;
+ break;
+ case 1:
+ type = RTE_PTYPE_L3_IPV6;
+ break;
+ case 2:
+ type = RTE_PTYPE_L2_ETHER;
+ break;
+ case 3:
+ type = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ }
+
+ switch (l4_type) {
+ case 0:
+ type |= RTE_PTYPE_L4_TCP;
+ break;
+ case 1:
+ type |= RTE_PTYPE_L4_UDP;
+ break;
+ case 2:
+ type |= RTE_PTYPE_L4_SCTP;
+ break;
+ case 3:
+ type |= RTE_PTYPE_L4_ICMP;
+ break;
+ }
+
+ if (rxd_wb->pkt_type & BIT(5))
+ type |= RTE_PTYPE_L2_ETHER_VLAN;
+
+ return type;
+}
+
+uint16_t
+atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ struct atl_adapter *adapter =
+ ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ struct atl_rx_entry *sw_ring = rxq->sw_ring;
+
+ struct rte_mbuf *new_mbuf;
+ struct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;
+ struct atl_rx_entry *rx_entry;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = 0;
+ struct hw_atl_rxd_wb_s rxd_wb;
+ struct hw_atl_rxd_s *rxd = NULL;
+ uint16_t tail = rxq->rx_tail;
+ uint64_t dma_addr;
+ uint16_t pkt_len = 0;
+
+ while (nb_rx < nb_pkts) {
+ uint16_t eop_tail = tail;
+
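+ /* Hardware overwrites the descriptor with its write-back format
+ * once the buffer has been filled, so read it back as such.
+ */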
+ rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
+ rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
+
+ if (!rxd_wb.dd) { /* RxD is not done */
+ break;
+ }
+
+ PMD_RX_LOG(ERR, "port_id=%u queue_id=%u tail=%u "
+ "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id,
+ (unsigned int)tail, (unsigned int)rxd_wb.eop,
+ (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),
+ rxd_wb.rss_hash, rxd_wb.rss_type);
+
+ /* Not the last segment: make sure all descriptors up to EOP are done */
+ if (!rxd_wb.eop) {
+ while (true) {
+ struct hw_atl_rxd_wb_s *eop_rxwbd;
+
+ eop_tail = (eop_tail + 1) % rxq->nb_rx_desc;
+ eop_rxwbd = (struct hw_atl_rxd_wb_s *)
+ &rxq->hw_ring[eop_tail];
+ if (!eop_rxwbd->dd) {
+ /* no EOP received yet */
+ eop_tail = tail;
+ break;
+ }
+ if (eop_rxwbd->dd && eop_rxwbd->eop)
+ break;
+ }
+ /* No EOP in ring */
+ if (eop_tail == tail)
+ break;
+ }
+ rx_mbuf_prev = NULL;
+ rx_mbuf_first = NULL;
+
+ /* Run through packet segments */
+ while (true) {
+ new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (new_mbuf == NULL) {
+ PMD_RX_LOG(ERR,
+ "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ dev->data->rx_mbuf_alloc_failed++;
+ adapter->sw_stats.rx_nombuf++;
+ goto err_stop;
+ }
+
+ nb_hold++;
+ rx_entry = &sw_ring[tail];
+
+ rx_mbuf = rx_entry->mbuf;
+ rx_entry->mbuf = new_mbuf;
+ dma_addr = rte_cpu_to_le_64(
+ rte_mbuf_data_iova_default(new_mbuf));
+
+ /* setup RX descriptor */
+ rxd->hdr_addr = 0;
+ rxd->buf_addr = dma_addr;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);
+ rx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch1((char *)rx_mbuf->buf_addr +
+ rx_mbuf->data_off);
+ rx_mbuf->nb_segs = 0;
+ rx_mbuf->next = NULL;
+ rx_mbuf->pkt_len = pkt_len;
+ rx_mbuf->data_len = pkt_len;
+ if (rxd_wb.eop) {
+ u16 remainder_len = pkt_len % rxq->buff_size;
+ if (!remainder_len)
+ remainder_len = rxq->buff_size;
+ rx_mbuf->data_len = remainder_len;
+ } else {
+ rx_mbuf->data_len = pkt_len > rxq->buff_size ?
+ rxq->buff_size : pkt_len;
+ }
+ rx_mbuf->port = rxq->port_id;
+
+ rx_mbuf->hash.rss = rxd_wb.rss_hash;
+
+ rx_mbuf->vlan_tci = rxd_wb.vlan;
+
+ rx_mbuf->ol_flags =
+ atl_desc_to_offload_flags(rxq, &rxd_wb);
+
+ rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
+
+ if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
+ rx_mbuf->ol_flags |= PKT_RX_VLAN;
+ rx_mbuf->vlan_tci = rxd_wb.vlan;
+
+ if (cfg->vlan_strip)
+ rx_mbuf->ol_flags |=
+ PKT_RX_VLAN_STRIPPED;
+ }
+
+ if (!rx_mbuf_first)
+ rx_mbuf_first = rx_mbuf;
+ rx_mbuf_first->nb_segs++;
+
+ if (rx_mbuf_prev)
+ rx_mbuf_prev->next = rx_mbuf;
+ rx_mbuf_prev = rx_mbuf;
+
+ tail = (tail + 1) % rxq->nb_rx_desc;
+ /* Prefetch next mbufs */
+ rte_prefetch0(sw_ring[tail].mbuf);
+ if ((tail & 0x3) == 0) {
+ rte_prefetch0(&sw_ring[tail]);
+ }
+
+ /* Last segment: the mbuf chain for this packet is complete */
+ if (rxd_wb.eop)
+ break;
+ rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
+ rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
+ }
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rx_mbuf_first;
+ adapter->sw_stats.q_ipackets[rxq->queue_id]++;
+ adapter->sw_stats.q_ibytes[rxq->queue_id] +=
+ rx_mbuf_first->pkt_len;
+
+ PMD_RX_LOG(ERR, "add mbuf segs=%d pkt_len=%d",
+ rx_mbuf_first->nb_segs,
+ rx_mbuf_first->pkt_len);
+ }
+
+err_stop:
+
+ rxq->rx_tail = tail;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situtation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(ERR, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
+ (unsigned int)tail, (unsigned int)nb_hold,
+ (unsigned int)nb_rx);
+ tail = (uint16_t)((tail == 0) ?
+ (rxq->nb_rx_desc - 1) : (tail - 1));
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);
+
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+static void
+atl_xmit_cleanup(struct atl_tx_queue *txq)
+{
+ struct atl_tx_entry *sw_ring;
+ struct hw_atl_txd_s *txd;
+ int to_clean = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (txq != NULL) {
+ sw_ring = txq->sw_ring;
+ int head = txq->tx_head;
+ int cnt;
+ int i;
+
+ for (i = 0, cnt = head; ; i++) {
+ txd = &txq->hw_ring[cnt];
+
+ if (txd->dd)
+ to_clean++;
+
+ cnt = (cnt + 1) % txq->nb_tx_desc;
+ if (cnt == txq->tx_tail)
+ break;
+ }
+
+ if (to_clean == 0)
+ return;
+
+ while (to_clean) {
+ txd = &txq->hw_ring[head];
+
+ struct atl_tx_entry *tx_entry = &sw_ring[head];
+
+ if (tx_entry->mbuf) {
+ rte_pktmbuf_free_seg(tx_entry->mbuf);
+ tx_entry->mbuf = NULL;
+ }
+
+ if (txd->dd)
+ to_clean--;
+
+ txd->buf_addr = 0;
+ txd->flags = 0;
+
+ head = (head + 1) % txq->nb_tx_desc;
+ txq->tx_free++;
+ }
+
+ txq->tx_head = head;
+ }
+}
+
+static int
+atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
+{
+ uint32_t tx_cmd = 0;
+ uint64_t ol_flags = tx_pkt->ol_flags;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
+
+ tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
+
+ txc->cmd = 0x4;
+
+ if (ol_flags & PKT_TX_IPV6)
+ txc->cmd |= 0x2;
+
+ txc->l2_len = tx_pkt->l2_len;
+ txc->l3_len = tx_pkt->l3_len;
+ txc->l4_len = tx_pkt->l4_len;
+
+ txc->mss_len = tx_pkt->tso_segsz;
+ }
+
+ if (ol_flags & PKT_TX_VLAN) {
+ tx_cmd |= tx_desc_cmd_vlan;
+ txc->vlan_tag = tx_pkt->vlan_tci;
+ }
+
+ if (tx_cmd) {
+ txc->type = tx_desc_type_ctx;
+ txc->idx = 0;
+ }
+
+ return tx_cmd;
+}
+
+static inline void
+atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
+ uint32_t tx_cmd)
+{
+ txd->cmd |= tx_desc_cmd_fcs;
+ txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+ /* L4 csum requested */
+ txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+ txd->cmd |= tx_cmd;
+}
+
+static inline void
+atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
+ struct rte_mbuf *tx_pkt)
+{
+ struct atl_adapter *adapter =
+ ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
+ uint32_t pay_len = 0;
+ int tail = 0;
+ struct atl_tx_entry *tx_entry;
+ uint64_t buf_dma_addr;
+ struct rte_mbuf *m_seg;
+ union hw_atl_txc_s *txc = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ u32 tx_cmd = 0U;
+ int desc_count = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ tail = txq->tx_tail;
+
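+ /* The first slot is prepared as a context descriptor; if no
+ * TSO/VLAN offload is requested it is reused as the first data
+ * descriptor (see the tx_cmd check below).
+ */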
+ txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
+
+ txc->flags1 = 0U;
+ txc->flags2 = 0U;
+
+ tx_cmd = atl_tso_setup(tx_pkt, txc);
+
+ if (tx_cmd) {
+ /* We've consumed the first desc, adjust counters */
+ tail = (tail + 1) % txq->nb_tx_desc;
+ txq->tx_tail = tail;
+ txq->tx_free -= 1;
+
+ txd = &txq->hw_ring[tail];
+ txd->flags = 0U;
+ } else {
+ txd = (struct hw_atl_txd_s *)txc;
+ }
+
+ txd->ct_en = !!tx_cmd;
+
+ txd->type = tx_desc_type_desc;
+
+ atl_setup_csum_offload(tx_pkt, txd, tx_cmd);
+
+ if (tx_cmd)
+ txd->ct_idx = 0;
+
+ pay_len = tx_pkt->pkt_len;
+
+ txd->pay_len = pay_len;
+
+ for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
+ if (desc_count > 0) {
+ txd = &txq->hw_ring[tail];
+ txd->flags = 0U;
+ }
+
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+ txd->type = tx_desc_type_desc;
+ txd->len = m_seg->data_len;
+ txd->pay_len = pay_len;
+
+ /* Store mbuf for freeing later */
+ tx_entry = &txq->sw_ring[tail];
+
+ if (tx_entry->mbuf)
+ rte_pktmbuf_free_seg(tx_entry->mbuf);
+ tx_entry->mbuf = m_seg;
+
+ tail = (tail + 1) % txq->nb_tx_desc;
+
+ desc_count++;
+ }
+
+ /* Last descriptor requires EOP and WB */
+ txd->eop = 1U;
+ txd->cmd |= tx_desc_cmd_wb;
+
+ hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);
+
+ txq->tx_tail = tail;
+
+ txq->tx_free -= desc_count;
+
+ adapter->sw_stats.q_opackets[txq->queue_id]++;
+ adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
+}
+
+uint16_t
+atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev = NULL;
+ struct aq_hw_s *hw = NULL;
+ struct atl_tx_queue *txq = tx_queue;
+ struct rte_mbuf *tx_pkt;
+ uint16_t nb_tx;
+
+ dev = &rte_eth_devices[txq->port_id];
+ hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_TX_LOG(DEBUG,
+ "port %d txq %d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
+ txq->port_id, txq->queue_id, nb_pkts, txq->tx_free,
+ txq->tx_tail, txq->tx_head);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ tx_pkt = *tx_pkts++;
+
+ /* Clean Tx queue if needed */
+ if (txq->tx_free < txq->tx_free_thresh)
+ atl_xmit_cleanup(txq);
+
+ /* Check if we have enough free descriptors */
+ if (txq->tx_free < tx_pkt->nb_segs)
+ break;
+
+ /* check mbuf is valid */
+ if ((tx_pkt->nb_segs == 0) ||
+ ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
+ break;
+
+ /* Send the packet */
+ atl_xmit_pkt(hw, txq, tx_pkt);
+ }
+
+ PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);
+
+ return nb_tx;
+}
+
diff --git a/drivers/net/atlantic/atl_types.h b/drivers/net/atlantic/atl_types.h
new file mode 100644
index 00000000..3d90f6ca
--- /dev/null
+++ b/drivers/net/atlantic/atl_types.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Aquantia Corporation
+ */
+#ifndef ATL_TYPES_H
+#define ATL_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdbool.h>
+#include <netinet/in.h>
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#include "hw_atl/hw_atl_b0_internal.h"
+#include "hw_atl/hw_atl_utils.h"
+
+struct aq_hw_link_status_s {
+ unsigned int mbps;
+};
+
+struct aq_stats_s {
+ u64 uprc;
+ u64 mprc;
+ u64 bprc;
+ u64 erpt;
+ u64 uptc;
+ u64 mptc;
+ u64 bptc;
+ u64 erpr;
+ u64 mbtc;
+ u64 bbtc;
+ u64 mbrc;
+ u64 bbrc;
+ u64 ubrc;
+ u64 ubtc;
+ u64 dpc;
+ u64 dma_pkt_rc;
+ u64 dma_pkt_tc;
+ u64 dma_oct_rc;
+ u64 dma_oct_tc;
+};
+
+struct aq_rss_parameters {
+ u16 base_cpu_number;
+ u16 indirection_table_size;
+ u16 hash_secret_key_size;
+ u32 hash_secret_key[HW_ATL_B0_RSS_HASHKEY_BITS / 8];
+ u8 indirection_table[HW_ATL_B0_RSS_REDIRECTION_MAX];
+};
+
+struct aq_hw_cfg_s {
+ bool is_lro;
+ bool is_rss;
+ unsigned int num_rss_queues;
+ int wol;
+
+ int link_speed_msk;
+ int irq_type;
+ int irq_mask;
+ unsigned int vecs;
+
+ bool vlan_strip;
+ uint32_t vlan_filter[HW_ATL_B0_MAX_VLAN_IDS];
+ uint32_t flow_control;
+
+ struct aq_rss_parameters aq_rss;
+};
+
+struct aq_hw_s {
+ u16 device_id;
+ u16 vendor_id;
+ bool adapter_stopped;
+
+ u8 rbl_enabled:1;
+ struct aq_hw_cfg_s *aq_nic_cfg;
+ const struct aq_fw_ops *aq_fw_ops;
+ void *mmio;
+
+ struct aq_hw_link_status_s aq_link_status;
+ bool is_autoneg;
+
+ struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct aq_stats_s curr_stats;
+
+ u64 speed;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 rpc_tid;
+ struct hw_aq_atl_utils_fw_rpc rpc;
+};
+
+struct aq_fw_ops {
+ int (*init)(struct aq_hw_s *self);
+
+ int (*deinit)(struct aq_hw_s *self);
+
+ int (*reset)(struct aq_hw_s *self);
+
+ int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
+
+ int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+ int (*set_state)(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
+ int (*update_link_status)(struct aq_hw_s *self);
+
+ int (*update_stats)(struct aq_hw_s *self);
+
+ int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
+ u8 *mac);
+
+ int (*get_temp)(struct aq_hw_s *self, int *temp);
+
+ int (*get_cable_len)(struct aq_hw_s *self, int *cable_len);
+
+ int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
+
+ int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
+ u32 *supported_rates);
+
+ int (*set_flow_control)(struct aq_hw_s *self);
+
+ int (*led_control)(struct aq_hw_s *self, u32 mode);
+
+ int (*get_eeprom)(struct aq_hw_s *self, u32 *data, u32 len);
+
+ int (*set_eeprom)(struct aq_hw_s *self, u32 *data, u32 len);
+};
+
+struct atl_sw_stats {
+ u64 crcerrs;
+ u64 errbc;
+ u64 mspdc;
+ u64 mpctotal;
+ u64 mpc[8];
+ u64 mlfc;
+ u64 mrfc;
+ u64 rlec;
+ u64 lxontxc;
+ u64 lxonrxc;
+ u64 lxofftxc;
+ u64 lxoffrxc;
+ u64 pxontxc[8];
+ u64 pxonrxc[8];
+ u64 pxofftxc[8];
+ u64 pxoffrxc[8];
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 tor;
+ u64 tpr;
+ u64 tpt;
+ u64 mptc;
+ u64 bptc;
+ u64 xec;
+ u64 fccrc;
+ u64 ldpcec;
+ u64 pcrc8ec;
+
+ u64 rx_nombuf;
+ u64 q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ u64 q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ u64 q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ u64 q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ u64 q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+};
+
+#endif
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
new file mode 100644
index 00000000..9400e0ed
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
+
+#include "../atl_types.h"
+#include "hw_atl_b0.h"
+
+#include "../atl_hw_regs.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_b0_internal.h"
+#include "hw_atl_llh_internal.h"
+#include "../atl_logs.h"
+
+int hw_atl_b0_hw_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ self->aq_fw_ops->set_state(self, MPI_RESET);
+
+ return err;
+}
+
+static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
+{
+ u32 tc = 0U;
+ u32 buff_size = 0U;
+ unsigned int i_priority = 0U;
+ bool is_rx_flow_control = false;
+
+ /* TPS Descriptor rate init */
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ /* TPS TC credits init */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+
+ /* Tx buf size */
+ buff_size = HW_ATL_B0_TXBUF_MAX;
+
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
+
+ /* QoS Rx buf size per TC */
+ tc = 0;
+ is_rx_flow_control = 0;
+ buff_size = HW_ATL_B0_RXBUF_MAX;
+
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self,
+ is_rx_flow_control ? 1U : 0U,
+ tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (i_priority = 8U; i_priority--;)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+/* calc hash only in IPv4 header, regardless of presence of TCP */
+#define pif_rpf_rss_ipv4_hdr_only_i (1 << 4)
+/* calc hash only if TCP header and IPv4 */
+#define pif_rpf_rss_ipv4_tcp_hdr_only_i (1 << 3)
+/* calc hash only in IPv6 header, regardless of presence of TCP */
+#define pif_rpf_rss_ipv6_hdr_only_i (1 << 2)
+/* calc hash only if TCP header and IPv6 */
+#define pif_rpf_rss_ipv6_tcp_hdr_only_i (1 << 1)
+/* bug 5124 - rss hashing types - FIXME */
+#define pif_rpf_rss_dont_use_udp_i (1 << 0)
+
+static int hw_atl_b0_hw_rss_hash_type_set(struct aq_hw_s *self)
+{
+ /* misc */
+ unsigned int control_reg_val =
+ IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U;
+
+ /* RSS hash type set for IP/TCP */
+ control_reg_val |= pif_rpf_rss_ipv4_hdr_only_i; /* 0x1EU */
+
+ aq_hw_write_reg(self, 0x5040U, control_reg_val);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ struct aq_hw_cfg_s *cfg = self->aq_nic_cfg;
+ int err = 0;
+ unsigned int i = 0U;
+ unsigned int addr = 0U;
+
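+ /* Program the 320-bit hash key 32 bits at a time (10 words),
+ * starting from the highest word.
+ */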
+ for (i = 10, addr = 0U; i--; ++addr) {
+ u32 key_data = cfg->is_rss ?
+ htonl(rss_params->hash_secret_key[i]) : 0U;
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ /* RSS Ring selection */
+ hw_atl_reg_rx_flr_rss_control1set(self,
+ cfg->is_rss ? 0xB3333333U : 0x00000000U);
+ hw_atl_b0_hw_rss_hash_type_set(self);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+
+int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ u32 i = 0;
+ u32 addr = 0;
+ u32 val = 0;
+ u32 shift = 0;
+ int err = 0;
+
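+ /* Indirection entries are 3 bits wide; pack them and flush the
+ * packed value to the hardware table 16 bits at a time.
+ */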
+ for (i = 0; i < HW_ATL_B0_RSS_REDIRECTION_MAX; i++) {
+ val |= (u32)(indirection_table[i] % num_rss_queues) << shift;
+ shift += 3;
+
+ if (shift < 16)
+ continue;
+
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, val & 0xffff);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, addr);
+
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
+
+ if (err < 0)
+ goto err_exit;
+
+ shift -= 16;
+ val >>= 16;
+ addr++;
+ }
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self)
+ /*struct aq_nic_cfg_s *aq_nic_cfg)*/
+{
+ unsigned int i;
+
+ /* TX checksum offloads */
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
+
+ /* RX checksum offloads */
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+
+ /* LSO offloads */
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+
+ /* LRO offloads */
+ {
+ unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
+ ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
+ ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
+
+ for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
+ hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
+
+ hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+ hw_atl_rpo_lro_inactive_interval_set(self, 0);
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
+
+ hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
+
+ hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
+
+ hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
+
+ hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+
+ hw_atl_rpo_lro_pkt_lim_set(self, 1U);
+
+ hw_atl_rpo_lro_en_set(self,
+ self->aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ }
+ return aq_hw_err_from_flags(self);
+}
+
+static
+int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
+{
+ /* Tx TC/RSS number config */
+ hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ 0x00010000U : 0x00000000U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
+
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static
+int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_hw_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U); /* 1: 4TC/8Queues */
+
+ /* Rx flow control */
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ /* RSS Ring selection */
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ 0xB3333333U : 0x00000000U);
+
+ /* Multicast filters */
+ for (i = HW_ATL_B0_MAC_MAX; i--;) {
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+ /* Vlan filters */
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+
+ /* VLAN promisc by default */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Rx Interrupts */
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+
+ hw_atl_b0_hw_rss_hash_type_set(self);
+
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+{
+ int err = 0;
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+{
+ static u32 aq_hw_atl_igcr_table_[4][2] = {
+ { 0x20000080U, 0x20000080U }, /* AQ_IRQ_INVALID */
+ { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
+ { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
+ { 0x200000A2U, 0x200000A6U } /* AQ_IRQ_MSIX */
+ };
+
+ int err = 0;
+ u32 val;
+
+ struct aq_hw_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+
+ hw_atl_b0_hw_init_tx_path(self);
+ hw_atl_b0_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
+
+ hw_atl_b0_hw_qos_set(self);
+ hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ /* Force limit MRRS on RDM/TDM to 2K */
+ val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+ aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+ (val & ~0x707) | 0x404);
+
+ /* TX DMA total request limit. B0 hardware is not capable to
+ * handle more than (8K-MRRS) incoming DMA data.
+ * Value 24 in 256byte units
+ */
+ aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ hw_atl_itr_irq_auto_masklsw_set(self, 0xffffffff);
+
+ /* Interrupts */
+ hw_atl_reg_gen_irq_map_set(self, 0, 0);
+ hw_atl_reg_gen_irq_map_set(self, 0x80 | ATL_IRQ_CAUSE_LINK, 3);
+
+ hw_atl_b0_hw_offload_set(self);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, int index)
+{
+ hw_atl_tdm_tx_desc_en_set(self, 1, index);
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, int index)
+{
+ hw_atl_rdm_rx_desc_en_set(self, 1, index);
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_start(struct aq_hw_s *self)
+{
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, int tail, int index)
+{
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, tail, index);
+ return 0;
+}
+
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr,
+ int index, int size, int buff_size, int cpu, int vec)
+{
+ u32 dma_desc_addr_lsw = (u32)base_addr;
+ u32 dma_desc_addr_msw = (u32)(base_addr >> 32);
+
+ hw_atl_rdm_rx_desc_en_set(self, false, index);
+
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index);
+
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ index);
+
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self, dma_desc_addr_msw,
+ index);
+
+ hw_atl_rdm_rx_desc_len_set(self, size / 8U, index);
+
+ hw_atl_rdm_rx_desc_data_buff_size_set(self, buff_size / 1024U, index);
+
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, index);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, index);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, index);
+
+ /* Rx ring set mode */
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_rx_set(self, vec, index);
+ hw_atl_itr_irq_map_en_rx_set(self, true, index);
+
+ hw_atl_rdm_cpu_id_set(self, cpu, index);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, index);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, index);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, index);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
+ int index, int size, int cpu, int vec)
+{
+ u32 dma_desc_lsw_addr = (u32)base_addr;
+ u32 dma_desc_msw_addr = (u32)(base_addr >> 32);
+
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ index);
+
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ index);
+
+ hw_atl_tdm_tx_desc_len_set(self, size / 8U, index);
+
+ hw_atl_b0_hw_tx_ring_tail_update(self, 0, index);
+
+ /* Set Tx threshold */
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, index);
+
+ /* Mapping interrupt vector */
+ hw_atl_itr_irq_map_tx_set(self, vec, index);
+ hw_atl_itr_irq_map_en_tx_set(self, true, index);
+
+ hw_atl_tdm_cpu_id_set(self, cpu, index);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, index);
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, int index)
+{
+ hw_atl_tdm_tx_desc_en_set(self, 0U, index);
+ return aq_hw_err_from_flags(self);
+}
+
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, int index)
+{
+ hw_atl_rdm_rx_desc_en_set(self, 0U, index);
+ return aq_hw_err_from_flags(self);
+}
+
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
new file mode 100644
index 00000000..06feb56c
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
+ * specific functions.
+ */
+
+#ifndef HW_ATL_B0_H
+#define HW_ATL_B0_H
+
+int hw_atl_b0_hw_reset(struct aq_hw_s *self);
+int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr);
+
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, uint64_t base_addr,
+ int index, int size, int cpu, int vec);
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, uint64_t base_addr,
+ int index, int size, int buff_size, int cpu, int vec);
+
+int hw_atl_b0_hw_start(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, int index);
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, int index);
+
+
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, int index);
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, int index);
+
+
+int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, int tail, int index);
+
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
+
+#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h
new file mode 100644
index 00000000..48152ead
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
+ * constants.
+ */
+
+#ifndef HW_ATL_B0_INTERNAL_H
+#define HW_ATL_B0_INTERNAL_H
+
+
+#define HW_ATL_B0_MTU_JUMBO 16352U
+#define HW_ATL_B0_MTU 1514U
+
+#define HW_ATL_B0_TX_RINGS 4U
+#define HW_ATL_B0_RX_RINGS 4U
+
+#define HW_ATL_B0_RINGS_MAX 32U
+#define HW_ATL_B0_TXD_SIZE (16U)
+#define HW_ATL_B0_RXD_SIZE (16U)
+
+#define HW_ATL_B0_MAC 0U
+#define HW_ATL_B0_MAC_MIN 1U
+#define HW_ATL_B0_MAC_MAX 33U
+
+/* Maximum supported VLAN filters */
+#define HW_ATL_B0_MAX_VLAN_IDS 16
+
+/* UCAST/MCAST filters */
+#define HW_ATL_B0_UCAST_FILTERS_MAX 38
+#define HW_ATL_B0_MCAST_FILTERS_MAX 8
+
+/* interrupts */
+#define HW_ATL_B0_ERR_INT 8U
+#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000)
+#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000)
+#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000)
+
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001)
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002)
+#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0)
+#define HW_ATL_B0_TXD_CTL_DD (0x00100000)
+#define HW_ATL_B0_TXD_CTL_EOP (0x00200000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22)
+#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23)
+#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24)
+#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25)
+#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26)
+#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27)
+#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28)
+
+#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21)
+#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22)
+
+#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_B0_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
+
+#define HW_ATL_B0_TXBUF_MAX 160U
+#define HW_ATL_B0_RXBUF_MAX 320U
+
+#define HW_ATL_B0_RXD_BUF_SIZE_MAX (16 * 1024)
+
+#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
+#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
+
+#define HW_ATL_B0_TCRSS_4_8 1
+#define HW_ATL_B0_TC_MAX 1U
+#define HW_ATL_B0_RSS_MAX 8U
+
+#define HW_ATL_B0_LRO_RXD_MAX 2U
+#define HW_ATL_B0_RS_SLIP_ENABLED 0U
+
+/* (256k -1(max pay_len) - 54(header)) */
+#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U
+
+/* (256k -1(max pay_len) - 74(header)) */
+#define HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U
+
+#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U
+#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU
+
+#define HW_ATL_B0_FW_SEMA_RAM 0x2U
+
+#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000)
+
+#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007)
+#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008)
+#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0)
+#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000)
+#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000)
+
+#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */
+#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LEN upper bits */
+#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000)
+
+#define HW_ATL_B0_RXD_DD (0x1)
+#define HW_ATL_B0_RXD_NCEA0 (0x1)
+
+#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F)
+#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0)
+#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000)
+#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000)
+#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000)
+
+#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001)
+#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002)
+#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C)
+#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004)
+#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008)
+#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010)
+#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0)
+#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000)
+
+#define L2_FILTER_ACTION_DISCARD (0x0)
+#define L2_FILTER_ACTION_HOST (0x1)
+
+#define HW_ATL_B0_UCP_0X370_REG (0x370)
+
+#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10)
+
+#define HW_ATL_INTR_MODER_MAX 0x1FF
+#define HW_ATL_INTR_MODER_MIN 0xFF
+
+#define HW_ATL_B0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_B0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_B0_MAX_RXD 8184U
+#define HW_ATL_B0_MAX_TXD 8184U
+
+/* HW layer capabilities */
+
+#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/atlantic/hw_atl/hw_atl_llh.c
new file mode 100644
index 00000000..2dc5be2f
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_llh.c
@@ -0,0 +1,1490 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#include "hw_atl_llh.h"
+
+#include "../atl_hw_regs.h"
+#include "hw_atl_llh_internal.h"
+
+/* global */
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
+}
+
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
+}
+
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+ HW_ATL_GLB_REG_RES_DIS_MSK,
+ HW_ATL_GLB_REG_RES_DIS_SHIFT,
+ glb_reg_res_dis);
+}
+
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
+}
+
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT);
+}
+
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
+}
+
+/* stats */
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
+}
+
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
+}
+
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
+}
+
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
+}
+
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
+}
+
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
+}
+
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
+}
+
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
+}
+
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
+}
+
+/* interrupt */
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
+}
+
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx)
+{
+/* register address for bitfield imr_rx{r}_en */
+ static const u32 itr_imr_rxren_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_rx{r}_en */
+ static const u32 itr_imr_rxren_msk[32] = {
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U
+ };
+
+/* lower bit position of bitfield imr_rx{r}_en */
+ static const u32 itr_imr_rxren_shift[32] = {
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx],
+ itr_imr_rxren_msk[rx],
+ itr_imr_rxren_shift[rx],
+ irq_map_en_rx);
+}
+
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx)
+{
+/* register address for bitfield imr_tx{t}_en */
+ static const u32 itr_imr_txten_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_tx{t}_en */
+ static const u32 itr_imr_txten_msk[32] = {
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}_en */
+ static const u32 itr_imr_txten_shift[32] = {
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx],
+ itr_imr_txten_msk[tx],
+ itr_imr_txten_shift[tx],
+ irq_map_en_tx);
+}
+
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+{
+/* register address for bitfield imr_rx{r}[4:0] */
+ static const u32 itr_imr_rxr_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_rx{r}[4:0] */
+ static const u32 itr_imr_rxr_msk[32] = {
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
+ };
+
+/* lower bit position of bitfield imr_rx{r}[4:0] */
+ static const u32 itr_imr_rxr_shift[32] = {
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx],
+ itr_imr_rxr_msk[rx],
+ itr_imr_rxr_shift[rx],
+ irq_map_rx);
+}
+
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}[4:0] */
+ static const u32 itr_imr_txt_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
+ };
+
+/* bitmask for bitfield imr_tx{t}[4:0] */
+ static const u32 itr_imr_txt_msk[32] = {
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}[4:0] */
+ static const u32 itr_imr_txt_shift[32] = {
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx],
+ itr_imr_txt_msk[tx],
+ itr_imr_txt_shift[tx],
+ irq_map_tx);
+}
+
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
+}
+
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
+}
+
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+ HW_ATL_ITR_REG_RES_DSBL_MSK,
+ HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
+}
+
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
+}
+
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
+}
+
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT);
+}
+
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT, res_irq);
+}
+
+/* rdm */
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+ HW_ATL_RDM_DCADCPUID_MSK,
+ HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
+}
+
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+ HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
+}
+
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+ HW_ATL_RDM_DCA_MODE_MSK,
+ HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
+}
+
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+ HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
+ rx_desc_data_buff_size);
+}
+
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_RDM_DCADDESC_EN_MSK,
+ HW_ATL_RDM_DCADDESC_EN_SHIFT,
+ rx_desc_dca_en);
+}
+
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDEN_MSK,
+ HW_ATL_RDM_DESCDEN_SHIFT,
+ rx_desc_en);
+}
+
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+ HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
+ rx_desc_head_buff_size);
+}
+
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+ HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
+ rx_desc_head_splitting);
+}
+
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+ HW_ATL_RDM_DESCDHD_MSK,
+ HW_ATL_RDM_DESCDHD_SHIFT);
+}
+
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
+ rx_desc_len);
+}
+
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+ HW_ATL_RDM_DESCDRESET_MSK,
+ HW_ATL_RDM_DESCDRESET_SHIFT,
+ rx_desc_res);
+}
+
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
+ rx_desc_wr_wb_irq_en);
+}
+
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+ HW_ATL_RDM_DCADHDR_EN_MSK,
+ HW_ATL_RDM_DCADHDR_EN_SHIFT,
+ rx_head_dca_en);
+}
+
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+ HW_ATL_RDM_DCADPAY_EN_MSK,
+ HW_ATL_RDM_DCADPAY_EN_SHIFT,
+ rx_pld_dca_en);
+}
+
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+ HW_ATL_RDM_INT_RIM_EN_MSK,
+ HW_ATL_RDM_INT_RIM_EN_SHIFT,
+ rdm_intr_moder_en);
+}
+
+/* reg */
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
+}
+
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
+}
+
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
+}
+
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
+}
+
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
+ rx_dma_desc_base_addrlsw);
+}
+
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
+ rx_dma_desc_base_addrmsw);
+}
+
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
+}
+
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
+ rx_dma_desc_tail_ptr);
+}
+
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+ rx_flr_mcst_flr_msk);
+}
+
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+ rx_flr_mcst_flr);
+}
+
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+ rx_flr_rss_control1);
+}
+
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+ u32 rx_filter_control2)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
+}
+
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
+ rx_intr_moderation_ctl);
+}
+
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
+}
+
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
+ tx_dma_desc_base_addrlsw);
+}
+
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
+ tx_dma_desc_base_addrmsw);
+}
+
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor)
+{
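+ /* Make all descriptor updates in host memory visible before the device
+  * sees the new tail pointer (doorbell) written below.
+  */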
+ rte_wmb();
+
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
+ tx_dma_desc_tail_ptr);
+}
+
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
+ tx_intr_moderation_ctl);
+}
+
+/* RPB: rx packet buffer */
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+ HW_ATL_RPB_DMA_SYS_LBK_MSK,
+ HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
+}
+
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
+ rx_traf_class_mode);
+}
+
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
+}
+
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+ HW_ATL_RPB_RX_BUF_EN_MSK,
+ HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
+}
+
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBHI_THRESH_MSK,
+ HW_ATL_RPB_RXBHI_THRESH_SHIFT,
+ rx_buff_hi_threshold_per_tc);
+}
+
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBLO_THRESH_MSK,
+ HW_ATL_RPB_RXBLO_THRESH_SHIFT,
+ rx_buff_lo_threshold_per_tc);
+}
+
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+ HW_ATL_RPB_RX_FC_MODE_MSK,
+ HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
+}
+
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+ HW_ATL_RPB_RXBBUF_SIZE_MSK,
+ HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
+ rx_pkt_buff_size_per_tc);
+}
+
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+ HW_ATL_RPB_RXBXOFF_EN_MSK,
+ HW_ATL_RPB_RXBXOFF_EN_SHIFT,
+ rx_xoff_en_per_tc);
+}
+
+/* rpf */
+
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+ HW_ATL_RPFL2BC_THRESH_MSK,
+ HW_ATL_RPFL2BC_THRESH_SHIFT,
+ l2broadcast_count_threshold);
+}
+
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+ HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
+}
+
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+ HW_ATL_RPFL2BC_ACT_MSK,
+ HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
+}
+
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+ HW_ATL_RPFL2MC_ENF_MSK,
+ HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
+}
+
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT,
+ l2promiscuous_mode_en);
+}
+
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+ HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
+ l2unicast_flr_act);
+}
+
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+ HW_ATL_RPFL2UC_ENF_MSK,
+ HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
+}
+
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
+ l2unicast_dest_addresslsw);
+}
+
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+ HW_ATL_RPFL2UC_DAFMSW_MSK,
+ HW_ATL_RPFL2UC_DAFMSW_SHIFT,
+ l2unicast_dest_addressmsw);
+}
+
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
+ l2_accept_all_mc_packets);
+}
+
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc)
+{
+/* register address for bitfield rx_tc_up{t}[2:0] */
+ static const u32 rpf_rpb_rx_tc_upt_adr[8] = {
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
+ };
+
+/* bitmask for bitfield rx_tc_up{t}[2:0] */
+ static const u32 rpf_rpb_rx_tc_upt_msk[8] = {
+ 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
+ 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
+ };
+
+/* lower bit position of bitfield rx_tc_up{t}[2:0] */
+ static const u32 rpf_rpb_rx_tc_upt_shft[8] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
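+/* All eight user priorities share the single register at 0x54C4, one 3-bit
+ * tc field per 4-bit slot, so only the mask and shift vary with tc.
+ */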
+ aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
+ rpf_rpb_rx_tc_upt_msk[tc],
+ rpf_rpb_rx_tc_upt_shft[tc],
+ user_priority_tc_map);
+}
+
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+ HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+ HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
+ rss_key_addr);
+}
+
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
+ rss_key_wr_data);
+}
+
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
+}
+
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
+ rss_key_wr_en);
+}
+
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+ HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+ HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+ rss_redir_tbl_addr);
+}
+
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
+ rss_redir_tbl_wr_data);
+}
+
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
+}
+
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
+}
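+
+/* The rss_key_* and rss_redir_* accessors above form a small indirect write
+ * window. A typical caller sequence is expected to look roughly like the
+ * sketch below (illustrative only; the real loop, delay and timeout live in
+ * the hw-specific layer):
+ *
+ *   hw_atl_rpf_rss_key_wr_data_set(hw, key_word);
+ *   hw_atl_rpf_rss_key_addr_set(hw, word_index);
+ *   hw_atl_rpf_rss_key_wr_en_set(hw, 1U);
+ *   do { } while (hw_atl_rpf_rss_key_wr_en_get(hw) != 0U);
+ *
+ * The redirection table is programmed through the same data/address/enable
+ * handshake.
+ */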
+
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
+ tpo_to_rpf_sys_lbk);
+}
+
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+ HW_ATL_RPF_VL_INNER_TPID_MSK,
+ HW_ATL_RPF_VL_INNER_TPID_SHIFT,
+ vlan_inner_etht);
+}
+
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+ HW_ATL_RPF_VL_OUTER_TPID_MSK,
+ HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
+ vlan_outer_etht);
+}
+
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
+ vlan_prom_mode_en);
+}
+
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+ vlan_acc_untagged_packets);
+}
+
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
+ vlan_untagged_act);
+}
+
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_EN_F_MSK,
+ HW_ATL_RPF_VL_EN_F_SHIFT,
+ vlan_flr_en);
+}
+
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+ HW_ATL_RPF_VL_ACT_F_MSK,
+ HW_ATL_RPF_VL_ACT_F_SHIFT,
+ vlan_flr_act);
+}
+
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+ HW_ATL_RPF_VL_ID_F_MSK,
+ HW_ATL_RPF_VL_ID_F_SHIFT,
+ vlan_id_flr);
+}
+
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+ HW_ATL_RPF_ET_ENF_MSK,
+ HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
+}
+
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+ HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
+ etht_user_priority_en);
+}
+
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+ HW_ATL_RPF_ET_RXQFEN_MSK,
+ HW_ATL_RPF_ET_RXQFEN_SHIFT,
+ etht_rx_queue_en);
+}
+
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+ HW_ATL_RPF_ET_UPF_MSK,
+ HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
+}
+
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_RXQF_MSK,
+ HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
+}
+
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_MNG_RXQF_MSK,
+ HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
+ etht_mgt_queue);
+}
+
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+ HW_ATL_RPF_ET_ACTF_MSK,
+ HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
+}
+
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+ HW_ATL_RPF_ET_VALF_MSK,
+ HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
+}
+
+/* RPO: rx packet offload */
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+ HW_ATL_RPO_IPV4CHK_EN_MSK,
+ HW_ATL_RPO_IPV4CHK_EN_SHIFT,
+ ipv4header_crc_offload_en);
+}
+
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+ HW_ATL_RPO_DESCDVL_STRIP_MSK,
+ HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
+ rx_desc_vlan_stripping);
+}
+
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+ HW_ATL_RPOL4CHK_EN_MSK,
+ HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
+}
+
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
+}
+
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+ HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+ HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
+ lro_patch_optimization_en);
+}
+
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+ HW_ATL_RPO_LRO_QSES_LMT_MSK,
+ HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
+ lro_qsessions_lim);
+}
+
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
+ lro_total_desc_lim);
+}
+
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+ HW_ATL_RPO_LRO_PKT_MIN_MSK,
+ HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
+ lro_min_pld_of_first_pkt);
+}
+
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
+}
+
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
+{
+/* Register address for bitfield lro{L}_des_max[1:0] */
+ static const u32 rpo_lro_ldes_max_adr[32] = {
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
+ };
+
+/* Bitmask for bitfield lro{L}_des_max[1:0] */
+ static const u32 rpo_lro_ldes_max_msk[32] = {
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
+ };
+
+/* Lower bit position of bitfield lro{L}_des_max[1:0] */
+ static const u32 rpo_lro_ldes_max_shift[32] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
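+/* Each of the four registers at 0x55A0..0x55AC packs eight LRO sessions,
+ * one 2-bit des_max field per 4-bit slot; the tables above pick the
+ * register, mask and shift for the requested session.
+ */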
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro],
+ rpo_lro_ldes_max_msk[lro],
+ rpo_lro_ldes_max_shift[lro],
+ lro_max_number_of_descriptors);
+}
+
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+ HW_ATL_RPO_LRO_TB_DIV_MSK,
+ HW_ATL_RPO_LRO_TB_DIV_SHIFT,
+ lro_time_base_divider);
+}
+
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+ HW_ATL_RPO_LRO_INA_IVAL_MSK,
+ HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
+ lro_inactive_interval);
+}
+
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+ HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+ HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+ lro_max_coal_interval);
+}
+
+/* rx */
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+ HW_ATL_RX_REG_RES_DSBL_MSK,
+ HW_ATL_RX_REG_RES_DSBL_SHIFT,
+ rx_reg_res_dis);
+}
+
+/* tdm */
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+ HW_ATL_TDM_DCADCPUID_MSK,
+ HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
+}
+
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
+}
+
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+ HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
+}
+
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+ HW_ATL_TDM_DCA_MODE_MSK,
+ HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
+}
+
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_TDM_DCADDESC_EN_MSK,
+ HW_ATL_TDM_DCADDESC_EN_SHIFT,
+ tx_desc_dca_en);
+}
+
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDEN_MSK,
+ HW_ATL_TDM_DESCDEN_SHIFT,
+ tx_desc_en);
+}
+
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+ HW_ATL_TDM_DESCDHD_MSK,
+ HW_ATL_TDM_DESCDHD_SHIFT);
+}
+
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDLEN_MSK,
+ HW_ATL_TDM_DESCDLEN_SHIFT,
+ tx_desc_len);
+}
+
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
+ tx_desc_wr_wb_irq_en);
+}
+
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+ HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+ HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
+ tx_desc_wr_wb_threshold);
+}
+
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+ HW_ATL_TDM_INT_MOD_EN_MSK,
+ HW_ATL_TDM_INT_MOD_EN_SHIFT,
+ tdm_irq_moderation_en);
+}
+
+/* thm */
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
+ lso_tcp_flag_of_first_pkt);
+}
+
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
+ lso_tcp_flag_of_last_pkt);
+}
+
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
+ lso_tcp_flag_of_middle_pkt);
+}
+
+/* TPB: tx packet buffer */
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+ HW_ATL_TPB_TX_BUF_EN_MSK,
+ HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
+}
+
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT);
+}
+
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT,
+ tx_traf_class_mode);
+}
+
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBHI_THRESH_MSK,
+ HW_ATL_TPB_TXBHI_THRESH_SHIFT,
+ tx_buff_hi_threshold_per_tc);
+}
+
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBLO_THRESH_MSK,
+ HW_ATL_TPB_TXBLO_THRESH_SHIFT,
+ tx_buff_lo_threshold_per_tc);
+}
+
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+ HW_ATL_TPB_DMA_SYS_LBK_MSK,
+ HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
+ tx_dma_sys_lbk_en);
+}
+
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+ HW_ATL_TPB_TXBBUF_SIZE_MSK,
+ HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
+ tx_pkt_buff_size_per_tc);
+}
+
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_path_scp_ins_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+ HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+ HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
+ tx_path_scp_ins_en);
+}
+
+/* TPO: tx packet offload */
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+ HW_ATL_TPO_IPV4CHK_EN_MSK,
+ HW_ATL_TPO_IPV4CHK_EN_SHIFT,
+ ipv4header_crc_offload_en);
+}
+
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+ HW_ATL_TPOL4CHK_EN_MSK,
+ HW_ATL_TPOL4CHK_EN_SHIFT,
+ tcp_udp_crc_offload_en);
+}
+
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+ HW_ATL_TPO_PKT_SYS_LBK_MSK,
+ HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
+ tx_pkt_sys_lbk_en);
+}
+
+/* TPS: tx packet scheduler */
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
+ tx_pkt_shed_data_arb_mode);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+ HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+ HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
+ curr_time_res);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+ HW_ATL_TPS_DESC_RATE_LIM_MSK,
+ HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
+ tx_pkt_shed_desc_rate_lim);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+ arb_mode);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
+ tx_pkt_shed_desc_tc_weight);
+}
+
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+ arb_mode);
+}
+
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
+ tx_pkt_shed_tc_data_weight);
+}
+
+/* tx */
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+ HW_ATL_TX_REG_RES_DSBL_MSK,
+ HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
+}
+
+/* msm */
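+/* The MSM block is reached through an indirect register window. Judging by
+ * the accessors below, a read programs the target address, pulses the read
+ * strobe, waits for the busy flag reported by
+ * hw_atl_msm_reg_access_status_get() to clear and then fetches the value via
+ * hw_atl_msm_reg_rd_data_get(); writes mirror this with the wr_data/wr_strobe
+ * pair. The exact ordering is defined by the callers, so treat this as an
+ * inference from the register names.
+ */
+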
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+ HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+ HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
+}
+
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+ HW_ATL_MSM_REG_ADDR_MSK,
+ HW_ATL_MSM_REG_ADDR_SHIFT,
+ reg_addr_for_indirect_addr);
+}
+
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+ HW_ATL_MSM_REG_RD_STROBE_MSK,
+ HW_ATL_MSM_REG_RD_STROBE_SHIFT,
+ reg_rd_strobe);
+}
+
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
+}
+
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
+}
+
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+ HW_ATL_MSM_REG_WR_STROBE_MSK,
+ HW_ATL_MSM_REG_WR_STROBE_SHIFT,
+ reg_wr_strobe);
+}
+
+/* pci */
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+ HW_ATL_PCI_REG_RES_DSBL_MSK,
+ HW_ATL_PCI_REG_RES_DSBL_SHIFT,
+ pci_reg_res_dis);
+}
+
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
+ glb_cpu_scratch_scp);
+}
+
+void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR,
+ HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK,
+ HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT, up_force_intr);
+}
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/atlantic/hw_atl/hw_atl_llh.h
new file mode 100644
index 00000000..e30083ce
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_llh.h
@@ -0,0 +1,714 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_H
+#define HW_ATL_LLH_H
+
+#include "../atl_types.h"
+
+struct aq_hw_s;
+
+/* global */
+
+/* set global microprocessor semaphore */
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
+
+/* get global microprocessor semaphore */
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+
+/* set global register reset disable */
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+
+/* set soft reset */
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+
+/* get soft reset */
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
+
+/* stats */
+
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter lsw */
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter lsw */
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter lsw */
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter lsw */
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter msw */
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter msw */
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter msw */
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter msw */
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx errors counter register */
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast frames counter register */
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx multicast frames counter register */
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast frames counter register */
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast octets counter register 1 */
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast octets counter register 0 */
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get rx dma statistics counter 7 */
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+
+/* get msm tx errors counter register */
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast frames counter register */
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast frames counter register */
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast frames counter register */
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast octets counter register 1 */
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast octets counter register 1 */
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast octets counter register 0 */
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get global mif identification */
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+
+/* interrupt */
+
+/* set interrupt auto mask lsw */
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw);
+
+/* set interrupt mapping enable rx */
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx);
+
+/* set interrupt mapping enable tx */
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx);
+
+/* set interrupt mapping rx */
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+
+/* set interrupt mapping tx */
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+
+/* set interrupt mask clear lsw */
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw);
+
+/* set interrupt mask set lsw */
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+
+/* set interrupt register reset disable */
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+
+/* set interrupt status clear lsw */
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
+
+/* get interrupt status lsw */
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+
+/* get reset interrupt */
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
+
+/* set reset interrupt */
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+
+/* rdm */
+
+/* set cpu id */
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set rx dca enable */
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+
+/* set rx dca mode */
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+
+/* set rx descriptor data buffer size */
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor dca enable */
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
+
+/* set rx descriptor enable */
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
+
+/* set rx descriptor header splitting */
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor);
+
+/* get rx descriptor head pointer */
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx descriptor length */
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
+
+/* set rx descriptor write-back interrupt enable */
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
+
+/* set rx header dca enable */
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
+
+/* set rx payload dca enable */
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca);
+
+/* set rx descriptor header buffer size */
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor reset */
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
+
+/* Set RDM Interrupt Moderation Enable */
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en);
+
+/* reg */
+
+/* set general interrupt mapping register */
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx);
+
+/* get general interrupt status register */
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+
+/* set interrupt global control register */
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+
+/* set interrupt throttle register */
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+
+/* set rx dma descriptor base address lsw */
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set rx dma descriptor base address msw */
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* get rx dma descriptor status register */
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx dma descriptor tail pointer register */
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* set rx filter multicast filter mask register */
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
+
+/* set rx filter multicast filter register */
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
+
+/* set rx filter rss control register 1 */
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
+
+/* Set RX Filter Control Register 2 */
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+
+/* Set RX Interrupt Moderation Control Register */
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue);
+
+/* set tx dma debug control */
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl);
+
+/* set tx dma descriptor base address lsw */
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set tx dma descriptor base address msw */
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* set tx dma descriptor tail pointer register */
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* Set TX Interrupt Moderation Control Register */
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* set global microprocessor scratch pad */
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp);
+
+/* rpb */
+
+/* set dma system loopback */
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+
+/* set rx traffic class mode */
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
+
+/* get rx traffic class mode */
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
+
+/* set rx buffer enable */
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+
+/* set rx buffer high threshold (per tc) */
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set rx buffer low threshold (per tc) */
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set rx flow control mode */
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode);
+
+/* set rx packet buffer size (per tc) */
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
+
+/* set rx xoff enable (per tc) */
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
+ u32 buffer);
+
+/* rpf */
+
+/* set l2 broadcast count threshold */
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
+
+/* set l2 broadcast enable */
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+
+/* set l2 broadcast filter action */
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
+
+/* set l2 multicast filter enable */
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter);
+
+/* set l2 promiscuous mode enable */
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
+
+/* set l2 unicast filter action */
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter);
+
+/* set l2 unicast filter enable */
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
+
+/* set l2 unicast destination address lsw */
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter);
+
+/* set l2 unicast destination address msw */
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter);
+
+/* Set L2 Accept all Multicast packets */
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
+
+/* set user-priority tc mapping */
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc);
+
+/* set rss key address */
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+
+/* set rss key write data */
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+
+/* get rss key write enable */
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss key write enable */
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+
+/* set rss redirection table address */
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
+
+/* set rss redirection table write data */
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
+
+/* get rss redirection write enable */
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss redirection write enable */
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+
+/* set tpo to rpf system loopback */
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
+
+/* set vlan inner ethertype */
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+
+/* set vlan outer ethertype */
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+
+/* set vlan promiscuous mode enable */
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en);
+
+/* Set VLAN untagged action */
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act);
+
+/* Set VLAN accept untagged packets */
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets);
+
+/* Set VLAN filter enable */
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter);
+
+/* Set VLAN Filter Action */
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+ u32 filter);
+
+/* Set VLAN ID Filter */
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter);
+
+/* set ethertype filter enable */
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter);
+
+/* set ethertype user-priority enable */
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en,
+ u32 filter);
+
+/* set ethertype rx queue enable */
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter);
+
+/* set ethertype rx queue */
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
+
+/* set ethertype user-priority */
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter);
+
+/* set ethertype management queue */
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
+
+/* set ethertype filter action */
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
+
+/* set ethertype filter */
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+
+/* rpo */
+
+/* set ipv4 header checksum offload enable */
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set rx descriptor vlan stripping */
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
+
+/* set tcp/udp checksum offload enable */
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* Set LRO Patch Optimization Enable. */
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
+
+/* Set Large Receive Offload Enable */
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+
+/* Set LRO Q Sessions Limit */
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim);
+
+/* Set LRO Total Descriptor Limit */
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim);
+
+/* Set LRO Min Payload of First Packet */
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
+
+/* Set LRO Packet Limit */
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+
+/* Set LRO Max Number of Descriptors */
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_desc_num, u32 lro);
+
+/* Set LRO Time Base Divider */
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
+
+/* Set LRO Inactive Interval */
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
+
+/* Set LRO Max Coalescing Interval */
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval);
+
+/* rx */
+
+/* set rx register reset disable */
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+
+/* tdm */
+
+/* set cpu id */
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set large send offload enable */
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
+
+/* set tx descriptor enable */
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor);
+
+/* set tx dca enable */
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+
+/* set tx dca mode */
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+
+/* set tx descriptor dca enable */
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca);
+
+/* get tx descriptor head pointer */
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set tx descriptor length */
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
+
+/* set tx descriptor write-back interrupt enable */
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
+
+/* set tx descriptor write-back threshold */
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor);
+
+/* Set TDM Interrupt Moderation Enable */
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
+/* thm */
+
+/* set lso tcp flag of first packet */
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
+
+/* set lso tcp flag of last packet */
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
+
+/* set lso tcp flag of middle packet */
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
+
+/* tpb */
+
+/* set TX Traffic Class Mode */
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode);
+
+/* get TX Traffic Class Mode */
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+
+/* set tx buffer enable */
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+
+/* set tx buffer high threshold (per tc) */
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set tx buffer low threshold (per tc) */
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set tx dma system loopback enable */
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_sys_lbk_en);
+
+/* set tx packet buffer size (per tc) */
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc,
+ u32 buffer);
+
+/* set tx path pad insert enable */
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_path_scp_ins_en);
+
+/* tpo */
+
+/* set ipv4 header checksum offload enable */
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set tcp/udp checksum offload enable */
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* set tx pkt system loopback enable */
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en);
+
+/* tps */
+
+/* set tx packet scheduler data arbitration mode */
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
+
+/* set tx packet scheduler descriptor rate current time reset */
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
+
+/* set tx packet scheduler descriptor rate limit */
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
+
+/* set tx packet scheduler descriptor tc arbitration mode */
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
+
+/* set tx packet scheduler descriptor tc max credit */
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc);
+
+/* set tx packet scheduler descriptor tc weight */
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc);
+
+/* set tx packet scheduler descriptor vm arbitration mode */
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc);
+
+/* set tx packet scheduler tc data weight */
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc);
+
+/* tx */
+
+/* set tx register reset disable */
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+
+/* msm */
+
+/* get register access status */
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+
+/* set register address for indirect address */
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
+
+/* set register read strobe */
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+
+/* get register read data */
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+
+/* set register write data */
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+
+/* set register write strobe */
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+
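Editor's note: the msm block above is an indirect access window (address register, separate read/write data registers, strobes, and a status flag). The exact handshake is not visible from the declarations alone, so the following is only a sketch under the assumption that the strobe starts the access and the status bit reads non-zero while it is busy.

    /* Sketch only: strobe/status polarity and ordering are assumptions. */
    static u32 msm_indirect_read_sketch(struct aq_hw_s *hw, u32 reg)
    {
        int budget = 1000;

        hw_atl_msm_reg_addr_for_indirect_addr_set(hw, reg);
        hw_atl_msm_reg_rd_strobe_set(hw, 1);

        while (hw_atl_msm_reg_access_status_get(hw) && --budget)
            ;                           /* busy-wait for completion (assumed polarity) */

        return hw_atl_msm_reg_rd_data_get(hw);
    }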
+/* pci */
+
+/* set pci register reset disable */
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+
+/* set uP Force Interrupt */
+void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+
+
+#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h
new file mode 100644
index 00000000..27b9b9cb
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -0,0 +1,2407 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_llh_internal.h: Preprocessor definitions
+ * for Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_INTERNAL_H
+#define HW_ATL_LLH_INTERNAL_H
+
+/* global microprocessor semaphore definitions
+ * base address: 0x000003a0
+ * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
+ */
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)
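Editor's note: the parameterised _ADR() macros in this header all compute base + index * stride, as described in the comment block above each one. A standalone check of the semaphore macro (the macro line is copied so the snippet compiles on its own):

    #include <assert.h>

    #define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)

    int main(void)
    {
        assert(HW_ATL_GLB_CPU_SEM_ADR(0)  == 0x3a0u);  /* base address           */
        assert(HW_ATL_GLB_CPU_SEM_ADR(15) == 0x3dcu);  /* base + 15 * 0x4 stride */
        return 0;
    }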
+/* register address for bitfield rx dma good octet counter lsw [1f:0] */
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
+/* register address for bitfield rx dma good packet counter lsw [1f:0] */
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
+/* register address for bitfield tx dma good octet counter lsw [1f:0] */
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
+/* register address for bitfield tx dma good packet counter lsw [1f:0] */
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
+
+/* register address for bitfield rx dma good octet counter msw [3f:20] */
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
+/* register address for bitfield rx dma good packet counter msw [3f:20] */
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
+/* register address for bitfield tx dma good octet counter msw [3f:20] */
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
+/* register address for bitfield tx dma good packet counter msw [3f:20] */
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
+
+/* preprocessor definitions for msm rx errors counter register */
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
+
+/* preprocessor definitions for msm rx unicast frames counter register */
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
+
+/* preprocessor definitions for msm rx multicast frames counter register */
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
+
+/* preprocessor definitions for msm rx broadcast frames counter register */
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 1 */
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 2 */
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
+
+/* preprocessor definitions for msm rx unicast octets counter register 0 */
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
+
+/* preprocessor definitions for msm tx unicast frames counter register */
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
+
+/* preprocessor definitions for msm tx multicast frames counter register */
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
+
+/* preprocessor definitions for global mif identification */
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
+
+/* register address for bitfield iamr_lsw[1f:0] */
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
+/* register address for bitfield rx dma drop packet counter [1f:0] */
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
+
+/* register address for bitfield imcr_lsw[1f:0] */
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
+/* register address for bitfield imsr_lsw[1f:0] */
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
+/* register address for bitfield itr_reg_res_dsbl */
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
+/* bitmask for bitfield itr_reg_res_dsbl */
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
+/* lower bit position of bitfield itr_reg_res_dsbl */
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
+/* register address for bitfield iscr_lsw[1f:0] */
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
+/* register address for bitfield isr_lsw[1f:0] */
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
+/* register address for bitfield itr_reset */
+#define HW_ATL_ITR_RES_ADR 0x00002300
+/* bitmask for bitfield itr_reset */
+#define HW_ATL_ITR_RES_MSK 0x80000000
+/* lower bit position of bitfield itr_reset */
+#define HW_ATL_ITR_RES_SHIFT 31
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
+/* register address for bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
+
+/* rx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_rdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
+/* bitmask for bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
+/* width of bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
+/* default value of bitfield dca_en */
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
+
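Editor's note: each bitfield block publishes the same set of constants (ADR, MSK, MSKN, SHIFT, WIDTH plus a default). A self-contained sketch of the read-modify-write idiom these lend themselves to, using the dca_en constants just defined; field_set() is a hypothetical helper, not part of the driver.

    #include <assert.h>
    #include <stdint.h>

    /* Constants copied verbatim from the dca_en block above so this compiles standalone. */
    #define HW_ATL_RDM_DCA_EN_MSK   0x80000000u
    #define HW_ATL_RDM_DCA_EN_MSKN  0x7fffffffu
    #define HW_ATL_RDM_DCA_EN_SHIFT 31

    /* Hypothetical helper: clear the field with the inverted mask (MSKN),
     * then OR the new value in at SHIFT. */
    static uint32_t field_set(uint32_t reg, uint32_t mskn, uint32_t shift, uint32_t val)
    {
        return (reg & mskn) | (val << shift);
    }

    int main(void)
    {
        uint32_t reg = 0x12345678u;   /* stand-in for a read of register 0x6180 */

        reg = field_set(reg, HW_ATL_RDM_DCA_EN_MSKN, HW_ATL_RDM_DCA_EN_SHIFT, 1u);
        assert(((reg & HW_ATL_RDM_DCA_EN_MSK) >> HW_ATL_RDM_DCA_EN_SHIFT) == 1u);
        return 0;
    }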
+/* rx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_rdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
+/* bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
+/* width of bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
+/* default value of bitfield dca_mode[3:0] */
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
+
+/* rx desc{d}_data_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_data_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
+/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
+/* lower bit position of bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
+/* width of bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
+/* default value of bitfield desc{d}_data_size[4:0] */
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
+
+/* rx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
+/* width of bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
+/* default value of bitfield dca{d}_desc_en */
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
+
+/* rx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
+/* width of bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
+/* default value of bitfield desc{d}_en */
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
+
+/* rx desc{d}_hdr_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_hdr_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
+/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
+/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
+/* width of bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
+/* default value of bitfield desc{d}_hdr_size[4:0] */
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
+
+/* rx desc{d}_hdr_split bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_split".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_hdr_split_i[0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
+/* inverted bitmask for bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
+/* lower bit position of bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
+/* width of bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
+/* default value of bitfield desc{d}_hdr_split */
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
+
+/* rx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="rdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
+
+/* rx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
+/* width of bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
+
+/* rx desc{d}_reset bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_reset".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_q_pf_res_i[0]"
+ */
+
+/* register address for bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
+/* inverted bitmask for bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
+/* lower bit position of bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
+/* width of bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
+/* default value of bitfield desc{d}_reset */
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
+
+/* rx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_rdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
+/* bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
+/* lower bit position of bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
+/* width of bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
+/* default value of bitfield int_desc_wrb_en */
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
+
+/* rx dca{d}_hdr_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_hdr_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_hdr_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
+/* inverted bitmask for bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
+/* lower bit position of bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
+/* width of bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
+/* default value of bitfield dca{d}_hdr_en */
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
+
+/* rx dca{d}_pay_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_pay_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_pay_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
+/* inverted bitmask for bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
+/* lower bit position of bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
+/* width of bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
+/* default value of bitfield dca{d}_pay_en */
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
+
+/* RX rdm_int_rim_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rdm_int_rim_en".
+ * PORT="pif_rdm_int_rim_en_i"
+ */
+
+/* Register address for bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
+/* Bitmask for bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
+/* Inverted bitmask for bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
+/* Lower bit position of bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
+/* Width of bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
+/* Default value of bitfield rdm_int_rim_en */
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
+
+/* general interrupt mapping register definitions
+ * preprocessor definitions for general interrupt mapping register
+ * base address: 0x00002180
+ * parameter: regidx {f} | stride size 0x4 | range [0, 3]
+ */
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
+
+/* general interrupt status register definitions
+ * preprocessor definitions for general interrupt status register
+ * address: 0x000021A0
+ */
+
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
+
+/* interrupt global control register definitions
+ * preprocessor definitions for interrupt global control register
+ * address: 0x00002300
+ */
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
+
+/* interrupt throttle register definitions
+ * preprocessor definitions for interrupt throttle register
+ * base address: 0x00002800
+ * parameter: throttle {t} | stride size 0x4 | range [0, 31]
+ */
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
+
+/* rx dma descriptor base address lsw definitions
+ * preprocessor definitions for rx dma descriptor base address lsw
+ * base address: 0x00005b00
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
+(0x00005b00u + (descriptor) * 0x20)
+
+/* rx dma descriptor base address msw definitions
+ * preprocessor definitions for rx dma descriptor base address msw
+ * base address: 0x00005b04
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
+(0x00005b04u + (descriptor) * 0x20)
+
+/* rx dma descriptor status register definitions
+ * preprocessor definitions for rx dma descriptor status register
+ * base address: 0x00005b14
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+ (0x00005b14u + (descriptor) * 0x20)
+
+/* rx dma descriptor tail pointer register definitions
+ * preprocessor definitions for rx dma descriptor tail pointer register
+ * base address: 0x00005b10
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00005b10u + (descriptor) * 0x20)
+
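Editor's note: the three register groups above (descriptor base address low/high words and the tail pointer) are what a driver programs to attach an RX descriptor ring. A sketch under the assumption that aq_hw_write_reg() is the driver's plain 32-bit MMIO write helper; real ring bring-up also programs the length, data/header sizes and enable bits defined earlier in this file.

    static void rx_ring_base_sketch(struct aq_hw_s *hw, u32 ring,
                                    u32 base_lsw, u32 base_msw)
    {
        aq_hw_write_reg(hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(ring), base_lsw);
        aq_hw_write_reg(hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(ring), base_msw);
        /* start with an empty ring; the tail pointer advances as buffers are posted */
        aq_hw_write_reg(hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(ring), 0);
    }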
+/* rx interrupt moderation control register definitions
+ * Preprocessor definitions for RX Interrupt Moderation Control Register
+ * Base Address: 0x00005A40
+ * Parameter: RIM {R} | stride size 0x4 | range [0, 31]
+ */
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
+
+/* rx filter multicast filter mask register definitions
+ * preprocessor definitions for rx filter multicast filter mask register
+ * address: 0x00005270
+ */
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
+
+/* rx filter multicast filter register definitions
+ * preprocessor definitions for rx filter multicast filter register
+ * base address: 0x00005250
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ */
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
+
+/* RX Filter RSS Control Register 1 Definitions
+ * Preprocessor definitions for RX Filter RSS Control Register 1
+ * Address: 0x000054C0
+ */
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
+
+/* RX Filter Control Register 2 Definitions
+ * Preprocessor definitions for RX Filter Control Register 2
+ * Address: 0x00005104
+ */
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
+
+/* tx tx dma debug control [1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
+ * port="pif_tdm_debug_cntl_i[31:0]"
+ */
+
+/* register address for bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
+/* bitmask for bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
+/* inverted bitmask for bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
+/* lower bit position of bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
+/* width of bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
+/* default value of bitfield tx dma debug control [1f:0] */
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
+
+/* tx dma descriptor base address lsw definitions
+ * preprocessor definitions for tx dma descriptor base address lsw
+ * base address: 0x00007c00
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
+ (0x00007c00u + (descriptor) * 0x40)
+
+/* tx dma descriptor tail pointer register definitions
+ * preprocessor definitions for tx dma descriptor tail pointer register
+ * base address: 0x00007c10
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00007c10u + (descriptor) * 0x40)
+
+/* rx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_rpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
+/* bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
+/* width of bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
+/* default value of bitfield dma_sys_loopback */
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
+
+/* rx rx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "rx_tc_mode".
+ * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
+ */
+
+/* register address for bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
+/* bitmask for bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
+/* lower bit position of bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
+/* width of bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
+/* default value of bitfield rx_tc_mode */
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
+
+/* rx rx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx_buf_en".
+ * port="pif_rpb_rx_buf_en_i"
+ */
+
+/* register address for bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
+/* bitmask for bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
+/* inverted bitmask for bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
+/* width of bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
+/* default value of bitfield rx_buf_en */
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
+
+/* rx rx{b}_hi_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_hi_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
+/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
+/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
+/* width of bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
+/* default value of bitfield rx{b}_hi_thresh[d:0] */
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
+
+/* rx rx{b}_lo_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_lo_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
+/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
+/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
+/* width of bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
+/* default value of bitfield rx{b}_lo_thresh[d:0] */
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
+
+/* rx rx_fc_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
+ * port="pif_rpb_rx_fc_mode_i[1:0]"
+ */
+
+/* register address for bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
+/* bitmask for bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
+/* inverted bitmask for bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
+/* lower bit position of bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
+/* width of bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
+/* default value of bitfield rx_fc_mode[1:0] */
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
+
+/* rx rx{b}_buf_size[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_buf_size_i[8:0]"
+ */
+
+/* register address for bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
+/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
+/* lower bit position of bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
+/* width of bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
+/* default value of bitfield rx{b}_buf_size[8:0] */
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
+
+/* rx rx{b}_xoff_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_xoff_en".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx_xoff_en_i[0]"
+ */
+
+/* register address for bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
+/* inverted bitmask for bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
+/* width of bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
+/* default value of bitfield rx{b}_xoff_en */
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
+
+/* rx l2_bc_thresh[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
+ * port="pif_rpf_l2_bc_thresh_i[15:0]"
+ */
+
+/* register address for bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
+/* bitmask for bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
+/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
+/* lower bit position of bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
+/* width of bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
+/* default value of bitfield l2_bc_thresh[f:0] */
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
+
+/* rx l2_bc_en bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_en".
+ * port="pif_rpf_l2_bc_en_i"
+ */
+
+/* register address for bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
+/* bitmask for bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
+/* inverted bitmask for bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
+/* width of bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
+/* default value of bitfield l2_bc_en */
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
+
+/* rx l2_bc_act[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_act[2:0]".
+ * port="pif_rpf_l2_bc_act_i[2:0]"
+ */
+
+/* register address for bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
+/* bitmask for bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
+/* inverted bitmask for bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
+/* lower bit position of bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
+/* width of bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
+/* default value of bitfield l2_bc_act[2:0] */
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
+
+/* rx l2_mc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_mc_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ * port="pif_rpf_l2_mc_en_i[0]"
+ */
+
+/* register address for bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
+/* bitmask for bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
+/* inverted bitmask for bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
+/* lower bit position of bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
+/* width of bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
+/* default value of bitfield l2_mc_en{f} */
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
+
+/* rx l2_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "l2_promis_mode".
+ * port="pif_rpf_l2_promis_mode_i"
+ */
+
+/* register address for bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
+/* bitmask for bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
+/* inverted bitmask for bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
+/* lower bit position of bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
+/* width of bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
+/* default value of bitfield l2_promis_mode */
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
+
+/* rx l2_uc_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_act0_i[2:0]"
+ */
+
+/* register address for bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
+/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
+/* lower bit position of bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
+/* width of bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
+/* default value of bitfield l2_uc_act{f}[2:0] */
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
+
+/* rx l2_uc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_en{f}".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_en_i[0]"
+ */
+
+/* register address for bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
+/* inverted bitmask for bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
+/* lower bit position of bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
+/* width of bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
+/* default value of bitfield l2_uc_en{f} */
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
+
+/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
+/* register address for bitfield l2_uc_da{f}_msw[f:0] */
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
+/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
+
+/* rx l2_mc_accept_all bitfield definitions
+ * Preprocessor definitions for the bitfield "l2_mc_accept_all".
+ * PORT="pif_rpf_l2_mc_all_accept_i"
+ */
+
+/* Register address for bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
+/* Bitmask for bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
+/* Inverted bitmask for bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
+/* Lower bit position of bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
+/* Width of bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
+/* Default value of bitfield l2_mc_accept_all */
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
+
+/* width of bitfield rx_tc_up{t}[2:0] */
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
+/* default value of bitfield rx_tc_up{t}[2:0] */
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
+
+/* rx rss_key_addr[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_addr[4:0]".
+ * port="pif_rpf_rss_key_addr_i[4:0]"
+ */
+
+/* register address for bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
+/* bitmask for bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
+/* inverted bitmask for bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
+/* lower bit position of bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
+/* width of bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
+/* default value of bitfield rss_key_addr[4:0] */
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
+
+/* rx rss_key_wr_data[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
+ * port="pif_rpf_rss_key_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
+/* bitmask for bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
+/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
+/* lower bit position of bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
+/* width of bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
+/* default value of bitfield rss_key_wr_data[1f:0] */
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
+
+/* rx rss_key_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_en_i".
+ * port="pif_rpf_rss_key_wr_en_i"
+ */
+
+/* register address for bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
+/* bitmask for bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
+/* inverted bitmask for bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
+/* lower bit position of bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
+/* width of bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
+/* default value of bitfield rss_key_wr_en_i */
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
+
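Editor's note: the three blocks above (key address, key write data, key write enable) form a small write port into the RSS key store. A sketch of pushing one 32-bit key word, assuming aq_hw_write_reg() is the driver's register write helper and that the data word is latched when the write-enable bit is set alongside the address.

    /* Assumed ordering: data first, then address + write-enable strobe. */
    static void rss_key_word_write_sketch(struct aq_hw_s *hw, u32 idx, u32 key_word)
    {
        aq_hw_write_reg(hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, key_word);
        aq_hw_write_reg(hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
                        (idx & HW_ATL_RPF_RSS_KEY_ADDR_MSK) |
                        HW_ATL_RPF_RSS_KEY_WR_ENI_MSK);
    }

The redirection-table block that follows exposes the same address/data/write-enable layout at 0x54e0/0x54e4.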
+/* rx rss_redir_addr[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
+ * port="pif_rpf_rss_redir_addr_i[3:0]"
+ */
+
+/* register address for bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
+/* bitmask for bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
+/* inverted bitmask for bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
+/* lower bit position of bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
+/* width of bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
+/* default value of bitfield rss_redir_addr[3:0] */
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
+
+/* rx rss_redir_wr_data[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
+ * port="pif_rpf_rss_redir_wr_data_i[15:0]"
+ */
+
+/* register address for bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
+/* bitmask for bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
+/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
+/* lower bit position of bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
+/* width of bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
+/* default value of bitfield rss_redir_wr_data[f:0] */
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
+
+/* rx rss_redir_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_en_i".
+ * port="pif_rpf_rss_redir_wr_en_i"
+ */
+
+/* register address for bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
+/* bitmask for bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
+/* inverted bitmask for bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
+/* lower bit position of bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
+/* width of bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
+/* default value of bitfield rss_redir_wr_en_i */
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
+
+/* rx tpo_rpf_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
+ * port="pif_rpf_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
+/* bitmask for bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
+/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
+/* lower bit position of bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
+/* width of bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
+/* default value of bitfield tpo_rpf_sys_loopback */
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
+
+/* rx vl_inner_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
+ * port="pif_rpf_vl_inner_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
+/* bitmask for bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
+/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
+/* lower bit position of bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
+/* width of bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
+/* default value of bitfield vl_inner_tpid[f:0] */
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
+
+/* rx vl_outer_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
+ * port="pif_rpf_vl_outer_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
+/* bitmask for bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
+/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
+/* lower bit position of bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
+/* width of bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
+/* default value of bitfield vl_outer_tpid[f:0] */
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
+
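Editor's note: the inner and outer TPID fields share the register at 0x5284, so its reset value can be read straight off the two defaults above.

    /* Illustrative macro (not part of the driver):
     * outer 0x88a8 in bits [31:16], inner 0x8100 in bits [15:0]. */
    #define TPID_REG_RESET_VALUE_SKETCH \
        ((HW_ATL_RPF_VL_OUTER_TPID_DEFAULT << HW_ATL_RPF_VL_OUTER_TPID_SHIFT) | \
         HW_ATL_RPF_VL_INNER_TPID_DEFAULT)   /* == 0x88a88100 */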
+/* rx vl_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "vl_promis_mode".
+ * port="pif_rpf_vl_promis_mode_i"
+ */
+
+/* register address for bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
+/* bitmask for bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
+/* inverted bitmask for bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
+/* lower bit position of bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
+/* width of bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
+/* default value of bitfield vl_promis_mode */
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
+
+/* RX vl_accept_untagged_mode Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
+ * PORT="pif_rpf_vl_accept_untagged_i"
+ */
+
+/* Register address for bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
+/* Bitmask for bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
+/* Inverted bitmask for bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
+/* Lower bit position of bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
+/* Width of bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
+/* Default value of bitfield vl_accept_untagged_mode */
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
+
+/* rX vl_untagged_act[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
+ * PORT="pif_rpf_vl_untagged_act_i[2:0]"
+ */
+
+/* Register address for bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
+/* Bitmask for bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
+/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
+/* Lower bit position of bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
+/* Width of bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
+/* Default value of bitfield vl_untagged_act[2:0] */
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
+
+/* RX vl_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_en_i[0]"
+ */
+
+/* Register address for bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
+/* Inverted bitmask for bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
+/* Width of bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
+/* Default value of bitfield vl_en{F} */
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
+
+/* RX vl_act{F}[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_act0_i[2:0]"
+ */
+
+/* Register address for bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
+/* Inverted bitmask for bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
+/* Lower bit position of bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
+/* Width of bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
+/* Default value of bitfield vl_act{F}[2:0] */
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
+
+/* RX vl_id{F}[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_id0_i[11:0]"
+ */
+
+/* Register address for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
+/* Inverted bitmask for bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
+/* Lower bit position of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
+/* Width of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
+/* Default value of bitfield vl_id{F}[B:0] */
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
+
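Editor's note: vl_en{F}, vl_act{F} and vl_id{F} all live in the same per-filter register (0x5290 + F * 4). A sketch of composing the full control word from the masks above; whether the driver writes the word in one shot or field by field is not visible here, and the helper name is illustrative.

    static u32 vlan_filter_word_sketch(u32 action, u32 vlan_id)
    {
        return HW_ATL_RPF_VL_EN_F_MSK |                                        /* enable bit     */
               ((action << HW_ATL_RPF_VL_ACT_F_SHIFT) & HW_ATL_RPF_VL_ACT_F_MSK) |
               (vlan_id & HW_ATL_RPF_VL_ID_F_MSK);                             /* 12-bit VLAN id */
    }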
+/* RX et_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "et_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_et_en_i[0]"
+ */
+
+/* Register address for bitfield et_en{F} */
+#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* Bitmask for bitfield et_en{F} */
+#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
+/* Inverted bitmask for bitfield et_en{F} */
+#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield et_en{F} */
+#define HW_ATL_RPF_ET_EN_F_SHIFT 31
+/* Width of bitfield et_en{F} */
+#define HW_ATL_RPF_ET_EN_F_WIDTH 1
+/* Default value of bitfield et_en{F} */
+#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
+
+/* rx et_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_en_i[0]"
+ */
+
+/* register address for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
+/* inverted bitmask for bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
+/* lower bit position of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
+/* width of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
+/* default value of bitfield et_en{f} */
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
+
+/* rx et_up{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up_en_i[0]"
+ */
+
+/* register address for bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
+/* inverted bitmask for bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
+/* lower bit position of bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
+/* width of bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
+/* default value of bitfield et_up{f}_en */
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
+
+/* rx et_rxq{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq_en_i[0]"
+ */
+
+/* register address for bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
+/* inverted bitmask for bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
+/* lower bit position of bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
+/* width of bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
+/* default value of bitfield et_rxq{f}_en */
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
+
+/* rx et_up{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up0_i[2:0]"
+ */
+
+/* register address for bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
+/* inverted bitmask for bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
+/* lower bit position of bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
+/* width of bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
+/* default value of bitfield et_up{f}[2:0] */
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
+
+/* rx et_rxq{f}[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq0_i[4:0]"
+ */
+
+/* register address for bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
+/* inverted bitmask for bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
+/* lower bit position of bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
+/* width of bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
+/* default value of bitfield et_rxq{f}[4:0] */
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
+
+/* rx et_mng_rxq{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_mng_rxq{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_mng_rxq_i[0]"
+ */
+
+/* register address for bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
+/* inverted bitmask for bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
+/* lower bit position of bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
+/* width of bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
+/* default value of bitfield et_mng_rxq{f} */
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
+
+/* rx et_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_act0_i[2:0]"
+ */
+
+/* register address for bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
+/* inverted bitmask for bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
+/* lower bit position of bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
+/* width of bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
+/* default value of bitfield et_act{f}[2:0] */
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
+
+/* rx et_val{f}[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_val{f}[f:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_val0_i[15:0]"
+ */
+
+/* register address for bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
+/* inverted bitmask for bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
+/* lower bit position of bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
+/* width of bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
+/* default value of bitfield et_val{f}[f:0] */
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+
+/* rx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_rpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
+/* bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
+/* width of bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
+/* default value of bitfield ipv4_chk_en */
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
+
+/* rx desc{d}_vl_strip bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_vl_strip".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rpo_desc_vl_strip_i[0]"
+ */
+
+/* register address for bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
+/* inverted bitmask for bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
+/* lower bit position of bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
+/* width of bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
+/* default value of bitfield desc{d}_vl_strip */
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
+
+/* rx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_rpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
+/* bitmask for bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
+/* width of bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
+/* default value of bitfield l4_chk_en */
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
+
+/* rx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_rx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
+/* bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
+/* width of bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
+/* default value of bitfield reg_res_dsbl */
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
+
+/* tx dca{d}_cpuid[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
+/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
+/* width of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
+/* default value of bitfield dca{d}_cpuid[7:0] */
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
+
+/* tx lso_en[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_en[1f:0]".
+ * port="pif_tdm_lso_en_i[31:0]"
+ */
+
+/* register address for bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
+/* bitmask for bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
+/* inverted bitmask for bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
+/* lower bit position of bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
+/* width of bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
+/* default value of bitfield lso_en[1f:0] */
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
+
+/* tx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_tdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
+/* bitmask for bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
+/* width of bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
+/* default value of bitfield dca_en */
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
+
+/* tx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_tdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
+/* bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
+/* width of bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
+/* default value of bitfield dca_mode[3:0] */
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
+
+/* tx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
+/* width of bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
+/* default value of bitfield dca{d}_desc_en */
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
+
+/* tx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
+/* width of bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
+/* default value of bitfield desc{d}_en */
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
+
+/* tx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
+
+/* tx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
+/* width of bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
+
+/* tx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_tdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
+/* bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
+/* lower bit position of bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
+/* width of bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
+/* default value of bitfield int_desc_wrb_en */
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
+
+/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* register address for bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+ (0x00007c18 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
+/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
+/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
+/* width of bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
+/* default value of bitfield desc{d}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
+
+/* tx lso_tcp_flag_first[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
+ * port="pif_thm_lso_tcp_flag_first_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
+/* bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
+/* width of bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
+/* default value of bitfield lso_tcp_flag_first[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
+
+/* tx lso_tcp_flag_last[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
+ * port="pif_thm_lso_tcp_flag_last_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
+/* bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
+/* width of bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
+/* default value of bitfield lso_tcp_flag_last[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
+
+/* RX lro_rsc_max[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_rsc_max[1F:0]".
+ */
+
+/* Register address for bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
+/* Bitmask for bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
+/* Lower bit position of bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
+/* Width of bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
+/* Default value of bitfield lro_rsc_max[1F:0] */
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
+
+/* RX lro_en[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_en[1F:0]".
+ * PORT="pif_rpo_lro_en_i[31:0]"
+ */
+
+/* Register address for bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
+/* Bitmask for bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
+/* Lower bit position of bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
+/* Width of bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
+/* Default value of bitfield lro_en[1F:0] */
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
+
+/* RX lro_ptopt_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ptopt_en".
+ * PORT="pif_rpo_lro_ptopt_en_i"
+ */
+
+/* Register address for bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
+/* Bitmask for bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
+/* Inverted bitmask for bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
+/* Lower bit position of bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
+/* Width of bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
+/* Default value of bitfield lro_ptopt_en */
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFAULT 0x1
+
+/* RX lro_q_ses_lmt Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
+ * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
+/* Bitmask for bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
+/* Inverted bitmask for bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
+/* Lower bit position of bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
+/* Width of bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
+/* Default value of bitfield lro_q_ses_lmt */
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
+
+/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
+ * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
+/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
+/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
+/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
+/* Width of bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
+/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFAULT 0x1
+
+/* RX lro_pkt_min[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]"
+ */
+
+/* Register address for bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
+/* Bitmask for bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
+/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
+/* Lower bit position of bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
+/* Width of bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
+/* Default value of bitfield lro_pkt_min[4:0] */
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
+
+/* Width of bitfield lro{L}_des_max[1:0] */
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
+/* Default value of bitfield lro{L}_des_max[1:0] */
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
+
+/* RX lro_tb_div[11:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
+ * PORT="pif_rpo_lro_tb_div_i[11:0]"
+ */
+
+/* Register address for bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
+/* Bitmask for bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
+/* Inverted bitmask for bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
+/* Lower bit position of bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
+/* Width of bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
+/* Default value of bitfield lro_tb_div[11:0] */
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
+
+/* RX lro_ina_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
+ * PORT="pif_rpo_lro_ina_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
+/* Bitmask for bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
+/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
+/* Lower bit position of bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
+/* Width of bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
+/* Default value of bitfield lro_ina_ival[9:0] */
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
+
+/* RX lro_max_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
+ * PORT="pif_rpo_lro_max_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
+/* Bitmask for bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
+/* Inverted bitmask for bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
+/* Lower bit position of bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
+/* Width of bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
+/* Default value of bitfield lro_max_ival[9:0] */
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
+
+/* TX dca{D}_cpuid[7:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* Register address for bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
+/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
+/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
+/* Width of bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
+/* Default value of bitfield dca{D}_cpuid[7:0] */
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
+
+/* TX dca{D}_desc_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_desc_en".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* Register address for bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
+/* Inverted bitmask for bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
+/* Width of bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
+/* Default value of bitfield dca{D}_desc_en */
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
+
+/* TX desc{D}_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_en".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc_en_i[0]"
+ */
+
+/* Register address for bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
+/* Inverted bitmask for bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
+/* Width of bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
+/* Default value of bitfield desc{D}_en */
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
+
+/* TX desc{D}_hd[C:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* Register address for bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
+/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
+/* Lower bit position of bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
+/* Width of bitfield desc{D}_hd[C:0] */
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
+
+/* TX desc{D}_len[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* Register address for bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
+/* Inverted bitmask for bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
+/* Lower bit position of bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
+/* Width of bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
+/* Default value of bitfield desc{D}_len[9:0] */
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
+
+/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
+ (0x00007C18 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
+/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
+/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
+/* Width of bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
+/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
+
+/* TX tdm_int_mod_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "tdm_int_mod_en".
+ * PORT="pif_tdm_int_mod_en_i"
+ */
+
+/* Register address for bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
+/* Bitmask for bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
+/* Inverted bitmask for bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
+/* Lower bit position of bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
+/* Width of bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
+/* Default value of bitfield tdm_int_mod_en */
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
+
+/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
+ * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
+/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
+/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
+/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
+/* width of bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
+/* default value of bitfield lso_tcp_flag_mid[b:0] */
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
+
+/* tx tx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buf_en".
+ * port="pif_tpb_tx_buf_en_i"
+ */
+
+/* register address for bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
+/* bitmask for bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
+/* inverted bitmask for bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
+/* width of bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
+/* default value of bitfield tx_buf_en */
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
+
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
+/* tx tx{b}_hi_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_hi_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
+/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
+/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
+/* width of bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
+/* default value of bitfield tx{b}_hi_thresh[c:0] */
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
+
+/* tx tx{b}_lo_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_lo_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
+/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
+/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
+/* width of bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
+/* default value of bitfield tx{b}_lo_thresh[c:0] */
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
+
+/* tx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_tpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
+/* bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
+/* width of bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
+/* default value of bitfield dma_sys_loopback */
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
+
+/* tx tx{b}_buf_size[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_buf_size_i[7:0]"
+ */
+
+/* register address for bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
+/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
+/* lower bit position of bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
+/* width of bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
+/* default value of bitfield tx{b}_buf_size[7:0] */
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
+
+/* tx tx_scp_ins_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_scp_ins_en".
+ * port="pif_tpb_scp_ins_en_i"
+ */
+
+/* register address for bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
+/* bitmask for bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
+/* inverted bitmask for bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
+/* lower bit position of bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
+/* width of bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
+/* default value of bitfield tx_scp_ins_en */
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
+
+/* tx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_tpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
+/* bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
+/* width of bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
+/* default value of bitfield ipv4_chk_en */
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
+
+/* tx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_tpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
+/* bitmask for bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
+/* width of bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
+/* default value of bitfield l4_chk_en */
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
+
+/* tx pkt_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "pkt_sys_loopback".
+ * port="pif_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
+/* bitmask for bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
+/* inverted bitmask for bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
+/* lower bit position of bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
+/* width of bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
+/* default value of bitfield pkt_sys_loopback */
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
+/* lower bit position of bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
+/* width of bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
+/* default value of bitfield data_tc_arb_mode */
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
+
+/* tx desc_rate_ta_rst bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_ta_rst".
+ * port="pif_tps_desc_rate_ta_rst_i"
+ */
+
+/* register address for bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
+/* bitmask for bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
+/* inverted bitmask for bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
+/* lower bit position of bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
+/* width of bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
+/* default value of bitfield desc_rate_ta_rst */
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
+
+/* tx desc_rate_limit[a:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
+ * port="pif_tps_desc_rate_lim_i[10:0]"
+ */
+
+/* register address for bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
+/* bitmask for bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
+/* inverted bitmask for bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
+/* lower bit position of bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
+/* width of bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
+/* default value of bitfield desc_rate_limit[a:0] */
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
+
+/* tx desc_tc_arb_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
+ * port="pif_tps_desc_tc_arb_mode_i[1:0]"
+ */
+
+/* register address for bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
+/* bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
+/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
+/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
+/* width of bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
+/* default value of bitfield desc_tc_arb_mode[1:0] */
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
+
+/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
+/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
+/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
+/* default value of bitfield desc_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx desc_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
+/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
+/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
+/* width of bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
+/* default value of bitfield desc_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
+
+/* tx desc_vm_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "desc_vm_arb_mode".
+ * port="pif_tps_desc_vm_arb_mode_i"
+ */
+
+/* register address for bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
+/* bitmask for bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
+/* inverted bitmask for bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
+/* lower bit position of bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
+/* width of bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
+/* default value of bitfield desc_vm_arb_mode */
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
+
+/* tx data_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
+/* default value of bitfield data_tc{t}_credit_max[b:0] */
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx data_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
+/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
+/* lower bit position of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
+/* width of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
+/* default value of bitfield data_tc{t}_weight[8:0] */
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
+
+/* tx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_tx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
+/* bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
+/* width of bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
+/* default value of bitfield reg_res_dsbl */
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
+
+/* mac_phy register access busy bitfield definitions
+ * preprocessor definitions for the bitfield "register access busy".
+ * port="msm_pif_reg_busy_o"
+ */
+
+/* register address for bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
+/* bitmask for bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
+/* inverted bitmask for bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
+/* lower bit position of bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
+/* width of bitfield register access busy */
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
+
+/* mac_phy msm register address[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register address[7:0]".
+ * port="pif_msm_reg_addr_i[7:0]"
+ */
+
+/* register address for bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
+/* bitmask for bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
+/* inverted bitmask for bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
+/* lower bit position of bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
+/* width of bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
+/* default value of bitfield msm register address[7:0] */
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
+
+/* mac_phy register read strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register read strobe".
+ * port="pif_msm_reg_rden_i"
+ */
+
+/* register address for bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
+/* bitmask for bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
+/* inverted bitmask for bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
+/* lower bit position of bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
+/* width of bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
+/* default value of bitfield register read strobe */
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
+
+/* mac_phy msm register read data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register read data[31:0]".
+ * port="msm_pif_reg_rd_data_o[31:0]"
+ */
+
+/* register address for bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
+/* bitmask for bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
+/* inverted bitmask for bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
+/* lower bit position of bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
+/* width of bitfield msm register read data[31:0] */
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
+
+/* mac_phy msm register write data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register write data[31:0]".
+ * port="pif_msm_reg_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
+/* bitmask for bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
+/* inverted bitmask for bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
+/* lower bit position of bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
+/* width of bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
+/* default value of bitfield msm register write data[31:0] */
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
+
+/* mac_phy register write strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register write strobe".
+ * port="pif_msm_reg_wren_i"
+ */
+
+/* register address for bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
+/* bitmask for bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
+/* inverted bitmask for bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
+/* lower bit position of bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
+/* width of bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
+/* default value of bitfield register write strobe */
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
+
+/* mif soft reset bitfield definitions
+ * preprocessor definitions for the bitfield "soft reset".
+ * port="pif_glb_res_i"
+ */
+
+/* register address for bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
+/* bitmask for bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
+/* inverted bitmask for bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
+/* lower bit position of bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
+/* width of bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
+/* default value of bitfield soft reset */
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
+
+/* mif register reset disable bitfield definitions
+ * preprocessor definitions for the bitfield "register reset disable".
+ * port="pif_glb_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
+/* bitmask for bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
+/* inverted bitmask for bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
+/* lower bit position of bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
+/* width of bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
+/* default value of bitfield register reset disable */
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
+
+/* tx dma debug control definitions */
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
+
+/* tx dma descriptor base address msw definitions */
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
+ (0x00007c04u + (descriptor) * 0x40)
+
+/* tx dma total request limit */
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00008980
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
+
+/* pcie reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_pci_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
+/* bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
+/* width of bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
+/* default value of bitfield reg_res_dsbl */
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
+
+/* PCI core control register */
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
+
+/* global microprocessor scratch pad definitions */
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+ (0x00000300u + (scratch_scp) * 0x4)
+
+/* register address for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404
+/* bitmask for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002
+/* inverted bitmask for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD
+/* lower bit position of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1
+/* width of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1
+/* default value of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+
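+/* Usage note (illustrative, not part of the register specification): each
+ * bitfield above is described by an ADR/MSK/MSKN/SHIFT/WIDTH/DEFAULT group.
+ * A read-modify-write of a field typically combines them as sketched below;
+ * 'hw' and 'lmt' are placeholder names, while aq_hw_read_reg() and
+ * aq_hw_write_reg() are the register accessors used elsewhere in this driver:
+ *
+ *    u32 reg = aq_hw_read_reg(hw, HW_ATL_RPO_LRO_QSES_LMT_ADR);
+ *
+ *    reg &= HW_ATL_RPO_LRO_QSES_LMT_MSKN;                (clear the field)
+ *    reg |= (lmt << HW_ATL_RPO_LRO_QSES_LMT_SHIFT) &
+ *           HW_ATL_RPO_LRO_QSES_LMT_MSK;                 (insert new value)
+ *    aq_hw_write_reg(hw, HW_ATL_RPO_LRO_QSES_LMT_ADR, reg);
+ */
+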
+#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
new file mode 100644
index 00000000..f11093a5
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.c
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_ether.h>
+#include "../atl_hw_regs.h"
+
+#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
+#include "../atl_logs.h"
+
+#define HW_ATL_UCP_0X370_REG 0x0370U
+
+#define HW_ATL_MIF_CMD 0x0200U
+#define HW_ATL_MIF_ADDR 0x0208U
+#define HW_ATL_MIF_VAL 0x020CU
+
+#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_MPI_FW_VERSION 0x18
+#define HW_ATL_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
+
+#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
+#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
+
+#define HW_ATL_MAC_PHY_CONTROL 0x4000
+#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D
+
+#define HW_ATL_FW_VER_1X 0x01050006U
+#define HW_ATL_FW_VER_2X 0x02000000U
+#define HW_ATL_FW_VER_3X 0x03000000U
+
+#define FORCE_FLASHLESS 0
+
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err = 0;
+
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &self->chip_features);
+
+ hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+
+ if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_1x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else {
+ PMD_DRV_LOG(ERR, "Bad FW version detected: %x\n",
+ self->fw_ver_actual);
+ return -EOPNOTSUPP;
+ }
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+ return err;
+}
+
+static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
+{
+ u32 gsr, val;
+ int k = 0;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ AQ_HW_SLEEP(50);
+
+ /* Cleanup SPI */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
+
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ /* Kickstart MAC */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+ aq_hw_write_reg(self, 0x520, 0x1);
+
+ /* Reset SPI again because of possible interrupted SPI burst */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
+ AQ_HW_SLEEP(10);
+ /* Clear SPI reset state */
+ aq_hw_write_reg(self, 0x53C, val & ~0x10);
+
+ aq_hw_write_reg(self, 0x404, 0x180e0);
+
+ for (k = 0; k < 1000; k++) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+
+ flb_status = flb_status & 0x10;
+ if (flb_status)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ PMD_DRV_LOG(ERR, "MAC kickstart failed\n");
+ return -EIO;
+ }
+
+ /* FW reset */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ AQ_HW_SLEEP(50);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+
+ /* Kickstart PHY - skipped */
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ PMD_DRV_LOG(ERR, "FW kickstart failed\n");
+ return -EIO;
+ }
+ /* Old FW requires fixed delay after init */
+ AQ_HW_SLEEP(15);
+
+ return 0;
+}
+
+static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
+{
+ u32 gsr, val, rbl_status;
+ int k;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+
+ /* Alter RBL status */
+ aq_hw_write_reg(self, 0x388, 0xDEAD);
+
+ /* Cleanup SPI */
+ val = aq_hw_read_reg(self, 0x53C);
+ aq_hw_write_reg(self, 0x53C, val | 0x10);
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
+ (gsr & 0xFFFFBFFF) | 0x8000);
+
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0x0);
+
+ aq_hw_write_reg(self, 0x404, 0x40e0);
+
+ /* Wait for RBL boot */
+ for (k = 0; k < 1000; k++) {
+ rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
+ if (rbl_status && rbl_status != 0xDEAD)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (!rbl_status || rbl_status == 0xDEAD) {
+ PMD_DRV_LOG(ERR, "RBL Restart failed");
+ return -EIO;
+ }
+
+ /* Restore NVR */
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0xA0);
+
+ if (rbl_status == 0xF1A7) {
+ PMD_DRV_LOG(ERR, "No FW detected. Dynamic FW load not implemented\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ PMD_DRV_LOG(ERR, "FW kickstart failed\n");
+ return -EIO;
+ }
+ /* Old FW requires fixed delay after init */
+ AQ_HW_SLEEP(15);
+
+ return 0;
+}
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+ int k;
+ u32 boot_exit_code = 0;
+
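+ /* Poll the daisy-chain status and the boot exit code until either the
+ * FLB or the RBL firmware shows signs of life; a non-zero exit code is
+ * later taken to mean the RBL boot path is in use.
+ */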
+ for (k = 0; k < 1000; ++k) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+ boot_exit_code = aq_hw_read_reg(self,
+ HW_ATL_MPI_BOOT_EXIT_CODE);
+ if (flb_status != 0x06000000 || boot_exit_code != 0)
+ break;
+ }
+
+ if (k == 1000) {
+ PMD_DRV_LOG(ERR, "Neither RBL nor FLB firmware started\n");
+ return -EOPNOTSUPP;
+ }
+
+ self->rbl_enabled = (boot_exit_code != 0);
+
+ /* FW 1.x may boot up in an invalid POWER state (WOL feature).
+ * We should work around this by forcing its state back to DEINIT.
+ */
+ if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+ aq_hw_read_reg(self,
+ HW_ATL_MPI_FW_VERSION))) {
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
+ HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
+ 10, 1000U);
+ }
+
+ if (self->rbl_enabled)
+ err = hw_atl_utils_soft_reset_rbl(self);
+ else
+ err = hw_atl_utils_soft_reset_flb(self);
+
+ return err;
+}
+
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt)
+{
+ int err = 0;
+
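+ /* AQ_HW_WAIT_FOR is assumed to set the local 'err' on timeout; the
+ * check below relies on that to detect a semaphore that never frees.
+ */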
+ AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
+ HW_ATL_FW_SM_RAM) == 1U,
+ 1U, 10000U);
+
+ if (err < 0) {
+ bool is_locked;
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ if (!is_locked) {
+ err = -ETIMEDOUT;
+ goto err_exit;
+ }
+ }
+
+ aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);
+
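+ /* The ++cnt/--cnt idiom below simply iterates 'cnt' times, stopping
+ * early if one of the waits inside the loop sets 'err'.
+ */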
+ for (++cnt; --cnt && !err;) {
+ aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
+
+ if (IS_CHIP_FEATURE(REVISION_B1))
+ AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self,
+ HW_ATL_MIF_ADDR),
+ 1, 1000U);
+ else
+ AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self,
+ HW_ATL_MIF_CMD)),
+ 1, 1000U);
+
+ *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
+ a += 4;
+ }
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+ return err;
+}
+
+int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
+ u32 cnt)
+{
+ int err = 0;
+ bool is_locked;
+
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ if (!is_locked) {
+ err = -ETIMEDOUT;
+ goto err_exit;
+ }
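+ /* Two upload paths: on REVISION_B1 each dword is pushed through the
+ * 0x328/0x32C mailbox and a forced firmware interrupt, while older
+ * revisions stream dwords via the MIF address/data/command registers
+ * (0x208/0x20C/0x200, matching the HW_ATL_MIF_* offsets above).
+ */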
+ if (IS_CHIP_FEATURE(REVISION_B1)) {
+ u32 offset = 0;
+
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x328, p[offset]);
+ aq_hw_write_reg(self, 0x32C,
+ (0x80000000 | (0xFFFF & (offset * 4))));
+ hw_atl_mcp_up_force_intr_set(self, 1);
+ /* 1000 times by 10us = 10ms */
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self,
+ 0x32C) & 0xF0000000) != 0x80000000,
+ 10, 1000);
+ }
+ } else {
+ u32 offset = 0;
+
+ aq_hw_write_reg(self, 0x208, a);
+
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x20C, p[offset]);
+ aq_hw_write_reg(self, 0x200, 0xC000);
+
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U)
+ & 0x100) == 0, 10, 1000);
+ }
+ }
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+ return err;
+}
+
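+/* Versions match when the major byte is identical and the actual
+ * minor/patch part is not older than the expected one.
+ */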
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+{
+ int err = 0;
+ const u32 dw_major_mask = 0xff000000U;
+ const u32 dw_minor_mask = 0x00ffffffU;
+
+ err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
+ if (err < 0)
+ goto err_exit;
+ err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
+ -EOPNOTSUPP : 0;
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_init_ucp(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ if (!aq_hw_read_reg(self, 0x370U)) {
+ unsigned int rnd = (uint32_t)rte_rand();
+ unsigned int ucp_0x370 = 0U;
+
+ ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+
+	/* poll every 1 ms: up to 10 times for the mailbox address,
+	 * up to 100 times for the RPC address
+	 */
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
+ aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
+ AQ_HW_WAIT_FOR(0U != (self->rpc_addr =
+ aq_hw_read_reg(self, 0x334U)), 1000U, 100U);
+
+ return err;
+}
+
+#define HW_ATL_RPC_CONTROL_ADR 0x0338U
+#define HW_ATL_RPC_STATE_ADR 0x033CU
+
+struct aq_hw_atl_utils_fw_rpc_tid_s {
+ union {
+ u32 val;
+ struct {
+ u16 tid;
+ u16 len;
+ };
+ };
+};
+
+#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
+
+int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
+{
+ int err = 0;
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+
+ if (!IS_CHIP_FEATURE(MIPS)) {
+ err = -1;
+ goto err_exit;
+ }
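+	/* Upload the RPC request, rounding its byte size up to whole dwords */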
+ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&self->rpc,
+ (rpc_size + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
+ sw.tid = 0xFFFFU & (++self->rpc_tid);
+ sw.len = (u16)rpc_size;
+ aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
+
+err_exit:
+ return err;
+}
+
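+/* Wait until the RPC state register echoes the transaction id written to
+ * the control register, re-issuing the request whenever the FW reports a
+ * 0xFFFF length, then optionally download the RPC response into self->rpc.
+ */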
+int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_fw_rpc **rpc)
+{
+ int err = 0;
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+
+ do {
+ sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
+
+ self->rpc_tid = sw.tid;
+
+ AQ_HW_WAIT_FOR(sw.tid ==
+ (fw.val =
+ aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
+ fw.tid), 1000U, 100U);
+ if (err < 0)
+ goto err_exit;
+
+ if (fw.len == 0xFFFFU) {
+ err = hw_atl_utils_fw_rpc_call(self, sw.len);
+ if (err < 0)
+ goto err_exit;
+ }
+ } while (sw.tid != fw.tid || 0xFFFFU == fw.len);
+ if (err < 0)
+ goto err_exit;
+
+ if (rpc) {
+ if (fw.len) {
+ err =
+ hw_atl_utils_fw_downld_dwords(self,
+ self->rpc_addr,
+ (u32 *)(void *)
+ &self->rpc,
+ (fw.len + sizeof(u32) -
+ sizeof(u8)) /
+ sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+ }
+
+ *rpc = &self->rpc;
+ }
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ err = hw_atl_utils_init_ucp(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = hw_atl_utils_fw_rpc_init(self);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox_header *pmbox)
+{
+ return hw_atl_utils_fw_downld_dwords(self,
+ self->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+}
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox *pmbox)
+{
+ int err = 0;
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
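+	/* On rev A0 approximate the unicast byte counters from the packet
+	 * counters and a fixed 1514-byte frame size.
+	 */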
+ if (IS_CHIP_FEATURE(REVISION_A0)) {
+ unsigned int mtu = 1514;
+ pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
+ pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
+ } else {
+ pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+ }
+
+err_exit:;
+}
+
+static
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
+{
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+
+ val = val & ~HW_ATL_MPI_SPEED_MSK;
+ val |= speed << HW_ATL_MPI_SPEED_SHIFT;
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+
+ return 0;
+}
+
+int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ int err = 0;
+ u32 transaction_id = 0;
+ struct hw_aq_atl_utils_mbox_header mbox;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+
+ if (state == MPI_RESET) {
+ hw_atl_utils_mpi_read_mbox(self, &mbox);
+
+ transaction_id = mbox.transaction_id;
+
+ AQ_HW_WAIT_FOR(transaction_id !=
+ (hw_atl_utils_mpi_read_mbox(self, &mbox),
+ mbox.transaction_id),
+ 1000U, 100U);
+ if (err < 0)
+ goto err_exit;
+ }
+	/* On interface DEINIT or POWER we disable dirty wake (set the bit);
+	 * otherwise we enable it (clear the bit).
+	 */
+ if (state == MPI_DEINIT || state == MPI_POWER)
+ val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
+ else
+ val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;
+
+ /* Set new state bits */
+ val = val & ~HW_ATL_MPI_STATE_MSK;
+ val |= state & HW_ATL_MPI_STATE_MSK;
+
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+err_exit:
+ return err;
+}
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
+{
+ u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
+ u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+
+ if (!link_speed_mask) {
+ link_status->mbps = 0U;
+ } else {
+ switch (link_speed_mask) {
+ case HAL_ATLANTIC_RATE_10G:
+ link_status->mbps = 10000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_5G:
+ case HAL_ATLANTIC_RATE_5GSR:
+ link_status->mbps = 5000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_2GS:
+ link_status->mbps = 2500U;
+ break;
+
+ case HAL_ATLANTIC_RATE_1G:
+ link_status->mbps = 1000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_100M:
+ link_status->mbps = 100U;
+ break;
+
+ default:
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+ u8 *mac)
+{
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+ u32 mac_addr[2];
+
+ if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
+ unsigned int rnd = (uint32_t)rte_rand();
+ unsigned int ucp_0x370 = 0;
+
+ //get_random_bytes(&rnd, sizeof(unsigned int));
+
+ ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ aq_hw_read_reg(self, 0x00000374U) +
+ (40U * 4U),
+ mac_addr,
+ ARRAY_SIZE(mac_addr));
+ if (err < 0) {
+ mac_addr[0] = 0U;
+ mac_addr[1] = 0U;
+ err = 0;
+ } else {
+ mac_addr[0] = rte_constant_bswap32(mac_addr[0]);
+ mac_addr[1] = rte_constant_bswap32(mac_addr[1]);
+ }
+
+ ether_addr_copy((struct ether_addr *)mac_addr,
+ (struct ether_addr *)mac);
+
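+	/* If the EFUSE address is multicast or has a zero OUI, synthesize a
+	 * fallback MAC from fixed constants and the 0x370 scratch register.
+	 */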
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+ /* chip revision */
+ l = 0xE3000000U
+ | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG))
+ | (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+
+ return err;
+}
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
+{
+ unsigned int ret = 0U;
+
+ switch (mbps) {
+ case 100U:
+ ret = 5U;
+ break;
+
+ case 1000U:
+ ret = 4U;
+ break;
+
+ case 2500U:
+ ret = 3U;
+ break;
+
+ case 5000U:
+ ret = 1U;
+ break;
+
+ case 10000U:
+ ret = 0U;
+ break;
+
+ default:
+ break;
+ }
+ return ret;
+}
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
+{
+ u32 chip_features = 0U;
+ u32 val = hw_atl_reg_glb_mif_id_get(self);
+ u32 mif_rev = val & 0xFFU;
+
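+	/* MIF revision nibble: 1 = A0, 2 = B0, 0xA = B1 */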
+ if ((0xFU & mif_rev) == 1U) {
+ chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS;
+ } else if ((0xFU & mif_rev) == 2U) {
+ chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS |
+ HAL_ATLANTIC_UTILS_CHIP_TPO2 |
+ HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ } else if ((0xFU & mif_rev) == 0xAU) {
+ chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS |
+ HAL_ATLANTIC_UTILS_CHIP_TPO2 |
+ HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ }
+
+ *p = chip_features;
+}
+
+static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
+{
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+ return 0;
+}
+
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+ struct hw_aq_atl_utils_mbox mbox;
+
+ hw_atl_utils_mpi_read_stats(self, &mbox);
+
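+/* Accumulate the delta of each mailbox counter since the previous snapshot
+ * into the driver's running totals.
+ */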
+#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
+ mbox.stats._N_ - self->last_stats._N_)
+
+	if (1) { /* originally gated on self->aq_link_status.mbps */
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+ AQ_SDELTA(dpc);
+ }
+#undef AQ_SDELTA
+ self->curr_stats.dma_pkt_rc =
+ hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self) +
+ ((u64)hw_atl_stats_rx_dma_good_pkt_countermsw_get(self) << 32);
+ self->curr_stats.dma_pkt_tc =
+ hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self) +
+ ((u64)hw_atl_stats_tx_dma_good_pkt_countermsw_get(self) << 32);
+ self->curr_stats.dma_oct_rc =
+ hw_atl_stats_rx_dma_good_octet_counterlsw_get(self) +
+ ((u64)hw_atl_stats_rx_dma_good_octet_countermsw_get(self) << 32);
+ self->curr_stats.dma_oct_tc =
+ hw_atl_stats_tx_dma_good_octet_counterlsw_get(self) +
+ ((u64)hw_atl_stats_tx_dma_good_octet_countermsw_get(self) << 32);
+
+ self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+ memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+ return 0;
+}
+
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
+{
+ return &self->curr_stats;
+}
+
+static const u32 hw_atl_utils_hw_mac_regs[] = {
+ 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
+ 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
+ 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
+ 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
+ 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
+ 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
+ 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
+ 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
+ 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
+ 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
+ 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
+ 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
+ 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
+ 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
+ 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
+ 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
+ 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
+ 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
+ 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
+ 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
+ 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
+ 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
+};
+
+unsigned int hw_atl_utils_hw_get_reg_length(void)
+{
+ return ARRAY_SIZE(hw_atl_utils_hw_mac_regs);
+}
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ u32 *regs_buff)
+{
+ unsigned int i = 0U;
+ unsigned int mac_regs_count = hw_atl_utils_hw_get_reg_length();
+
+ for (i = 0; i < mac_regs_count; i++)
+ regs_buff[i] = aq_hw_read_reg(self,
+ hw_atl_utils_hw_mac_regs[i]);
+ return 0;
+}
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
+{
+ *fw_version = aq_hw_read_reg(self, 0x18U);
+ return 0;
+}
+
+static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
+{
+ struct hw_aq_atl_utils_fw_rpc *prpc = NULL;
+ unsigned int rpc_size = 0U;
+ int err = 0;
+
+ err = hw_atl_utils_fw_rpc_wait(self, &prpc);
+ if (err < 0)
+ goto err_exit;
+
+ memset(prpc, 0, sizeof(*prpc));
+
+ if (wol_enabled) {
+ rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
+ prpc->msg_wol.priority = 0x10000000; /* normal priority */
+ prpc->msg_wol.pattern_id = 1U;
+ prpc->msg_wol.wol_packet_type = 2U; /* Magic Packet */
+
+ ether_addr_copy((struct ether_addr *)mac,
+ (struct ether_addr *)&prpc->msg_wol.wol_pattern);
+ } else {
+ rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
+ prpc->msg_wol.pattern_id = 1U;
+ }
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+err_exit:
+ return err;
+}
+
+static
+int aq_fw1x_set_power(struct aq_hw_s *self,
+ unsigned int power_state __rte_unused,
+ u8 *mac)
+{
+ struct hw_aq_atl_utils_fw_rpc *prpc = NULL;
+ unsigned int rpc_size = 0U;
+ int err = 0;
+ if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
+ err = aq_fw1x_set_wol(self, 1, mac);
+
+ if (err < 0)
+ goto err_exit;
+
+ rpc_size = sizeof(prpc->msg_id) +
+ sizeof(prpc->msg_enable_wakeup);
+
+ err = hw_atl_utils_fw_rpc_wait(self, &prpc);
+
+ if (err < 0)
+ goto err_exit;
+
+ memset(prpc, 0, rpc_size);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP;
+ prpc->msg_enable_wakeup.pattern_mask = 0x00000002;
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_POWER);
+err_exit:
+ return err;
+}
+
+
+
+const struct aq_fw_ops aq_fw_1x_ops = {
+ .init = hw_atl_utils_mpi_create,
+ .deinit = hw_atl_fw1x_deinit,
+ .reset = NULL,
+ .get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .set_link_speed = hw_atl_utils_mpi_set_speed,
+ .set_state = hw_atl_utils_mpi_set_state,
+ .update_link_status = hw_atl_utils_mpi_get_link_status,
+ .update_stats = hw_atl_utils_update_stats,
+ .set_power = aq_fw1x_set_power,
+ .get_temp = NULL,
+ .get_cable_len = NULL,
+ .set_eee_rate = NULL,
+ .get_eee_rate = NULL,
+ .set_flow_control = NULL,
+ .led_control = NULL,
+ .get_eeprom = NULL,
+ .set_eeprom = NULL,
+};
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
new file mode 100644
index 00000000..5f3f7084
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils.h
@@ -0,0 +1,510 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#ifndef HW_ATL_UTILS_H
+#define HW_ATL_UTILS_H
+
+#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+
+/* Hardware tx descriptor */
+struct hw_atl_txd_s {
+ u64 buf_addr;
+
+ union {
+ struct {
+ u32 type:3;
+ u32:1;
+ u32 len:16;
+ u32 dd:1;
+ u32 eop:1;
+ u32 cmd:8;
+ u32:14;
+ u32 ct_idx:1;
+ u32 ct_en:1;
+ u32 pay_len:18;
+ } __attribute__((__packed__));
+ u64 flags;
+ };
+} __attribute__((__packed__));
+
+/* Hardware tx context descriptor */
+union hw_atl_txc_s {
+ struct {
+ u64 flags1;
+ u64 flags2;
+ };
+
+ struct {
+ u64:40;
+ u32 tun_len:8;
+ u32 out_len:16;
+ u32 type:3;
+ u32 idx:1;
+ u32 vlan_tag:16;
+ u32 cmd:4;
+ u32 l2_len:7;
+ u32 l3_len:9;
+ u32 l4_len:8;
+ u32 mss_len:16;
+ } __attribute__((__packed__));
+} __attribute__((__packed__));
+
+enum aq_tx_desc_type {
+ tx_desc_type_desc = 1,
+ tx_desc_type_ctx = 2,
+};
+
+enum aq_tx_desc_cmd {
+ tx_desc_cmd_vlan = 1,
+ tx_desc_cmd_fcs = 2,
+ tx_desc_cmd_ipv4 = 4,
+ tx_desc_cmd_l4cs = 8,
+ tx_desc_cmd_lso = 0x10,
+ tx_desc_cmd_wb = 0x20,
+};
+
+
+/* Hardware rx descriptor */
+struct hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+} __attribute__((__packed__));
+
+/* Hardware rx descriptor writeback */
+struct hw_atl_rxd_wb_s {
+ u32 rss_type:4;
+ u32 pkt_type:8;
+ u32 type:20;
+ u32 rss_hash;
+ u16 dd:1;
+ u16 eop:1;
+ u16 rx_stat:4;
+ u16 rx_estat:6;
+ u16 rsc_cnt:4;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+} __attribute__((__packed__));
+
+struct hw_atl_stats_s {
+ u32 uprc;
+ u32 mprc;
+ u32 bprc;
+ u32 erpt;
+ u32 uptc;
+ u32 mptc;
+ u32 bptc;
+ u32 erpr;
+ u32 mbtc;
+ u32 bbtc;
+ u32 mbrc;
+ u32 bbrc;
+ u32 ubrc;
+ u32 ubtc;
+ u32 dpc;
+} __attribute__((__packed__));
+
+union ip_addr {
+ struct {
+ u8 addr[16];
+ } v6;
+ struct {
+ u8 padding[12];
+ u8 addr[4];
+ } v4;
+} __attribute__((__packed__));
+
+struct hw_aq_atl_utils_fw_rpc {
+ u32 msg_id;
+
+ union {
+ struct {
+ u32 pong;
+ } msg_ping;
+
+ struct {
+ u8 mac_addr[6];
+ u32 ip_addr_cnt;
+
+ struct {
+ union ip_addr addr;
+ union ip_addr mask;
+ } ip[1];
+ } msg_arp;
+
+ struct {
+ u32 len;
+ u8 packet[1514U];
+ } msg_inject;
+
+ struct {
+ u32 priority;
+ u32 wol_packet_type;
+ u32 pattern_id;
+ u32 next_wol_pattern_offset;
+ union {
+ struct {
+ u32 flags;
+ u8 ipv4_source_address[4];
+ u8 ipv4_dest_address[4];
+ u16 tcp_source_port_number;
+ u16 tcp_dest_port_number;
+ } ipv4_tcp_syn_parameters;
+
+ struct {
+ u32 flags;
+ u8 ipv6_source_address[16];
+ u8 ipv6_dest_address[16];
+ u16 tcp_source_port_number;
+ u16 tcp_dest_port_number;
+ } ipv6_tcp_syn_parameters;
+
+ struct {
+ u32 flags;
+ } eapol_request_id_message_parameters;
+
+ struct {
+ u32 flags;
+ u32 mask_offset;
+ u32 mask_size;
+ u32 pattern_offset;
+ u32 pattern_size;
+ } wol_bit_map_pattern;
+ struct {
+ u8 mac_addr[6];
+ } wol_magic_packet_pattern;
+
+ } wol_pattern;
+ } msg_wol;
+
+ struct {
+ u16 tc_quanta[8];
+ u16 tc_threshold[8];
+ } msg_msm_pfc_quantas;
+
+ struct {
+ union {
+ u32 pattern_mask;
+ struct {
+ u32 aq_pm_wol_reason_arp_v4_pkt : 1;
+ u32 aq_pm_wol_reason_ipv4_ping_pkt : 1;
+ u32 aq_pm_wol_reason_ipv6_ns_pkt : 1;
+ u32 aq_pm_wol_reason_ipv6_ping_pkt : 1;
+ u32 aq_pm_wol_reason_link_up : 1;
+ u32 aq_pm_wol_reason_link_down : 1;
+ u32 aq_pm_wol_reason_maximum : 1;
+ };
+ };
+ union {
+ u32 offload_mask;
+ };
+ } msg_enable_wakeup;
+
+ struct {
+ u32 priority;
+ u32 protocol_offload_type;
+ u32 protocol_offload_id;
+ u32 next_protocol_offload_offset;
+
+ union {
+ struct {
+ u32 flags;
+ u8 remote_ipv4_addr[4];
+ u8 host_ipv4_addr[4];
+ u8 mac_addr[6];
+ } ipv4_arp_params;
+ };
+ } msg_offload;
+
+ struct {
+ u32 id;
+ } msg_del_id;
+
+ };
+} __attribute__((__packed__));
+
+struct hw_aq_atl_utils_mbox_header {
+ u32 version;
+ u32 transaction_id;
+ u32 error;
+} __attribute__((__packed__));
+
+struct hw_aq_info {
+ u8 reserved[6];
+ u16 phy_fault_code;
+ u16 phy_temperature;
+ u8 cable_len;
+ u8 reserved1;
+ u32 cable_diag_data[4];
+ u8 reserved2[32];
+ u32 caps_lo;
+ u32 caps_hi;
+} __attribute__((__packed__));
+
+struct hw_aq_atl_utils_mbox {
+ struct hw_aq_atl_utils_mbox_header header;
+ struct hw_atl_stats_s stats;
+ struct hw_aq_info info;
+} __attribute__((__packed__));
+
+/* fw2x */
+typedef u16 in_port_t;
+typedef u32 ip4_addr_t;
+typedef int int32_t;
+typedef short int16_t;
+typedef u32 fw_offset_t;
+
+struct ip6_addr {
+ u32 addr[4];
+} __attribute__((__packed__));
+
+struct offload_ka_v4 {
+ u32 timeout;
+ in_port_t local_port;
+ in_port_t remote_port;
+ u8 remote_mac_addr[6];
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ ip4_addr_t local_ip;
+ ip4_addr_t remote_ip;
+} __attribute__((__packed__));
+
+struct offload_ka_v6 {
+ u32 timeout;
+ in_port_t local_port;
+ in_port_t remote_port;
+ u8 remote_mac_addr[6];
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ struct ip6_addr local_ip;
+ struct ip6_addr remote_ip;
+} __attribute__((__packed__));
+
+struct offload_ip_info {
+ u8 v4_local_addr_count;
+ u8 v4_addr_count;
+ u8 v6_local_addr_count;
+ u8 v6_addr_count;
+ fw_offset_t v4_addr;
+ fw_offset_t v4_prefix;
+ fw_offset_t v6_addr;
+ fw_offset_t v6_prefix;
+} __attribute__((__packed__));
+
+struct offload_port_info {
+ u16 udp_port_count;
+ u16 tcp_port_count;
+ fw_offset_t udp_port;
+ fw_offset_t tcp_port;
+} __attribute__((__packed__));
+
+struct offload_ka_info {
+ u16 v4_ka_count;
+ u16 v6_ka_count;
+ u32 retry_count;
+ u32 retry_interval;
+ fw_offset_t v4_ka;
+ fw_offset_t v6_ka;
+} __attribute__((__packed__));
+
+struct offload_rr_info {
+ u32 rr_count;
+ u32 rr_buf_len;
+ fw_offset_t rr_id_x;
+ fw_offset_t rr_buf;
+} __attribute__((__packed__));
+
+struct offload_info {
+	u32 version;		/* current version is 0x00000000 */
+	u32 len;		/* whole structure length, including the
+				 * variable-size buf
+				 */
+	u8 mac_addr[6];		/* together with reserved[2] below this pads
+				 * to 8 bytes for alignment; only the first
+				 * 6 bytes are meaningful
+				 */
+
+ u8 reserved[2];
+
+ struct offload_ip_info ips;
+ struct offload_port_info ports;
+ struct offload_ka_info kas;
+ struct offload_rr_info rrs;
+ u8 buf[0];
+} __attribute__((__packed__));
+
+struct smbus_read_request {
+ u32 offset; /* not used */
+ u32 device_id;
+ u32 address;
+ u32 length;
+} __attribute__((__packed__));
+
+struct smbus_write_request {
+ u32 offset; /* not used */
+ u32 device_id;
+ u32 address;
+ u32 length;
+} __attribute__((__packed__));
+
+#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
+#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
+#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
+#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
+
+
+#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
+ self->chip_features)
+
+enum hal_atl_utils_fw_state_e {
+ MPI_DEINIT = 0,
+ MPI_RESET = 1,
+ MPI_INIT = 2,
+ MPI_POWER = 4,
+};
+
+#define HAL_ATLANTIC_RATE_10G BIT(0)
+#define HAL_ATLANTIC_RATE_5G BIT(1)
+#define HAL_ATLANTIC_RATE_5GSR BIT(2)
+#define HAL_ATLANTIC_RATE_2GS BIT(3)
+#define HAL_ATLANTIC_RATE_1G BIT(4)
+#define HAL_ATLANTIC_RATE_100M BIT(5)
+#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+
+#define HAL_ATLANTIC_UTILS_FW_MSG_PING 1U
+#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 2U
+#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 3U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 4U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 5U
+#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 6U
+#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 7U
+#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 8U
+#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 9U
+#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 10U
+#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 13U // 0xd
+
+#define SMBUS_READ_REQUEST BIT(13)
+#define SMBUS_WRITE_REQUEST BIT(14)
+#define SMBUS_DEVICE_ID 0x50
+
+enum hw_atl_fw2x_rate {
+ FW2X_RATE_100M = 0x20,
+ FW2X_RATE_1G = 0x100,
+ FW2X_RATE_2G5 = 0x200,
+ FW2X_RATE_5G = 0x400,
+ FW2X_RATE_10G = 0x800,
+};
+
+enum hw_atl_fw2x_caps_lo {
+ CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_FD,
+ CAPS_LO_100BASETX_HD,
+ CAPS_LO_100BASET4_HD,
+ CAPS_LO_100BASET2_HD,
+ CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASET2_FD,
+ CAPS_LO_1000BASET_HD,
+ CAPS_LO_1000BASET_FD,
+ CAPS_LO_2P5GBASET_FD,
+ CAPS_LO_5GBASET_FD,
+ CAPS_LO_10GBASET_FD,
+};
+
+enum hw_atl_fw2x_caps_hi {
+ CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_10BASET_EEE,
+ CAPS_HI_RESERVED2,
+ CAPS_HI_PAUSE,
+ CAPS_HI_ASYMMETRIC_PAUSE,
+ CAPS_HI_100BASETX_EEE,
+ CAPS_HI_RESERVED3,
+ CAPS_HI_RESERVED4,
+ CAPS_HI_1000BASET_FD_EEE,
+ CAPS_HI_2P5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_10GBASET_FD_EEE,
+ CAPS_HI_RESERVED5,
+ CAPS_HI_RESERVED6,
+ CAPS_HI_RESERVED7,
+ CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED9,
+ CAPS_HI_CABLE_DIAG,
+ CAPS_HI_TEMPERATURE,
+ CAPS_HI_DOWNSHIFT,
+ CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_MEDIA_DETECT,
+ CAPS_HI_LINK_DROP,
+ CAPS_HI_SLEEP_PROXY,
+ CAPS_HI_WOL,
+ CAPS_HI_MAC_STOP,
+ CAPS_HI_EXT_LOOPBACK,
+ CAPS_HI_INT_LOOPBACK,
+ CAPS_HI_EFUSE_AGENT,
+ CAPS_HI_WOL_TIMER,
+ CAPS_HI_STATISTICS,
+ CAPS_HI_TRANSACTION_ID,
+};
+
+struct aq_hw_s;
+struct aq_fw_ops;
+struct aq_hw_link_status_s;
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self);
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
+
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox_header *pmbox);
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox *pmbox);
+
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state,
+ u32 speed);
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
+
+unsigned int hw_atl_utils_hw_get_reg_length(void);
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ u32 *regs_buff);
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+ unsigned int power_state);
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+
+int hw_atl_utils_update_stats(struct aq_hw_s *self);
+
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
+
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt);
+
+int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
+ u32 cnt);
+
+int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
+
+int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
+
+int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_fw_rpc **rpc);
+
+extern const struct aq_fw_ops aq_fw_1x_ops;
+extern const struct aq_fw_ops aq_fw_2x_ops;
+
+#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
new file mode 100644
index 00000000..6841d9bc
--- /dev/null
+++ b/drivers/net/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
+ * Atlantic hardware abstraction layer.
+ */
+
+#include <rte_ether.h>
+#include "../atl_hw_regs.h"
+
+#include "../atl_types.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
+
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c
+
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
+#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
+
+#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_2G5_MASK BIT(CAPS_HI_2P5GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE)
+
+#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
+#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
+
+#define HW_ATL_FW_FEATURE_EEPROM 0x03010025
+#define HW_ATL_FW_FEATURE_LED 0x03010026
+
+struct fw2x_msg_wol_pattern {
+ u8 mask[16];
+ u32 crc;
+} __attribute__((__packed__));
+
+struct fw2x_msg_wol {
+ u32 msg_id;
+ u8 hw_addr[6];
+ u8 magic_packet_enabled;
+ u8 filter_count;
+ struct fw2x_msg_wol_pattern filter[HAL_ATLANTIC_WOL_FILTERS_COUNT];
+ u8 link_up_enabled;
+ u8 link_down_enabled;
+ u16 reserved;
+ u32 link_up_timeout;
+ u32 link_down_timeout;
+} __attribute__((__packed__));
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
+static int aq_fw2x_init(struct aq_hw_s *self)
+{
+ int err = 0;
+
+	/* poll every 1 ms: up to 10 times for the mailbox address,
+	 * up to 100 times for the RPC address
+	 */
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
+ aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
+ 1000U, 10U);
+ AQ_HW_WAIT_FOR(0U != (self->rpc_addr =
+ aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)),
+ 1000U, 100U);
+ return err;
+}
+
+static int aq_fw2x_deinit(struct aq_hw_s *self)
+{
+ int err = aq_fw2x_set_link_speed(self, 0);
+
+ if (!err)
+ err = aq_fw2x_set_state(self, MPI_DEINIT);
+
+ return err;
+}
+
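+/* Translate the generic AQ_NIC_RATE_* link mask into FW 2.x rate bits;
+ * 5G and 5GSR both map to FW2X_RATE_5G.
+ */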
+static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
+{
+ enum hw_atl_fw2x_rate rate = 0;
+
+ if (speed & AQ_NIC_RATE_10G)
+ rate |= FW2X_RATE_10G;
+
+ if (speed & AQ_NIC_RATE_5G)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_5G5R)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_2G5)
+ rate |= FW2X_RATE_2G5;
+
+ if (speed & AQ_NIC_RATE_1G)
+ rate |= FW2X_RATE_1G;
+
+ if (speed & AQ_NIC_RATE_100M)
+ rate |= FW2X_RATE_100M;
+
+ return rate;
+}
+
+static u32 fw2x_to_eee_mask(u32 speed)
+{
+ u32 rate = 0;
+
+ if (speed & HW_ATL_FW2X_CAP_EEE_10G_MASK)
+ rate |= AQ_NIC_RATE_EEE_10G;
+
+ if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK)
+ rate |= AQ_NIC_RATE_EEE_5G;
+
+ if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK)
+ rate |= AQ_NIC_RATE_EEE_2G5;
+
+ if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK)
+ rate |= AQ_NIC_RATE_EEE_1G;
+
+ return rate;
+}
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ u32 val = link_speed_mask_2fw2x_ratemask(speed);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val);
+
+ return 0;
+}
+
+static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+{
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ *mpi_state |= BIT(CAPS_HI_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+}
+
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ switch (state) {
+ case MPI_INIT:
+ mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ break;
+ case MPI_DEINIT:
+ mpi_state |= BIT(CAPS_HI_LINK_DROP);
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+ return 0;
+}
+
+static int aq_fw2x_update_link_status(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+
+ if (speed) {
+ if (speed & FW2X_RATE_10G)
+ link_status->mbps = 10000;
+ else if (speed & FW2X_RATE_5G)
+ link_status->mbps = 5000;
+ else if (speed & FW2X_RATE_2G5)
+ link_status->mbps = 2500;
+ else if (speed & FW2X_RATE_1G)
+ link_status->mbps = 1000;
+ else if (speed & FW2X_RATE_100M)
+ link_status->mbps = 100;
+ else
+ link_status->mbps = 10000;
+ } else {
+ link_status->mbps = 0;
+ }
+
+ return 0;
+}
+
+static
+int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+ u32 mac_addr[2] = { 0 };
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+
+ if (efuse_addr != 0) {
+ err = hw_atl_utils_fw_downld_dwords(self,
+ efuse_addr + (40U * 4U),
+ mac_addr,
+ ARRAY_SIZE(mac_addr));
+ if (err)
+ return err;
+ mac_addr[0] = rte_constant_bswap32(mac_addr[0]);
+ mac_addr[1] = rte_constant_bswap32(mac_addr[1]);
+ }
+
+ ether_addr_copy((struct ether_addr *)mac_addr,
+ (struct ether_addr *)mac);
+
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+ unsigned int rnd = (uint32_t)rte_rand();
+
+ //get_random_bytes(&rnd, sizeof(unsigned int));
+
+ l = 0xE3000000U
+ | (0xFFFFU & rnd)
+ | (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+ return err;
+}
+
+static int aq_fw2x_update_stats(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
+
+ /* Toggle statistics bit for FW to update */
+ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+	/* Wait for FW to report back */
+ AQ_HW_WAIT_FOR(orig_stats_val !=
+ (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ BIT(CAPS_HI_STATISTICS)),
+ 1U, 10000U);
+ if (err)
+ return err;
+
+ return hw_atl_utils_update_stats(self);
+}
+
+static int aq_fw2x_get_temp(struct aq_hw_s *self, int *temp)
+{
+ int err = 0;
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 temp_val = mpi_opts & BIT(CAPS_HI_TEMPERATURE);
+ u32 temp_res;
+
+	/* Toggle temperature bit 0x36C.18 (CAPS_HI_TEMPERATURE) for FW to update */
+ mpi_opts = mpi_opts ^ BIT(CAPS_HI_TEMPERATURE);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+	/* Wait for FW to report back */
+ AQ_HW_WAIT_FOR(temp_val !=
+ (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ BIT(CAPS_HI_TEMPERATURE)), 1U, 10000U);
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->mbox_addr +
+ offsetof(struct hw_aq_atl_utils_mbox, info) +
+ offsetof(struct hw_aq_info, phy_temperature),
+ &temp_res,
+ sizeof(temp_res) / sizeof(u32));
+
+ if (err)
+ return err;
+
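+	/* Scale the raw value by 100/256; the FW appears to report the PHY
+	 * temperature in 1/256 degree units.
+	 */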
+ *temp = temp_res * 100 / 256;
+ return 0;
+}
+
+static int aq_fw2x_get_cable_len(struct aq_hw_s *self, int *cable_len)
+{
+ int err = 0;
+ u32 cable_len_res;
+
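+	/* cable_len shares the dword that starts at the phy_temperature
+	 * offset (bits 23:16), hence the shift and mask below.
+	 */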
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->mbox_addr +
+ offsetof(struct hw_aq_atl_utils_mbox, info) +
+ offsetof(struct hw_aq_info, phy_temperature),
+ &cable_len_res,
+ sizeof(cable_len_res) / sizeof(u32));
+
+ if (err)
+ return err;
+
+ *cable_len = (cable_len_res >> 16) & 0xFF;
+ return 0;
+}
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+
+static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
+{
+ int err = 0;
+ struct hw_aq_atl_utils_fw_rpc *rpc = NULL;
+ struct offload_info *cfg = NULL;
+ unsigned int rpc_size = 0U;
+ u32 mpi_opts;
+
+ rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg);
+
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
+
+ memset(rpc, 0, rpc_size);
+ cfg = (struct offload_info *)(&rpc->msg_id + 1);
+
+ memcpy(cfg->mac_addr, mac, ETH_ALEN);
+ cfg->len = sizeof(*cfg);
+
+ /* Clear bit 0x36C.23 */
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ mpi_opts &= ~HW_ATL_FW2X_CAP_SLEEP_PROXY;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+
+ /* Set bit 0x36C.23 */
+ mpi_opts |= HW_ATL_FW2X_CAP_SLEEP_PROXY;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ HW_ATL_FW2X_CAP_SLEEP_PROXY), 1U, 10000U);
+err_exit:
+ return err;
+}
+
+static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
+{
+ int err = 0;
+ struct fw2x_msg_wol *msg = NULL;
+ u32 mpi_opts;
+
+ struct hw_aq_atl_utils_fw_rpc *rpc = NULL;
+
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
+
+ msg = (struct fw2x_msg_wol *)rpc;
+
+ msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
+ msg->magic_packet_enabled = true;
+ memcpy(msg->hw_addr, mac, ETH_ALEN);
+
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ mpi_opts &= ~(HW_ATL_FW2X_CAP_SLEEP_PROXY | HW_ATL_FW2X_CAP_WOL);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg));
+ if (err < 0)
+ goto err_exit;
+
+ /* Set bit 0x36C.24 */
+ mpi_opts |= HW_ATL_FW2X_CAP_WOL;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ HW_ATL_FW2X_CAP_WOL), 1U, 10000U);
+err_exit:
+ return err;
+}
+
+static int aq_fw2x_set_power(struct aq_hw_s *self,
+ unsigned int power_state __rte_unused,
+ u8 *mac)
+{
+ int err = 0;
+
+ if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
+ err = aq_fw2x_set_sleep_proxy(self, mac);
+ if (err < 0)
+ goto err_exit;
+ err = aq_fw2x_set_wol_params(self, mac);
+ if (err < 0)
+ goto err_exit;
+ }
+err_exit:
+ return err;
+}
+
+static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ mpi_opts &= ~(HW_ATL_FW2X_CAP_EEE_1G_MASK |
+ HW_ATL_FW2X_CAP_EEE_2G5_MASK | HW_ATL_FW2X_CAP_EEE_5G_MASK |
+ HW_ATL_FW2X_CAP_EEE_10G_MASK);
+
+ if (speed & AQ_NIC_RATE_EEE_10G)
+ mpi_opts |= HW_ATL_FW2X_CAP_EEE_10G_MASK;
+
+ if (speed & AQ_NIC_RATE_EEE_5G)
+ mpi_opts |= HW_ATL_FW2X_CAP_EEE_5G_MASK;
+
+ if (speed & AQ_NIC_RATE_EEE_2G5)
+ mpi_opts |= HW_ATL_FW2X_CAP_EEE_2G5_MASK;
+
+ if (speed & AQ_NIC_RATE_EEE_1G)
+ mpi_opts |= HW_ATL_FW2X_CAP_EEE_1G_MASK;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
+ u32 *supported_rates)
+{
+ int err = 0;
+ u32 caps_hi;
+ u32 mpi_state;
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->mbox_addr +
+ offsetof(struct hw_aq_atl_utils_mbox, info) +
+ offsetof(struct hw_aq_info, caps_hi),
+ &caps_hi,
+ sizeof(caps_hi) / sizeof(u32));
+
+ if (err)
+ return err;
+
+ *supported_rates = fw2x_to_eee_mask(caps_hi);
+
+ mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+ *rate = fw2x_to_eee_mask(mpi_state);
+
+ return err;
+}
+
+
+
+static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+ return 0;
+}
+
+static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
+{
+ if (self->fw_ver_actual < HW_ATL_FW_FEATURE_LED)
+ return -EOPNOTSUPP;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode);
+ return 0;
+}
+
+static int aq_fw2x_get_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
+{
+ int err = 0;
+ struct smbus_read_request request;
+ u32 mpi_opts;
+ u32 result = 0;
+
+ if (self->fw_ver_actual < HW_ATL_FW_FEATURE_EEPROM)
+ return -EOPNOTSUPP;
+
+ request.device_id = SMBUS_DEVICE_ID;
+ request.address = 0;
+ request.length = len;
+
+ /* Write SMBUS request to cfg memory */
+ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&request,
+ RTE_ALIGN(sizeof(request), sizeof(u32)));
+
+ if (err < 0)
+ return err;
+
+ /* Toggle 0x368.SMBUS_READ_REQUEST bit */
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR);
+ mpi_opts ^= SMBUS_READ_REQUEST;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts);
+
+ /* Wait until REQUEST_BIT matched in 0x370 */
+
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) &
+ SMBUS_READ_REQUEST) == (mpi_opts & SMBUS_READ_REQUEST),
+ 10U, 10000U);
+
+ if (err < 0)
+ return err;
+
+ err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32),
+ &result,
+ RTE_ALIGN(sizeof(result), sizeof(u32)));
+
+ if (err < 0)
+ return err;
+
+ if (result == 0) {
+ err = hw_atl_utils_fw_downld_dwords(self,
+ self->rpc_addr + sizeof(u32) * 2,
+ data,
+ RTE_ALIGN(len, sizeof(u32)));
+
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+
+static int aq_fw2x_set_eeprom(struct aq_hw_s *self, u32 *data, u32 len)
+{
+ struct smbus_write_request request;
+ u32 mpi_opts, result = 0;
+ int err = 0;
+
+ if (self->fw_ver_actual < HW_ATL_FW_FEATURE_EEPROM)
+ return -EOPNOTSUPP;
+
+ request.device_id = SMBUS_DEVICE_ID;
+ request.address = 0;
+ request.length = len;
+
+ /* Write SMBUS request to cfg memory */
+ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&request,
+ RTE_ALIGN(sizeof(request), sizeof(u32)));
+
+ if (err < 0)
+ return err;
+
+ /* Write SMBUS data to cfg memory */
+ err = hw_atl_utils_fw_upload_dwords(self,
+ self->rpc_addr + sizeof(request),
+ (u32 *)(void *)data,
+ RTE_ALIGN(len, sizeof(u32)));
+
+ if (err < 0)
+ return err;
+
+ /* Toggle 0x368.SMBUS_WRITE_REQUEST bit */
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR);
+ mpi_opts ^= SMBUS_WRITE_REQUEST;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, mpi_opts);
+
+ /* Wait until REQUEST_BIT matched in 0x370 */
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR) &
+ SMBUS_WRITE_REQUEST) == (mpi_opts & SMBUS_WRITE_REQUEST),
+ 10U, 10000U);
+
+ if (err < 0)
+ return err;
+
+ /* Read status of write operation */
+ err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr + sizeof(u32),
+ &result,
+ RTE_ALIGN(sizeof(result), sizeof(u32)));
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+const struct aq_fw_ops aq_fw_2x_ops = {
+ .init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
+ .reset = NULL,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
+ .update_link_status = aq_fw2x_update_link_status,
+ .update_stats = aq_fw2x_update_stats,
+ .set_power = aq_fw2x_set_power,
+ .get_temp = aq_fw2x_get_temp,
+ .get_cable_len = aq_fw2x_get_cable_len,
+ .set_eee_rate = aq_fw2x_set_eee_rate,
+ .get_eee_rate = aq_fw2x_get_eee_rate,
+ .set_flow_control = aq_fw2x_set_flow_control,
+ .led_control = aq_fw2x_led_control,
+ .get_eeprom = aq_fw2x_get_eeprom,
+ .set_eeprom = aq_fw2x_set_eeprom,
+};
diff --git a/drivers/net/atlantic/meson.build b/drivers/net/atlantic/meson.build
new file mode 100644
index 00000000..28fb97ca
--- /dev/null
+++ b/drivers/net/atlantic/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Aquantia Corporation
+
+sources = files(
+ 'atl_rxtx.c',
+ 'atl_ethdev.c',
+ 'atl_hw_regs.c',
+ 'hw_atl/hw_atl_b0.c',
+ 'hw_atl/hw_atl_llh.c',
+ 'hw_atl/hw_atl_utils_fw2x.c',
+ 'hw_atl/hw_atl_utils.c',
+)
diff --git a/drivers/net/atlantic/rte_pmd_atlantic_version.map b/drivers/net/atlantic/rte_pmd_atlantic_version.map
new file mode 100644
index 00000000..521e51f4
--- /dev/null
+++ b/drivers/net/atlantic/rte_pmd_atlantic_version.map
@@ -0,0 +1,4 @@
+DPDK_18.11 {
+
+ local: *;
+};
diff --git a/drivers/net/avf/Makefile b/drivers/net/avf/Makefile
index 3f815bbc..aec6e4cb 100644
--- a/drivers/net/avf/Makefile
+++ b/drivers/net/avf/Makefile
@@ -8,7 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_avf.a
-CFLAGS += -O3
+CFLAGS += -O3 $(WERROR_FLAGS) -Wno-strict-aliasing -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
LDLIBS += -lrte_bus_pci
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index 3a2baaf2..13eec1b4 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -154,7 +154,6 @@ static int
avf_init_rss(struct avf_adapter *adapter)
{
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
- struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
struct rte_eth_rss_conf *rss_conf;
uint8_t i, j, nb_q;
int ret;
@@ -259,11 +258,8 @@ avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
static int
avf_init_queues(struct rte_eth_dev *dev)
{
- struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct avf_rx_queue **rxq =
(struct avf_rx_queue **)dev->data->rx_queues;
- struct avf_tx_queue **txq =
- (struct avf_tx_queue **)dev->data->tx_queues;
int i, ret = AVF_SUCCESS;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -415,7 +411,6 @@ avf_dev_start(struct rte_eth_dev *dev)
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -476,9 +471,7 @@ avf_dev_stop(struct rte_eth_dev *dev)
struct avf_adapter *adapter =
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = dev->intr_handle;
- int ret, i;
PMD_INIT_FUNC_TRACE();
@@ -503,8 +496,6 @@ avf_dev_stop(struct rte_eth_dev *dev)
static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct avf_adapter *adapter =
- AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
@@ -523,8 +514,6 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -569,7 +558,7 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static const uint32_t *
-avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+avf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -915,7 +904,6 @@ avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
int ret = 0;
@@ -1045,8 +1033,6 @@ avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct avf_adapter *adapter =
- AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -1089,7 +1075,7 @@ avf_check_vf_reset_done(struct avf_hw *hw)
static int
avf_init_vf(struct rte_eth_dev *dev)
{
- int i, err, bufsz;
+ int err, bufsz;
struct avf_adapter *adapter =
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1198,7 +1184,6 @@ avf_dev_interrupt_handler(void *param)
avf_handle_virtchnl_msg(dev);
-done:
avf_enable_irq0(hw);
}
@@ -1318,9 +1303,6 @@ avf_dev_uninit(struct rte_eth_dev *dev)
rte_free(vf->aq_resp);
vf->aq_resp = NULL;
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
if (vf->rss_lut) {
rte_free(vf->rss_lut);
vf->rss_lut = NULL;
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index e03a136f..8c7a9672 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -247,7 +247,6 @@ alloc_rxq_mbufs(struct avf_rx_queue *rxq)
static inline void
release_rxq_mbufs(struct avf_rx_queue *rxq)
{
- struct rte_mbuf *mbuf;
uint16_t i;
if (!rxq->sw_ring)
@@ -310,9 +309,8 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
struct avf_rx_queue *rxq;
const struct rte_memzone *mz;
uint32_t ring_size;
- uint16_t len, i;
+ uint16_t len;
uint16_t rx_free_thresh;
- uint16_t base, bsf, tc_mapping;
PMD_INIT_FUNC_TRACE();
@@ -428,13 +426,10 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf)
{
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct avf_adapter *ad =
- AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_tx_queue *txq;
const struct rte_memzone *mz;
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
- uint16_t i, base, bsf, tc_mapping;
uint64_t offloads;
PMD_INIT_FUNC_TRACE();
@@ -515,8 +510,11 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->ops = &def_txq_ops;
#ifdef RTE_LIBRTE_AVF_INC_VECTOR
- if (check_tx_vec_allow(txq) == FALSE)
+ if (check_tx_vec_allow(txq) == FALSE) {
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
ad->tx_vec_allowed = false;
+ }
#endif
return 0;
@@ -1268,7 +1266,6 @@ static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct avf_rx_queue *rxq = (struct avf_rx_queue *)rx_queue;
- struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
@@ -1584,10 +1581,6 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (nb_ctx) {
/* Setup TX context descriptor if required */
- volatile struct avf_tx_context_desc *ctx_txd =
- (volatile struct avf_tx_context_desc *)
- &txr[tx_id];
- uint16_t cd_l2tag2 = 0;
uint64_t cd_type_cmd_tso_mss =
AVF_TX_DESC_DTYPE_CONTEXT;
@@ -1603,7 +1596,7 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
cd_type_cmd_tso_mss |=
avf_set_tso_ctx(tx_pkt, tx_offload);
- AVF_DUMP_TX_DESC(txq, ctx_txd, tx_id);
+ AVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
@@ -1925,7 +1918,7 @@ avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_recv_pkts_vec(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
@@ -1933,7 +1926,7 @@ avf_recv_pkts_vec(__rte_unused void *rx_queue,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
@@ -1941,7 +1934,7 @@ avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
__rte_unused struct rte_mbuf **tx_pkts,
__rte_unused uint16_t nb_pkts)
@@ -1949,13 +1942,13 @@ avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
return 0;
}
-int __attribute__((weak))
+__rte_weak int
avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
{
return -1;
}
-int __attribute__((weak))
+__rte_weak int
avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
{
return -1;
diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h
index 297d0776..898d2f38 100644
--- a/drivers/net/avf/avf_rxtx.h
+++ b/drivers/net/avf/avf_rxtx.h
@@ -201,17 +201,17 @@ int avf_txq_vec_setup(struct avf_tx_queue *txq);
static inline
void avf_dump_rx_descriptor(struct avf_rx_queue *rxq,
- const void *desc,
+ const volatile void *desc,
uint16_t rx_id)
{
#ifdef RTE_LIBRTE_AVF_16BYTE_RX_DESC
- const union avf_16byte_rx_desc *rx_desc = desc;
+ const volatile union avf_16byte_rx_desc *rx_desc = desc;
printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
rx_desc->read.hdr_addr);
#else
- const union avf_32byte_rx_desc *rx_desc = desc;
+ const volatile union avf_32byte_rx_desc *rx_desc = desc;
printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
" QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
@@ -225,10 +225,10 @@ void avf_dump_rx_descriptor(struct avf_rx_queue *rxq,
*/
static inline
void avf_dump_tx_descriptor(const struct avf_tx_queue *txq,
- const void *desc, uint16_t tx_id)
+ const volatile void *desc, uint16_t tx_id)
{
- char *name;
- const struct avf_tx_desc *tx_desc = desc;
+ const char *name;
+ const volatile struct avf_tx_desc *tx_desc = desc;
enum avf_tx_desc_dtype_value type;
type = (enum avf_tx_desc_dtype_value)rte_le_to_cpu_64(
diff --git a/drivers/net/avf/avf_rxtx_vec_sse.c b/drivers/net/avf/avf_rxtx_vec_sse.c
index 8275100f..343a6aac 100644
--- a/drivers/net/avf/avf_rxtx_vec_sse.c
+++ b/drivers/net/avf/avf_rxtx_vec_sse.c
@@ -621,7 +621,7 @@ avf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-void __attribute__((cold))
+static void __attribute__((cold))
avf_rx_queue_release_mbufs_sse(struct avf_rx_queue *rxq)
{
_avf_rx_queue_release_mbufs_vec(rxq);
diff --git a/drivers/net/avf/avf_vchnl.c b/drivers/net/avf/avf_vchnl.c
index fa71014e..fd90cc2c 100644
--- a/drivers/net/avf/avf_vchnl.c
+++ b/drivers/net/avf/avf_vchnl.c
@@ -69,7 +69,6 @@ avf_execute_vf_cmd(struct avf_adapter *adapter, struct avf_cmd_info *args)
{
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
- struct avf_arq_event_info event_info;
enum avf_status_code ret;
int err = 0;
int i = 0;
@@ -600,7 +599,6 @@ avf_config_irq_map(struct avf_adapter *adapter)
struct virtchnl_irq_map_info *map_info;
struct virtchnl_vector_map *vecmap;
struct avf_cmd_info args;
- uint32_t vector_id;
int len, i, err;
len = sizeof(struct virtchnl_irq_map_info) +
diff --git a/drivers/net/avf/base/avf_osdep.h b/drivers/net/avf/base/avf_osdep.h
index 9ef45968..442a5acd 100644
--- a/drivers/net/avf/base/avf_osdep.h
+++ b/drivers/net/avf/base/avf_osdep.h
@@ -93,8 +93,8 @@ typedef uint64_t u64;
#define avf_memset(a, b, c, d) memset((a), (b), (c))
#define avf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
-#define avf_usec_delay(x) rte_delay_us(x)
-#define avf_msec_delay(x) rte_delay_us(1000*(x))
+#define avf_usec_delay(x) rte_delay_us_sleep(x)
+#define avf_msec_delay(x) avf_usec_delay(1000 * (x))
#define AVF_PCI_REG(reg) rte_read32(reg)
#define AVF_PCI_REG_ADDR(a, reg) \
diff --git a/drivers/net/avf/base/meson.build b/drivers/net/avf/base/meson.build
new file mode 100644
index 00000000..6f3d7192
--- /dev/null
+++ b/drivers/net/avf/base/meson.build
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+sources = [
+ 'avf_adminq.c',
+ 'avf_common.c',
+]
+
+error_cflags = ['-Wno-pointer-to-int-cast']
+c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('avf_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/avf/meson.build b/drivers/net/avf/meson.build
new file mode 100644
index 00000000..2dfda9d4
--- /dev/null
+++ b/drivers/net/avf/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+cflags += ['-Wno-strict-aliasing']
+
+allow_experimental_apis = true
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'avf_ethdev.c',
+ 'avf_rxtx.c',
+ 'avf_vchnl.c',
+)
+
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_LIBRTE_AVF_INC_VECTOR', 1)
+ sources += files('avf_rxtx_vec_sse.c')
+endif
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 761f6c1c..09388d05 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1036,11 +1036,6 @@ eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
return ret;
}
- if (eth_dev->data->mac_addrs != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- }
-
return 0;
}
@@ -2170,7 +2165,6 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
}
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
diff --git a/drivers/net/avp/meson.build b/drivers/net/avp/meson.build
index 6076c31b..b7ffdfc8 100644
--- a/drivers/net/avp/meson.build
+++ b/drivers/net/avp/meson.build
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
+if host_machine.system() != 'linux'
+ build = false
+endif
sources = files('avp_ethdev.c')
install_headers('rte_avp_common.h', 'rte_avp_fifo.h')
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 9ae9f063..e89c0ec2 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -364,7 +364,6 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
@@ -719,9 +718,6 @@ eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
- /*Free macaddres*/
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index c5fd5f41..b5a29a95 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -74,7 +74,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
(DMA_CH_INC * rxq->queue_id));
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 4904eaf3..27975936 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -112,6 +112,7 @@ static void bnx2x_pf_disable(struct bnx2x_softc *sc);
static void bnx2x_update_rx_prod(struct bnx2x_softc *sc,
struct bnx2x_fastpath *fp,
uint16_t rx_bd_prod, uint16_t rx_cq_prod);
+static void bnx2x_link_report_locked(struct bnx2x_softc *sc);
static void bnx2x_link_report(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
@@ -178,13 +179,14 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, align);
if (z == NULL) {
- PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
+ PMD_DRV_LOG(ERR, sc, "DMA alloc failed for %s", msg);
return -ENOMEM;
}
dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
- PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
+ PMD_DRV_LOG(DEBUG, sc,
+ "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
return 0;
}
@@ -197,11 +199,12 @@ static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
uint32_t hw_lock_control_reg;
int cnt;
- PMD_INIT_FUNC_TRACE();
+ if (resource)
+ PMD_INIT_FUNC_TRACE(sc);
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",
resource);
return -1;
@@ -217,7 +220,7 @@ static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
/* validate the resource is not already taken */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (lock_status & resource_bit) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"resource in use (status 0x%x bit 0x%x)",
lock_status, resource_bit);
return -1;
@@ -233,7 +236,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
DELAY(5000);
}
- PMD_DRV_LOG(NOTICE, "Resource lock timeout!");
+ PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!",
+ resource, resource_bit);
return -1;
}
@@ -244,13 +248,14 @@ static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
int func = SC_FUNC(sc);
uint32_t hw_lock_control_reg;
- PMD_INIT_FUNC_TRACE();
+ if (resource)
+ PMD_INIT_FUNC_TRACE(sc);
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- PMD_DRV_LOG(NOTICE,
- "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",
- resource);
+ PMD_DRV_LOG(NOTICE, sc,
+ "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
+ " resource_bit 0x%x", resource, resource_bit);
return -1;
}
@@ -264,7 +269,7 @@ static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
/* validate the resource is currently taken */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"resource not in use (status 0x%x bit 0x%x)",
lock_status, resource_bit);
return -1;
@@ -274,6 +279,18 @@ static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
return 0;
}
+static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc)
+{
+ BNX2X_PHY_LOCK(sc);
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
+}
+
+static void bnx2x_release_phy_lock(struct bnx2x_softc *sc)
+{
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
+ BNX2X_PHY_UNLOCK(sc);
+}
+
/* copy command into DMAE command memory and set DMAE command Go */
void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx)
{
@@ -366,7 +383,7 @@ bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae)
if (!timeout ||
(sc->recovery_state != BNX2X_RECOVERY_DONE &&
sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
- PMD_DRV_LOG(INFO, "DMAE timeout!");
+ PMD_DRV_LOG(INFO, sc, "DMAE timeout!");
return DMAE_TIMEOUT;
}
@@ -375,7 +392,7 @@ bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae)
}
if (*wb_comp & DMAE_PCI_ERR_FLAG) {
- PMD_DRV_LOG(INFO, "DMAE PCI error!");
+ PMD_DRV_LOG(INFO, sc, "DMAE PCI error!");
return DMAE_PCI_ERROR;
}
@@ -534,7 +551,7 @@ void
elink_cb_event_log(__rte_unused struct bnx2x_softc *sc,
__rte_unused const elink_log_id_t elink_log_id, ...)
{
- PMD_DRV_LOG(DEBUG, "ELINK EVENT LOG (%d)", elink_log_id);
+ PMD_DRV_LOG(DEBUG, sc, "ELINK EVENT LOG (%d)", elink_log_id);
}
static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode)
@@ -543,7 +560,7 @@ static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode)
/* Only 2 SPIOs are configurable */
if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
- PMD_DRV_LOG(NOTICE, "Invalid SPIO 0x%x", spio);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid SPIO 0x%x", spio);
return -1;
}
@@ -593,7 +610,7 @@ static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port)
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num);
return -1;
}
@@ -618,7 +635,7 @@ bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t po
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num);
return -1;
}
@@ -687,7 +704,8 @@ bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode)
break;
default:
- PMD_DRV_LOG(NOTICE, "Invalid GPIO mode assignment %d", mode);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Invalid GPIO mode assignment %d", mode);
bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
return -1;
}
@@ -713,7 +731,7 @@ bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode,
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num);
return -1;
}
@@ -790,7 +808,7 @@ elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"wrote command 0x%08x to FW MB param 0x%08x",
(command | seq), param);
@@ -805,7 +823,7 @@ elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
rc &= FW_MSG_CODE_MASK;
} else {
/* Ruh-roh! */
- PMD_DRV_LOG(NOTICE, "FW failed to respond!");
+ PMD_DRV_LOG(NOTICE, sc, "FW failed to respond!");
rc = 0;
}
@@ -1023,12 +1041,12 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
if (common) {
if (!atomic_load_acq_long(&sc->eq_spq_left)) {
- PMD_DRV_LOG(INFO, "EQ ring is full!");
+ PMD_DRV_LOG(INFO, sc, "EQ ring is full!");
return -1;
}
} else {
if (!atomic_load_acq_long(&sc->cq_spq_left)) {
- PMD_DRV_LOG(INFO, "SPQ ring is full!");
+ PMD_DRV_LOG(INFO, sc, "SPQ ring is full!");
return -1;
}
}
@@ -1061,7 +1079,7 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
atomic_subtract_acq_long(&sc->cq_spq_left, 1);
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x"
"data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)",
sc->spq_prod_idx,
@@ -1132,44 +1150,45 @@ bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"fp=%d cid=%d got ramrod #%d state is %x type is %d",
fp->index, cid, command, sc->state,
rr_cqe->ramrod_cqe.ramrod_type);
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
- PMD_DRV_LOG(DEBUG, "got UPDATE ramrod. CID %d", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got UPDATE ramrod. CID %d", cid);
drv_cmd = ECORE_Q_CMD_UPDATE;
break;
case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] setup ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] setup ramrod", cid);
drv_cmd = ECORE_Q_CMD_SETUP;
break;
case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] tx-only setup ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc,
+ "got MULTI[%d] tx-only setup ramrod", cid);
drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
break;
case (RAMROD_CMD_ID_ETH_HALT):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] halt ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] halt ramrod", cid);
drv_cmd = ECORE_Q_CMD_HALT;
break;
case (RAMROD_CMD_ID_ETH_TERMINATE):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] teminate ramrod", cid);
+		PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] terminate ramrod", cid);
drv_cmd = ECORE_Q_CMD_TERMINATE;
break;
case (RAMROD_CMD_ID_ETH_EMPTY):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] empty ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] empty ramrod", cid);
drv_cmd = ECORE_Q_CMD_EMPTY;
break;
default:
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"ERROR: unexpected MC reply (%d)"
"on fp[%d]", command, fp->index);
return;
@@ -1191,7 +1210,7 @@ bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
atomic_add_acq_long(&sc->cq_spq_left, 1);
- PMD_DRV_LOG(DEBUG, "sc->cq_spq_left 0x%lx",
+ PMD_DRV_LOG(DEBUG, sc, "sc->cq_spq_left 0x%lx",
atomic_load_acq_long(&sc->cq_spq_left));
}
@@ -1387,7 +1406,7 @@ bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj,
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
if (rc < 0)
- PMD_DRV_LOG(ERR, "Failed to delete MACs (%d)", rc);
+ PMD_DRV_LOG(ERR, sc, "Failed to delete MACs (%d)", rc);
return rc;
}
@@ -1538,13 +1557,13 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc)
int path = SC_PATH(sc);
int port = SC_PORT(sc);
- PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
load_count[path][0]++;
load_count[path][1 + port]++;
- PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
if (load_count[path][0] == 1)
@@ -1561,12 +1580,12 @@ static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc)
int port = SC_PORT(sc);
int path = SC_PATH(sc);
- PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
load_count[path][0]--;
load_count[path][1 + port]--;
- PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
if (load_count[path][0] == 0) {
@@ -1646,7 +1665,7 @@ static int bnx2x_func_wait_started(struct bnx2x_softc *sc)
*/
struct ecore_func_state_params func_params = { NULL };
- PMD_DRV_LOG(NOTICE, "Unexpected function state! "
+ PMD_DRV_LOG(NOTICE, sc, "Unexpected function state! "
"Forcing STARTED-->TX_STOPPED-->STARTED");
func_params.f_obj = &sc->func_obj;
@@ -1670,7 +1689,7 @@ static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index)
struct ecore_queue_state_params q_params = { NULL };
int rc;
- PMD_DRV_LOG(DEBUG, "stopping queue %d cid %d", index, fp->index);
+ PMD_DRV_LOG(DEBUG, sc, "stopping queue %d cid %d", index, fp->index);
q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
/* We want to wait for completion in this context */
@@ -1721,7 +1740,7 @@ static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, unsigned long mask)
tmp = atomic_load_acq_long(&sc->sp_state);
if (tmp & mask) {
- PMD_DRV_LOG(INFO, "Filtering completion timed out: "
+ PMD_DRV_LOG(INFO, sc, "Filtering completion timed out: "
"sp_state 0x%lx, mask 0x%lx", tmp, mask);
return FALSE;
}
@@ -1747,7 +1766,7 @@ static int bnx2x_func_stop(struct bnx2x_softc *sc)
*/
rc = ecore_func_state_change(sc, &func_params);
if (rc) {
- PMD_DRV_LOG(NOTICE, "FUNC_STOP ramrod failed. "
+ PMD_DRV_LOG(NOTICE, sc, "FUNC_STOP ramrod failed. "
"Running a dry transaction");
bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
return ecore_func_state_change(sc, &func_params);
@@ -1796,14 +1815,16 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC,
FALSE);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE, "Failed to delete all ETH MACs (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to delete all ETH MACs (%d)", rc);
}
/* Clean up UC list */
rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC,
TRUE);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE, "Failed to delete UC MACs list (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to delete UC MACs list (%d)", rc);
}
/* Disable LLH */
@@ -1826,7 +1847,7 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
rparam.mcast_obj = &sc->mcast_obj;
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Failed to send DEL MCAST command (%d)", rc);
}
@@ -1843,7 +1864,7 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
*/
rc = bnx2x_func_wait_started(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "bnx2x_func_wait_started failed");
+ PMD_DRV_LOG(NOTICE, sc, "bnx2x_func_wait_started failed");
}
/*
@@ -1861,14 +1882,14 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
* very wrong has happen.
*/
if (!bnx2x_wait_sp_comp(sc, ~0x0UL)) {
- PMD_DRV_LOG(NOTICE, "Common slow path ramrods got stuck!");
+ PMD_DRV_LOG(NOTICE, sc, "Common slow path ramrods got stuck!");
}
unload_error:
rc = bnx2x_func_stop(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Function stop failed!");
+ PMD_DRV_LOG(NOTICE, sc, "Function stop failed!");
}
/* disable HW interrupts */
@@ -1877,7 +1898,7 @@ unload_error:
/* Reset the chip */
rc = bnx2x_reset_hw(sc, reset_code);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Hardware reset failed");
+ PMD_DRV_LOG(NOTICE, sc, "Hardware reset failed");
}
/* Report UNLOAD_DONE to MCP */
@@ -1888,7 +1909,7 @@ static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc)
{
uint32_t val;
- PMD_DRV_LOG(DEBUG, "Disabling 'close the gates'");
+ PMD_DRV_LOG(DEBUG, sc, "Disabling 'close the gates'");
val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
@@ -1919,7 +1940,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0) {
- PMD_DRV_LOG(NOTICE, "Failed to clean ETH MACs (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc, "Failed to clean ETH MACs (%d)", rc);
}
/* Cleanup UC list */
@@ -1927,7 +1948,8 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
bnx2x_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
if (rc != 0) {
- PMD_DRV_LOG(NOTICE, "Failed to clean UC list MACs (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to clean UC list MACs (%d)", rc);
}
/* Now clean mcast object... */
@@ -1938,7 +1960,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
/* Add a DEL command... */
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Failed to send DEL MCAST command (%d)", rc);
}
@@ -1947,7 +1969,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
while (rc != 0) {
if (rc < 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Failed to clean MCAST object (%d)", rc);
return;
}
@@ -1964,7 +1986,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
uint8_t global = FALSE;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "Starting NIC unload...");
+ PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload...");
/* mark driver as unloaded in shmem2 */
if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
@@ -1988,7 +2010,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
bnx2x_release_leader_lock(sc);
mb();
- PMD_DRV_LOG(NOTICE, "Can't unload in closed or error state");
+ PMD_DRV_LOG(NOTICE, sc, "Can't unload in closed or error state");
return -1;
}
@@ -2093,7 +2115,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
bnx2x_disable_close_the_gate(sc);
}
- PMD_DRV_LOG(DEBUG, "Ended NIC unload");
+ PMD_DRV_LOG(DEBUG, sc, "Ended NIC unload");
return 0;
}
@@ -2241,7 +2263,7 @@ static void bnx2x_ilt_set_info(struct bnx2x_softc *sc)
struct ecore_ilt *ilt = sc->ilt;
uint16_t line = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
@@ -2395,7 +2417,7 @@ static int bnx2x_alloc_mem(struct bnx2x_softc *sc)
bnx2x_alloc_ilt_lines_mem(sc);
if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
- PMD_DRV_LOG(NOTICE, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed");
+ PMD_DRV_LOG(NOTICE, sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed");
bnx2x_free_mem(sc);
return -1;
}
@@ -2598,7 +2620,7 @@ static void bnx2x_set_pf_load(struct bnx2x_softc *sc)
bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
@@ -2651,14 +2673,14 @@ static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc)
/* send load requrest to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
{
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* init fw_seq */
sc->fw_seq =
(SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
- PMD_DRV_LOG(DEBUG, "initial fw_seq 0x%04x", sc->fw_seq);
+ PMD_DRV_LOG(DEBUG, sc, "initial fw_seq 0x%04x", sc->fw_seq);
#ifdef BNX2X_PULSE
/* get the current FW pulse sequence */
@@ -2677,13 +2699,13 @@ static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
/* if the MCP fails to respond we must abort */
if (!(*load_code)) {
- PMD_DRV_LOG(NOTICE, "MCP response failure!");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure!");
return -1;
}
/* if MCP refused then must abort */
if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
- PMD_DRV_LOG(NOTICE, "MCP refused load request");
+ PMD_DRV_LOG(NOTICE, sc, "MCP refused load request");
return -1;
}
@@ -2710,12 +2732,12 @@ static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code
/* read loaded FW from chip */
loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
- PMD_DRV_LOG(DEBUG, "loaded FW 0x%08x / my FW 0x%08x",
+ PMD_DRV_LOG(DEBUG, sc, "loaded FW 0x%08x / my FW 0x%08x",
loaded_fw, my_fw);
/* abort nic load if version mismatch */
if (my_fw != loaded_fw) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"FW 0x%08x already loaded (mine is 0x%08x)",
loaded_fw, my_fw);
return -1;
@@ -2730,7 +2752,7 @@ static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code)
{
uint32_t ncsi_oem_data_addr;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
(load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
@@ -2745,7 +2767,7 @@ static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code)
sc->port.pmf = 0;
}
- PMD_DRV_LOG(DEBUG, "pmf %d", sc->port.pmf);
+ PMD_DRV_LOG(DEBUG, sc, "pmf %d", sc->port.pmf);
if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
@@ -2788,10 +2810,10 @@ static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc)
if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
FUNC_MF_CFG_FUNC_DISABLED) {
- PMD_DRV_LOG(DEBUG, "mf_cfg function disabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled");
sc->flags |= BNX2X_MF_FUNC_DIS;
} else {
- PMD_DRV_LOG(DEBUG, "mf_cfg function enabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled");
sc->flags &= ~BNX2X_MF_FUNC_DIS;
}
}
@@ -2812,7 +2834,7 @@ static int bnx2x_acquire_alr(struct bnx2x_softc *sc)
}
if (!(val & (1L << 31))) {
- PMD_DRV_LOG(NOTICE, "Cannot acquire MCP access lock register");
+ PMD_DRV_LOG(NOTICE, sc, "Cannot acquire MCP access lock register");
return -1;
}
@@ -2840,7 +2862,7 @@ static void bnx2x_fan_failure(struct bnx2x_softc *sc)
ext_phy_config);
/* log the failure */
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Fan Failure has caused the driver to shutdown "
"the card to prevent permanent damage. "
"Please contact OEM Support for assistance");
@@ -2897,7 +2919,7 @@ static void bnx2x_link_attn(struct bnx2x_softc *sc)
}
}
- bnx2x_link_report(sc);
+ bnx2x_link_report_locked(sc);
if (IS_MF(sc)) {
bnx2x_link_sync_notify(sc);
@@ -2918,7 +2940,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
uint32_t cnt;
if (sc->attn_state & asserted) {
- PMD_DRV_LOG(ERR, "IGU ERROR attn=0x%08x", asserted);
+ PMD_DRV_LOG(ERR, sc, "IGU ERROR attn=0x%08x", asserted);
}
bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
@@ -2936,6 +2958,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
if (asserted & ATTN_HARD_WIRED_MASK) {
if (asserted & ATTN_NIG_FOR_FUNC) {
+ bnx2x_acquire_phy_lock(sc);
/* save nig interrupt mask */
nig_mask = REG_RD(sc, nig_int_mask_addr);
@@ -2950,45 +2973,45 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
}
if (asserted & ATTN_SW_TIMER_4_FUNC) {
- PMD_DRV_LOG(DEBUG, "ATTN_SW_TIMER_4_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_SW_TIMER_4_FUNC!");
}
if (asserted & GPIO_2_FUNC) {
- PMD_DRV_LOG(DEBUG, "GPIO_2_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "GPIO_2_FUNC!");
}
if (asserted & GPIO_3_FUNC) {
- PMD_DRV_LOG(DEBUG, "GPIO_3_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "GPIO_3_FUNC!");
}
if (asserted & GPIO_4_FUNC) {
- PMD_DRV_LOG(DEBUG, "GPIO_4_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "GPIO_4_FUNC!");
}
if (port == 0) {
if (asserted & ATTN_GENERAL_ATTN_1) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_1!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_1!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_2) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_2!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_2!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_3) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_3!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_3!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
}
} else {
if (asserted & ATTN_GENERAL_ATTN_4) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_4!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_4!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_5) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_5!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_5!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_6) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_6!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_6!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
}
}
@@ -3002,7 +3025,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8);
}
- PMD_DRV_LOG(DEBUG, "about to mask 0x%08x at %s addr 0x%08x",
+ PMD_DRV_LOG(DEBUG, sc, "about to mask 0x%08x at %s addr 0x%08x",
asserted,
(sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
reg_addr);
@@ -3024,7 +3047,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
&& (++cnt < MAX_IGU_ATTN_ACK_TO));
if (!igu_acked) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to verify IGU ack on time");
}
@@ -3033,6 +3056,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
REG_WR(sc, nig_int_mask_addr, nig_mask);
+ bnx2x_release_phy_lock(sc);
}
}
@@ -3040,7 +3064,7 @@ static void
bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx,
__rte_unused const char *blk)
{
- PMD_DRV_LOG(INFO, "%s%s", idx ? ", " : "", blk);
+ PMD_DRV_LOG(INFO, sc, "%s%s", idx ? ", " : "", blk);
}
static int
@@ -3348,7 +3372,7 @@ bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print,
(sig[2] & HW_PRTY_ASSERT_SET_2) ||
(sig[3] & HW_PRTY_ASSERT_SET_3) ||
(sig[4] & HW_PRTY_ASSERT_SET_4)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Parity error: HW block parity attention:"
"[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x",
(uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0),
@@ -3358,7 +3382,7 @@ bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print,
(uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4));
if (print)
- PMD_DRV_LOG(INFO, "Parity errors detected in blocks: ");
+ PMD_DRV_LOG(INFO, sc, "Parity errors detected in blocks: ");
par_num =
bnx2x_check_blocks_with_parity0(sc, sig[0] &
@@ -3382,7 +3406,7 @@ bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print,
par_num, print);
if (print)
- PMD_DRV_LOG(INFO, "");
+ PMD_DRV_LOG(INFO, sc, "");
return TRUE;
}
@@ -3414,64 +3438,64 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn)
if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
- PMD_DRV_LOG(INFO, "ERROR: PGLUE hw attention 0x%08x", val);
+ PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW");
}
if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
- PMD_DRV_LOG(INFO, "ERROR: ATC hw attention 0x%08x", val);
+ PMD_DRV_LOG(INFO, sc, "ERROR: ATC hw attention 0x%08x", val);
if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND");
if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS");
if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR");
if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU");
}
if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: FATAL parity attention set4 0x%08x",
(uint32_t) (attn &
(AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
@@ -3594,11 +3618,11 @@ static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event)
*/
if (sc->devinfo.
mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
- PMD_DRV_LOG(DEBUG, "mf_cfg function disabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled");
sc->flags |= BNX2X_MF_FUNC_DIS;
bnx2x_e1h_disable(sc);
} else {
- PMD_DRV_LOG(DEBUG, "mf_cfg function enabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled");
sc->flags &= ~BNX2X_MF_FUNC_DIS;
bnx2x_e1h_enable(sc);
}
@@ -3653,7 +3677,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx)
- PMD_DRV_LOG(ERR, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
/* print the asserts */
for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
@@ -3675,7 +3699,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3688,7 +3712,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx) {
- PMD_DRV_LOG(ERR, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
}
/* print the asserts */
@@ -3711,7 +3735,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3724,7 +3748,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx) {
- PMD_DRV_LOG(ERR, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
}
/* print the asserts */
@@ -3747,7 +3771,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3760,7 +3784,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx) {
- PMD_DRV_LOG(ERR, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
}
/* print the asserts */
@@ -3783,7 +3807,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3832,8 +3856,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
if (sc->link_vars.periodic_flags &
ELINK_PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
+ bnx2x_acquire_phy_lock(sc);
sc->link_vars.periodic_flags &=
~ELINK_PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(sc);
if (IS_MF(sc)) {
bnx2x_link_sync_notify(sc);
}
@@ -3848,7 +3874,7 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
} else if (attn & BNX2X_MC_ASSERT_BITS) {
- PMD_DRV_LOG(ERR, "MC assert!");
+ PMD_DRV_LOG(ERR, sc, "MC assert!");
bnx2x_mc_assert(sc);
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
@@ -3858,24 +3884,24 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
} else if (attn & BNX2X_MCP_ASSERT) {
- PMD_DRV_LOG(ERR, "MCP assert!");
+ PMD_DRV_LOG(ERR, sc, "MCP assert!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Unknown HW assert! (attn 0x%08x)", attn);
}
}
if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
- PMD_DRV_LOG(ERR, "LATCHED attention 0x%08x (masked)", attn);
+ PMD_DRV_LOG(ERR, sc, "LATCHED attention 0x%08x (masked)", attn);
if (attn & BNX2X_GRC_TIMEOUT) {
val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
- PMD_DRV_LOG(ERR, "GRC time-out 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "GRC time-out 0x%08x", val);
}
if (attn & BNX2X_GRC_RSV) {
val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
- PMD_DRV_LOG(ERR, "GRC reserved 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "GRC reserved 0x%08x", val);
}
REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
}
@@ -3890,24 +3916,24 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
- PMD_DRV_LOG(ERR, "CFC hw attention 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "CFC hw attention 0x%08x", val);
/* CFC error attention */
if (val & 0x2) {
- PMD_DRV_LOG(ERR, "FATAL error from CFC");
+ PMD_DRV_LOG(ERR, sc, "FATAL error from CFC");
}
}
if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
- PMD_DRV_LOG(ERR, "PXP hw attention-0 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "PXP hw attention-0 0x%08x", val);
/* RQ_USDMDP_FIFO_OVERFLOW */
if (val & 0x18000) {
- PMD_DRV_LOG(ERR, "FATAL error from PXP");
+ PMD_DRV_LOG(ERR, sc, "FATAL error from PXP");
}
if (!CHIP_IS_E1x(sc)) {
val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
- PMD_DRV_LOG(ERR, "PXP hw attention-1 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "PXP hw attention-1 0x%08x", val);
}
}
#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
@@ -3935,7 +3961,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
/* print the register, since no one can restore it */
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0);
/*
@@ -3943,7 +3969,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
* then notify
*/
if (val0 & PXP2_EOP_ERROR_BIT) {
- PMD_DRV_LOG(ERR, "PXP2_WR_PGLUE_EOP_ERROR");
+ PMD_DRV_LOG(ERR, sc, "PXP2_WR_PGLUE_EOP_ERROR");
/*
* if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
@@ -3964,7 +3990,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
REG_WR(sc, reg_offset, val);
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"FATAL HW block attention set2 0x%x",
(uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2));
rte_panic("HW block attention set2");
@@ -3979,10 +4005,10 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn)
if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
- PMD_DRV_LOG(ERR, "DB hw attention 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "DB hw attention 0x%08x", val);
/* DORQ discard attention */
if (val & 0x2) {
- PMD_DRV_LOG(ERR, "FATAL error from DORQ");
+ PMD_DRV_LOG(ERR, sc, "FATAL error from DORQ");
}
}
@@ -3994,7 +4020,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn)
val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
REG_WR(sc, reg_offset, val);
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"FATAL HW block attention set1 0x%08x",
(uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1));
rte_panic("HW block attention set1");
@@ -4015,7 +4041,7 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn)
val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
REG_WR(sc, reg_offset, val);
- PMD_DRV_LOG(WARNING, "SPIO5 hw attention");
+ PMD_DRV_LOG(WARNING, sc, "SPIO5 hw attention");
/* Fan failure attention */
elink_hw_reset_phy(&sc->link_params);
@@ -4023,7 +4049,9 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn)
}
if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
+ bnx2x_acquire_phy_lock(sc);
elink_handle_module_detect_int(&sc->link_params);
+ bnx2x_release_phy_lock(sc);
}
if (attn & HW_INTERRUT_ASSERT_SET_0) {
@@ -4105,14 +4133,14 @@ static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserte
}
val = ~deasserted;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"about to mask 0x%08x at %s addr 0x%08x", val,
(sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
reg_addr);
REG_WR(sc, reg_addr, val);
if (~sc->attn_state & deasserted) {
- PMD_DRV_LOG(ERR, "IGU error");
+ PMD_DRV_LOG(ERR, sc, "IGU error");
}
reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
@@ -4142,12 +4170,12 @@ static void bnx2x_attn_int(struct bnx2x_softc *sc)
uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x",
attn_bits, attn_ack, asserted, deasserted);
if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
- PMD_DRV_LOG(ERR, "BAD attention state");
+ PMD_DRV_LOG(ERR, sc, "BAD attention state");
}
/* handle bits that were raised */
@@ -4204,7 +4232,7 @@ static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc)
if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
if (rc < 0) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Failed to send pending mcast commands (%d)",
rc);
}
@@ -4224,17 +4252,17 @@ bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *e
switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) {
case ECORE_FILTER_MAC_PENDING:
- PMD_DRV_LOG(DEBUG, "Got SETUP_MAC completions");
+ PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MAC completions");
vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
break;
case ECORE_FILTER_MCAST_PENDING:
- PMD_DRV_LOG(DEBUG, "Got SETUP_MCAST completions");
+ PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MCAST completions");
bnx2x_handle_mcast_eqe(sc);
return;
default:
- PMD_DRV_LOG(NOTICE, "Unsupported classification command: %d",
+ PMD_DRV_LOG(NOTICE, sc, "Unsupported classification command: %d",
elem->message.data.eth_event.echo);
return;
}
@@ -4242,9 +4270,10 @@ bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *e
rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE, "Failed to schedule new commands (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to schedule new commands (%d)", rc);
} else if (rc > 0) {
- PMD_DRV_LOG(DEBUG, "Scheduled next pending commands...");
+ PMD_DRV_LOG(DEBUG, sc, "Scheduled next pending commands...");
}
}
@@ -4308,7 +4337,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_STAT_QUERY:
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "got statistics completion event %d",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "got statistics completion event %d",
sc->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -4316,7 +4345,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case EVENT_RING_OPCODE_CFC_DEL:
/* handle according to cid range */
/* we may want to verify here that the sc state is HALTING */
- PMD_DRV_LOG(DEBUG, "got delete ramrod for MULTI[%d]",
+ PMD_DRV_LOG(DEBUG, sc, "got delete ramrod for MULTI[%d]",
cid);
q_obj = bnx2x_cid_to_q_obj(sc, cid);
if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
@@ -4325,14 +4354,14 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
goto next_spqe;
case EVENT_RING_OPCODE_STOP_TRAFFIC:
- PMD_DRV_LOG(DEBUG, "got STOP TRAFFIC");
+ PMD_DRV_LOG(DEBUG, sc, "got STOP TRAFFIC");
if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
break;
}
goto next_spqe;
case EVENT_RING_OPCODE_START_TRAFFIC:
- PMD_DRV_LOG(DEBUG, "got START TRAFFIC");
+ PMD_DRV_LOG(DEBUG, sc, "got START TRAFFIC");
if (f_obj->complete_cmd
(sc, f_obj, ECORE_F_CMD_TX_START)) {
break;
@@ -4342,7 +4371,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
echo = elem->message.data.function_update_event.echo;
if (echo == SWITCH_UPDATE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got FUNC_SWITCH_UPDATE ramrod");
if (f_obj->complete_cmd(sc, f_obj,
ECORE_F_CMD_SWITCH_UPDATE))
@@ -4350,7 +4379,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
break;
}
} else {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"AFEX: ramrod completed FUNCTION_UPDATE");
f_obj->complete_cmd(sc, f_obj,
ECORE_F_CMD_AFEX_UPDATE);
@@ -4366,14 +4395,14 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_START:
- PMD_DRV_LOG(DEBUG, "got FUNC_START ramrod");
+ PMD_DRV_LOG(DEBUG, sc, "got FUNC_START ramrod");
if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
break;
}
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_STOP:
- PMD_DRV_LOG(DEBUG, "got FUNC_STOP ramrod");
+ PMD_DRV_LOG(DEBUG, sc, "got FUNC_STOP ramrod");
if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
break;
}
@@ -4385,7 +4414,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT):
cid =
elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
- PMD_DRV_LOG(DEBUG, "got RSS_UPDATE ramrod. CID %d",
+ PMD_DRV_LOG(DEBUG, sc, "got RSS_UPDATE ramrod. CID %d",
cid);
rss_raw->clear_pending(rss_raw);
break;
@@ -4396,7 +4425,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got (un)set mac ramrod");
bnx2x_handle_classification_eqe(sc, elem);
break;
@@ -4404,7 +4433,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got mcast ramrod");
bnx2x_handle_mcast_eqe(sc);
break;
@@ -4412,14 +4441,14 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got rx_mode ramrod");
bnx2x_handle_rx_mode_eqe(sc);
break;
default:
/* unknown event log error and continue */
- PMD_DRV_LOG(INFO, "Unknown EQ event %d, sc->state 0x%x",
+ PMD_DRV_LOG(INFO, sc, "Unknown EQ event %d, sc->state 0x%x",
elem->message.opcode, sc->state);
}
@@ -4445,12 +4474,16 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
uint16_t status;
int rc = 0;
+ PMD_DRV_LOG(DEBUG, sc, "---> SP TASK <---");
+
/* what work needs to be performed? */
status = bnx2x_update_dsb_idx(sc);
+ PMD_DRV_LOG(DEBUG, sc, "dsb status 0x%04x", status);
+
/* HW attentions */
if (status & BNX2X_DEF_SB_ATT_IDX) {
- PMD_DRV_LOG(DEBUG, "---> ATTN INTR <---");
+ PMD_DRV_LOG(DEBUG, sc, "---> ATTN INTR <---");
bnx2x_attn_int(sc);
status &= ~BNX2X_DEF_SB_ATT_IDX;
rc = 1;
@@ -4459,7 +4492,7 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
/* SP events: STAT_QUERY and others */
if (status & BNX2X_DEF_SB_IDX) {
/* handle EQ completions */
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "---> EQ INTR <---");
+ PMD_DRV_LOG(DEBUG, sc, "---> EQ INTR <---");
bnx2x_eq_int(sc);
bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
le16toh(sc->def_idx), IGU_INT_NOP, 1);
@@ -4468,7 +4501,7 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
/* if status is non zero then something went wrong */
if (unlikely(status)) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Got an unknown SP interrupt! (0x%04x)", status);
}
@@ -4484,7 +4517,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
- PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
+ "---> FP TASK QUEUE (%d) <--", fp->index);
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
@@ -4534,25 +4568,31 @@ int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
return 0;
}
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "Interrupt status 0x%04x", status);
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "Interrupt status 0x%04x", status);
//bnx2x_dump_status_block(sc);
FOR_EACH_ETH_QUEUE(sc, i) {
fp = &sc->fp[i];
mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
if (status & mask) {
+ /* acknowledge and disable further fastpath interrupts */
+ bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
+ 0, IGU_INT_DISABLE, 0);
bnx2x_handle_fp_tq(fp, scan_fp);
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
+ /* acknowledge and disable further slowpath interrupts */
+ bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
+ 0, IGU_INT_DISABLE, 0);
rc = bnx2x_handle_sp_tq(sc);
status &= ~0x1;
}
if (unlikely(status)) {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"Unexpected fastpath status (0x%08x)!", status);
}
@@ -4588,7 +4628,7 @@ static void bnx2x_init_func_obj(struct bnx2x_softc *sc)
{
sc->dmae_ready = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
ecore_init_func_obj(sc,
&sc->func_obj,
@@ -4604,7 +4644,7 @@ static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code)
struct ecore_func_state_params func_params = { NULL };
int rc;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* prepare the parameters for function state transitions */
bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -5193,7 +5233,7 @@ static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code)
break;
default:
- PMD_DRV_LOG(NOTICE, "Unknown load_code (0x%x) from MCP",
+ PMD_DRV_LOG(NOTICE, sc, "Unknown load_code (0x%x) from MCP",
load_code);
break;
}
@@ -5284,7 +5324,7 @@ bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg)
FUNC_MF_CFG_MAX_BW_SHIFT);
if (!max_cfg) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Max BW configured to 0 - using 100 instead");
max_cfg = 100;
}
@@ -5548,7 +5588,7 @@ static void bnx2x_igu_int_enable(struct bnx2x_softc *sc)
val |= IGU_PF_CONF_FUNC_EN;
- PMD_DRV_LOG(DEBUG, "write 0x%x to IGU mode %s",
+ PMD_DRV_LOG(DEBUG, sc, "write 0x%x to IGU mode %s",
val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
@@ -5596,7 +5636,7 @@ static void bnx2x_hc_int_disable(struct bnx2x_softc *sc)
REG_WR(sc, addr, val);
if (REG_RD(sc, addr) != val) {
- PMD_DRV_LOG(ERR, "proper val not read from HC IGU!");
+ PMD_DRV_LOG(ERR, sc, "proper val not read from HC IGU!");
}
}
@@ -5607,14 +5647,14 @@ static void bnx2x_igu_int_disable(struct bnx2x_softc *sc)
val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN);
- PMD_DRV_LOG(DEBUG, "write %x to IGU", val);
+ PMD_DRV_LOG(DEBUG, sc, "write %x to IGU", val);
/* flush all outstanding writes */
mb();
REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
- PMD_DRV_LOG(ERR, "proper val not read from IGU!");
+ PMD_DRV_LOG(ERR, sc, "proper val not read from IGU!");
}
}
@@ -5631,7 +5671,7 @@ static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code)
{
int i;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
for (i = 0; i < sc->num_queues; i++) {
bnx2x_init_eth_fp(sc, i);
@@ -5761,7 +5801,7 @@ static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state)
/* If there is no power capability, silently succeed */
if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) {
- PMD_DRV_LOG(WARNING, "No power capability");
+ PMD_DRV_LOG(WARNING, sc, "No power capability");
return 0;
}
@@ -5806,7 +5846,7 @@ static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state)
break;
default:
- PMD_DRV_LOG(NOTICE, "Can't support PCI power state = %d",
+ PMD_DRV_LOG(NOTICE, sc, "Can't support PCI power state = %d",
state);
return -1;
}
@@ -5824,7 +5864,7 @@ static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return FALSE;
@@ -5844,7 +5884,7 @@ static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
return TRUE;
}
- PMD_DRV_LOG(NOTICE, "Failed to get a resource lock 0x%x", resource);
+ PMD_DRV_LOG(NOTICE, sc, "Failed to get a resource lock 0x%x", resource);
return FALSE;
}
@@ -5937,7 +5977,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc)
} while (cnt-- > 0);
if (cnt <= 0) {
- PMD_DRV_LOG(NOTICE, "Still pending IGU requests bits=0x%08x!",
+ PMD_DRV_LOG(NOTICE, sc, "Still pending IGU requests bits=0x%08x!",
pend_bits);
return -1;
}
@@ -6018,7 +6058,7 @@ static int bnx2x_init_shmem(struct bnx2x_softc *sc)
} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
- PMD_DRV_LOG(NOTICE, "BAD MCP validity signature");
+ PMD_DRV_LOG(NOTICE, sc, "BAD MCP validity signature");
return -1;
}
@@ -6173,7 +6213,7 @@ static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global)
} while (cnt-- > 0);
if (cnt <= 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"ERROR: Tetris buffer didn't get empty or there "
"are still outstanding read requests after 1s! "
"sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
@@ -6246,14 +6286,14 @@ static int bnx2x_leader_reset(struct bnx2x_softc *sc)
load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
if (!load_code) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
rc = -1;
goto exit_leader_reset;
}
if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
(load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"MCP unexpected response, aborting");
rc = -1;
goto exit_leader_reset2;
@@ -6261,7 +6301,7 @@ static int bnx2x_leader_reset(struct bnx2x_softc *sc)
load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
if (!load_code) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
rc = -1;
goto exit_leader_reset2;
}
@@ -6269,7 +6309,7 @@ static int bnx2x_leader_reset(struct bnx2x_softc *sc)
/* try to recover after the failure */
if (bnx2x_process_kill(sc, global)) {
- PMD_DRV_LOG(NOTICE, "Something bad occurred on engine %d!",
+ PMD_DRV_LOG(NOTICE, sc, "Something bad occurred on engine %d!",
SC_PATH(sc));
rc = -1;
goto exit_leader_reset2;
@@ -6428,12 +6468,12 @@ bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
/* validate rings have enough entries to cross high thresholds */
if (sc->dropless_fc &&
pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) {
- PMD_DRV_LOG(WARNING, "rx bd ring threshold limit");
+ PMD_DRV_LOG(WARNING, sc, "rx bd ring threshold limit");
}
if (sc->dropless_fc &&
pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) {
- PMD_DRV_LOG(WARNING, "rcq ring threshold limit");
+ PMD_DRV_LOG(WARNING, sc, "rcq ring threshold limit");
}
pause->pri_map = 1;
@@ -6504,7 +6544,7 @@ bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t lea
struct ecore_queue_setup_params *setup_params = &q_params.params.setup;
int rc;
- PMD_DRV_LOG(DEBUG, "setting up queue %d", fp->index);
+ PMD_DRV_LOG(DEBUG, sc, "setting up queue %d", fp->index);
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
@@ -6522,11 +6562,11 @@ bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t lea
/* Change the state to INIT */
rc = ecore_queue_state_change(sc, &q_params);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Queue(%d) INIT failed", fp->index);
+ PMD_DRV_LOG(NOTICE, sc, "Queue(%d) INIT failed", fp->index);
return rc;
}
- PMD_DRV_LOG(DEBUG, "init complete");
+ PMD_DRV_LOG(DEBUG, sc, "init complete");
/* now move the Queue to the SETUP state */
memset(setup_params, 0, sizeof(*setup_params));
@@ -6550,7 +6590,7 @@ bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t lea
/* change the state to SETUP */
rc = ecore_queue_state_change(sc, &q_params);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Queue(%d) SETUP failed", fp->index);
+ PMD_DRV_LOG(NOTICE, sc, "Queue(%d) SETUP failed", fp->index);
return rc;
}
@@ -6678,11 +6718,11 @@ bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac,
rc = ecore_config_vlan_mac(sc, &ramrod_param);
if (rc == ECORE_EXISTS) {
- PMD_DRV_LOG(INFO, "Failed to schedule ADD operations (EEXIST)");
+ PMD_DRV_LOG(INFO, sc, "Failed to schedule ADD operations (EEXIST)");
/* do not treat adding same MAC as error */
rc = 0;
} else if (rc < 0) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"%s MAC failed (%d)", (set ? "Set" : "Delete"), rc);
}
@@ -6693,7 +6733,7 @@ static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set)
{
unsigned long ramrod_flags = 0;
- PMD_DRV_LOG(DEBUG, "Adding Ethernet MAC");
+ PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC");
bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -6849,7 +6889,7 @@ bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *da
}
/* report link status to OS, should be called under phy_lock */
-static void bnx2x_link_report(struct bnx2x_softc *sc)
+static void bnx2x_link_report_locked(struct bnx2x_softc *sc)
{
struct bnx2x_link_report_data cur_data;
@@ -6870,14 +6910,19 @@ static void bnx2x_link_report(struct bnx2x_softc *sc)
return;
}
+ PMD_DRV_LOG(INFO, sc, "Change in link status : cur_data = %lx, last_reported_link = %lx\n",
+ cur_data.link_report_flags,
+ sc->last_reported_link.link_report_flags);
+
sc->link_cnt++;
+ PMD_DRV_LOG(INFO, sc, "link status change count = %x\n", sc->link_cnt);
/* report new link params and remember the state for the next time */
rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&cur_data.link_report_flags)) {
- PMD_DRV_LOG(INFO, "NIC Link is Down");
+ PMD_DRV_LOG(INFO, sc, "NIC Link is Down");
} else {
__rte_unused const char *duplex;
__rte_unused const char *flow;
@@ -6917,12 +6962,20 @@ static void bnx2x_link_report(struct bnx2x_softc *sc)
flow = "none";
}
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"NIC Link is Up, %d Mbps %s duplex, Flow control: %s",
cur_data.line_speed, duplex, flow);
}
}
+static void
+bnx2x_link_report(struct bnx2x_softc *sc)
+{
+ bnx2x_acquire_phy_lock(sc);
+ bnx2x_link_report_locked(sc);
+ bnx2x_release_phy_lock(sc);
+}
+
void bnx2x_link_status_update(struct bnx2x_softc *sc)
{
if (sc->state != BNX2X_STATE_OPEN) {
@@ -7001,6 +7054,8 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
bnx2x_set_requested_fc(sc);
+ bnx2x_acquire_phy_lock(sc);
+
if (load_mode == LOAD_DIAG) {
lp->loopback_mode = ELINK_LOOPBACK_XGXS;
/* Prefer doing PHY loopback at 10G speed, if possible */
@@ -7020,6 +7075,8 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
rc = elink_phy_init(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
+
bnx2x_calc_fc_adv(sc);
if (sc->link_vars.link_up) {
@@ -7058,7 +7115,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
{
if ((sc->state != BNX2X_STATE_OPEN) ||
(atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
- PMD_DRV_LOG(WARNING, "periodic callout exit (state=0x%x)",
+ PMD_DRV_LOG(INFO, sc, "periodic callout exit (state=0x%x)",
sc->state);
return;
}
@@ -7070,7 +7127,9 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
*/
mb();
if (sc->port.pmf) {
+ bnx2x_acquire_phy_lock(sc);
elink_period_func(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
}
}
#ifdef BNX2X_PULSE
@@ -7095,7 +7154,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
if ((drv_pulse != mcp_pulse) &&
(drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
/* someone lost a heartbeat... */
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"drv_pulse (0x%x) != mcp_pulse (0x%x)",
drv_pulse, mcp_pulse);
}
@@ -7111,7 +7170,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
uint32_t load_code = 0;
int i, rc = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->state = BNX2X_STATE_OPENING_WAITING_LOAD;
@@ -7165,7 +7224,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
goto bnx2x_nic_load_error2;
}
} else {
- PMD_DRV_LOG(INFO, "Device has no MCP!");
+ PMD_DRV_LOG(INFO, sc, "Device has no MCP!");
load_code = bnx2x_nic_load_no_mcp(sc);
}
@@ -7177,7 +7236,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
/* Initialize HW */
if (bnx2x_init_hw(sc, load_code) != 0) {
- PMD_DRV_LOG(NOTICE, "HW init failed");
+ PMD_DRV_LOG(NOTICE, sc, "HW init failed");
bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
sc->state = BNX2X_STATE_CLOSED;
rc = -ENXIO;
@@ -7197,7 +7256,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
sc->state = BNX2X_STATE_OPENING_WAITING_PORT;
rc = bnx2x_func_start(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Function start failed!");
+ PMD_DRV_LOG(NOTICE, sc, "Function start failed!");
bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
@@ -7208,7 +7267,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
load_code =
bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
if (!load_code) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"MCP response failure, aborting");
sc->state = BNX2X_STATE_ERROR;
rc = -ENXIO;
@@ -7219,7 +7278,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
rc = bnx2x_setup_leading(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Setup leading failed!");
+ PMD_DRV_LOG(NOTICE, sc, "Setup leading failed!");
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7231,7 +7290,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Queue(%d) setup failed", i);
+ PMD_DRV_LOG(NOTICE, sc, "Queue(%d) setup failed", i);
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7239,7 +7298,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
rc = bnx2x_init_rss_pf(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "PF RSS init failed");
+ PMD_DRV_LOG(NOTICE, sc, "PF RSS init failed");
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7255,7 +7314,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
}
if (rc) {
- PMD_DRV_LOG(NOTICE, "Setting Ethernet MAC failed");
+ PMD_DRV_LOG(NOTICE, sc, "Setting Ethernet MAC failed");
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7307,13 +7366,13 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
/* wait for all pending SP commands to complete */
if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) {
- PMD_DRV_LOG(NOTICE, "Timeout waiting for all SPs to complete!");
+ PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!");
bnx2x_periodic_stop(sc);
bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE);
return -ENXIO;
}
- PMD_DRV_LOG(DEBUG, "NIC successfully loaded");
+ PMD_DRV_LOG(DEBUG, sc, "NIC successfully loaded");
return 0;
@@ -7362,7 +7421,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
/* Check if the driver is still running and bail out if it is. */
if (sc->state != BNX2X_STATE_CLOSED) {
- PMD_DRV_LOG(DEBUG, "Init called while driver is running!");
+ PMD_DRV_LOG(DEBUG, sc, "Init called while driver is running!");
rc = 0;
goto bnx2x_init_done;
}
@@ -7400,7 +7459,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
&& (!global ||!other_load_status))
&& bnx2x_trylock_leader_lock(sc)
&& !bnx2x_leader_reset(sc)) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Recovered during init");
break;
}
@@ -7410,7 +7469,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
sc->recovery_state = BNX2X_RECOVERY_FAILED;
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Recovery flow hasn't properly "
"completed yet, try again later. "
"If you still see this message after a "
@@ -7429,7 +7488,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
bnx2x_init_done:
if (rc) {
- PMD_DRV_LOG(NOTICE, "Initialization failed, "
+ PMD_DRV_LOG(NOTICE, sc, "Initialization failed, "
"stack notified driver is NOT running!");
}
@@ -7461,7 +7520,7 @@ static void bnx2x_get_function_num(struct bnx2x_softc *sc)
sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Relative function %d, Absolute function %d, Path %d",
sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}
@@ -7498,14 +7557,14 @@ static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg)
/* ensure PCIe capability is enabled */
caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found PCIe capability: "
+ PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: "
"id=0x%04X type=0x%04X addr=0x%08X",
caps->id, caps->type, caps->addr);
pci_read(sc, (caps->addr + reg), &ret, 2);
return ret;
}
- PMD_DRV_LOG(WARNING, "PCIe capability NOT FOUND!!!");
+ PMD_DRV_LOG(WARNING, sc, "PCIe capability NOT FOUND!!!");
return 0;
}
@@ -7523,7 +7582,7 @@ static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
*/
static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
{
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
struct bnx2x_pci_cap *caps;
uint16_t link_status;
@@ -7532,7 +7591,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
/* check if PCI Power Management is enabled */
caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found PM capability: "
+ PMD_DRV_LOG(DEBUG, sc, "Found PM capability: "
"id=0x%04X type=0x%04X addr=0x%08X",
caps->id, caps->type, caps->addr);
@@ -7546,7 +7605,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
sc->devinfo.pcie_link_width =
((link_status & PCIM_LINK_STA_WIDTH) >> 4);
- PMD_DRV_LOG(DEBUG, "PCIe link speed=%d width=%d",
+ PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d",
sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG;
@@ -7554,7 +7613,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
/* check if MSI capability is enabled */
caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found MSI capability at 0x%04x", reg);
+ PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg);
sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG;
sc->devinfo.pcie_msi_cap_reg = caps->addr;
@@ -7563,7 +7622,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
/* check if MSI-X capability is enabled */
caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found MSI-X capability at 0x%04x", reg);
+ PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg);
sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG;
sc->devinfo.pcie_msix_cap_reg = caps->addr;
@@ -7583,7 +7642,7 @@ static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc)
mf_info->multi_vnics_mode = 1;
if (!VALID_OVLAN(mf_info->ext_id)) {
- PMD_DRV_LOG(NOTICE, "Invalid VLAN (%d)", mf_info->ext_id);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid VLAN (%d)", mf_info->ext_id);
return 1;
}
@@ -7707,14 +7766,14 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
/* various MF mode sanity checks... */
if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Enumerated function %d is marked as hidden",
SC_PORT(sc));
return 1;
}
if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
- PMD_DRV_LOG(NOTICE, "vnics_per_port=%d multi_vnics_mode=%d",
+ PMD_DRV_LOG(NOTICE, sc, "vnics_per_port=%d multi_vnics_mode=%d",
mf_info->vnics_per_port, mf_info->multi_vnics_mode);
return 1;
}
@@ -7722,13 +7781,13 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
/* vnic id > 0 must have valid ovlan in switch-dependent mode */
if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
- PMD_DRV_LOG(NOTICE, "mf_mode=SD vnic_id=%d ovlan=%d",
+ PMD_DRV_LOG(NOTICE, sc, "mf_mode=SD vnic_id=%d ovlan=%d",
SC_VN(sc), OVLAN(sc));
return 1;
}
if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"mf_mode=SD multi_vnics_mode=%d ovlan=%d",
mf_info->multi_vnics_mode, OVLAN(sc));
return 1;
@@ -7747,7 +7806,7 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
&& !VALID_OVLAN(ovlan1))
|| ((!mf_info->multi_vnics_mode)
&& VALID_OVLAN(ovlan1)))) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"mf_mode=SD function %d MF config "
"mismatch, multi_vnics_mode=%d ovlan=%d",
i, mf_info->multi_vnics_mode,
@@ -7771,7 +7830,7 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
&& !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE)
&& VALID_OVLAN(ovlan2)
&& (ovlan1 == ovlan2)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"mf_mode=SD functions %d and %d "
"have the same ovlan (%d)",
i, j, ovlan1);
@@ -7801,7 +7860,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
}
if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
- PMD_DRV_LOG(NOTICE, "Invalid mf_cfg_base!");
+ PMD_DRV_LOG(NOTICE, sc, "Invalid mf_cfg_base!");
return 1;
}
@@ -7819,7 +7878,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
mf_info->mf_mode = MULTI_FUNCTION_SI;
} else {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Invalid config for Switch Independent mode");
}
@@ -7835,7 +7894,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
mf_info->mf_mode = MULTI_FUNCTION_SD;
} else {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Invalid config for Switch Dependent mode");
}
@@ -7859,14 +7918,14 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
(mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
mf_info->mf_mode = MULTI_FUNCTION_AFEX;
} else {
- PMD_DRV_LOG(NOTICE, "Invalid config for AFEX mode");
+ PMD_DRV_LOG(NOTICE, sc, "Invalid config for AFEX mode");
}
break;
default:
- PMD_DRV_LOG(NOTICE, "Unknown MF mode (0x%08x)",
+ PMD_DRV_LOG(NOTICE, sc, "Unknown MF mode (0x%08x)",
(val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
return 1;
@@ -7898,7 +7957,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
if (mf_info->mf_mode == SINGLE_FUNCTION) {
/* invalid MF config */
if (SC_VN(sc) >= 1) {
- PMD_DRV_LOG(NOTICE, "VNIC ID >= 1 in SF mode");
+ PMD_DRV_LOG(NOTICE, sc, "VNIC ID >= 1 in SF mode");
return 1;
}
@@ -7927,7 +7986,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
default:
- PMD_DRV_LOG(NOTICE, "Get MF config failed (mf_mode=0x%08x)",
+ PMD_DRV_LOG(NOTICE, sc, "Get MF config failed (mf_mode=0x%08x)",
mf_info->mf_mode);
return 1;
}
@@ -7955,7 +8014,7 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
int port;
uint32_t mac_hi, mac_lo, val;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
port = SC_PORT(sc);
mac_hi = mac_lo = 0;
@@ -8029,7 +8088,7 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
if ((mac_lo == 0) && (mac_hi == 0)) {
*sc->mac_addr_str = 0;
- PMD_DRV_LOG(NOTICE, "No Ethernet address programmed!");
+ PMD_DRV_LOG(NOTICE, sc, "No Ethernet address programmed!");
} else {
sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8);
sc->link_params.mac_addr[1] = (uint8_t) (mac_hi);
@@ -8045,7 +8104,8 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
sc->link_params.mac_addr[3],
sc->link_params.mac_addr[4],
sc->link_params.mac_addr[5]);
- PMD_DRV_LOG(DEBUG, "Ethernet address: %s", sc->mac_addr_str);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Ethernet address: %s", sc->mac_addr_str);
}
return 0;
@@ -8060,24 +8120,24 @@ static void bnx2x_media_detect(struct bnx2x_softc *sc)
case ELINK_ETH_PHY_XFP_FIBER:
case ELINK_ETH_PHY_KR:
case ELINK_ETH_PHY_CX4:
- PMD_DRV_LOG(INFO, "Found 10GBase-CX4 media.");
+ PMD_DRV_LOG(INFO, sc, "Found 10GBase-CX4 media.");
sc->media = IFM_10G_CX4;
break;
case ELINK_ETH_PHY_DA_TWINAX:
- PMD_DRV_LOG(INFO, "Found 10Gb Twinax media.");
+ PMD_DRV_LOG(INFO, sc, "Found 10Gb Twinax media.");
sc->media = IFM_10G_TWINAX;
break;
case ELINK_ETH_PHY_BASE_T:
- PMD_DRV_LOG(INFO, "Found 10GBase-T media.");
+ PMD_DRV_LOG(INFO, sc, "Found 10GBase-T media.");
sc->media = IFM_10G_T;
break;
case ELINK_ETH_PHY_NOT_PRESENT:
- PMD_DRV_LOG(INFO, "Media not present.");
+ PMD_DRV_LOG(INFO, sc, "Media not present.");
sc->media = 0;
break;
case ELINK_ETH_PHY_UNSPECIFIED:
default:
- PMD_DRV_LOG(INFO, "Unknown media!");
+ PMD_DRV_LOG(INFO, sc, "Unknown media!");
sc->media = 0;
break;
}
@@ -8140,7 +8200,7 @@ static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc)
sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
if (igu_sb_cnt == 0) {
- PMD_DRV_LOG(ERR, "CAM configuration error");
+ PMD_DRV_LOG(ERR, sc, "CAM configuration error");
return -1;
}
@@ -8177,7 +8237,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
sc->devinfo.chip_id |= 0x1;
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)",
sc->devinfo.chip_id,
((sc->devinfo.chip_id >> 16) & 0xffff),
@@ -8188,7 +8248,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
val = (REG_RD(sc, 0x2874) & 0x55);
if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) {
sc->flags |= BNX2X_ONE_PORT_FLAG;
- PMD_DRV_LOG(DEBUG, "single port device");
+ PMD_DRV_LOG(DEBUG, sc, "single port device");
}
/* set the doorbell size */
@@ -8212,7 +8272,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
sc->devinfo.chip_port_mode =
(val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
- PMD_DRV_LOG(DEBUG, "Port mode = %s", (val) ? "4" : "2");
+ PMD_DRV_LOG(DEBUG, sc, "Port mode = %s", (val) ? "4" : "2");
}
/* get the function and path info for the device */
@@ -8227,7 +8287,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
if (!sc->devinfo.shmem_base) {
/* this should ONLY prevent upcoming shmem reads */
- PMD_DRV_LOG(INFO, "MCP not active");
+ PMD_DRV_LOG(INFO, sc, "MCP not active");
sc->flags |= BNX2X_NO_MCP_FLAG;
return 0;
}
@@ -8236,7 +8296,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
(SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
- PMD_DRV_LOG(NOTICE, "Invalid SHMEM validity signature: 0x%08x",
+ PMD_DRV_LOG(NOTICE, sc, "Invalid SHMEM validity signature: 0x%08x",
val);
return 0;
}
@@ -8249,7 +8309,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
((sc->devinfo.bc_ver >> 24) & 0xff),
((sc->devinfo.bc_ver >> 16) & 0xff),
((sc->devinfo.bc_ver >> 8) & 0xff));
- PMD_DRV_LOG(INFO, "Bootcode version: %s", sc->devinfo.bc_ver_str);
+ PMD_DRV_LOG(INFO, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str);
/* get the bootcode shmem address */
sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc);
@@ -8304,7 +8364,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
}
if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"FORCING IGU Normal Mode failed!!!");
bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
return -1;
@@ -8312,10 +8372,10 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
}
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
- PMD_DRV_LOG(DEBUG, "IGU Backward Compatible Mode");
+ PMD_DRV_LOG(DEBUG, sc, "IGU Backward Compatible Mode");
sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
} else {
- PMD_DRV_LOG(DEBUG, "IGU Normal Mode");
+ PMD_DRV_LOG(DEBUG, sc, "IGU Normal Mode");
}
rc = bnx2x_get_igu_cam_info(sc);
@@ -8389,7 +8449,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
}
if (!(sc->port.supported[0] || sc->port.supported[1])) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)",
SHMEM_RD(sc,
dev_info.port_hw_config
@@ -8415,7 +8475,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18);
break;
default:
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid switch config in"
"link_config=0x%08x",
sc->port.link_config[0]);
@@ -8423,7 +8483,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
}
}
- PMD_DRV_LOG(INFO, "PHY addr 0x%08x", sc->port.phy_addr);
+ PMD_DRV_LOG(INFO, sc, "PHY addr 0x%08x", sc->port.phy_addr);
/* mask what we support according to speed_cap_mask per configuration */
for (idx = 0; idx < cfg_size; idx++) {
@@ -8476,7 +8536,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
}
}
- PMD_DRV_LOG(INFO, "PHY supported 0=0x%08x 1=0x%08x",
+ PMD_DRV_LOG(INFO, sc, "PHY supported 0=0x%08x 1=0x%08x",
sc->port.supported[0], sc->port.supported[1]);
}
@@ -8535,7 +8595,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_10baseT_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8555,7 +8615,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_10baseT_Half | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8574,7 +8634,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_100baseT_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8594,7 +8654,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_100baseT_Half | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8612,7 +8672,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_1000baseT_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8630,7 +8690,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_2500baseX_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8649,7 +8709,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
(ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8664,7 +8724,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
break;
default:
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x", link_config,
sc->link_params.speed_cap_mask[idx]);
@@ -8695,7 +8755,7 @@ static void bnx2x_get_phy_info(struct bnx2x_softc *sc)
uint8_t port = SC_PORT(sc);
uint32_t eee_mode;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* shmem data already read in bnx2x_get_shmem_info() */
@@ -8855,7 +8915,7 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
snprintf(buf, sizeof(buf), "fp_%d_sb", i);
if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block),
&fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) {
- PMD_DRV_LOG(NOTICE, "Failed to alloc %s", buf);
+ PMD_DRV_LOG(NOTICE, sc, "Failed to alloc %s", buf);
return -1;
} else {
if (CHIP_IS_E2E3(sc)) {
@@ -8945,7 +9005,7 @@ static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc)
uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
if (!rc) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
return -1;
}
@@ -8977,12 +9037,12 @@ static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc)
tmp = bnx2x_prev_path_get_entry(sc);
if (tmp) {
if (tmp->aer) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Path %d/%d/%d was marked by AER",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
} else {
rc = TRUE;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Path %d/%d/%d was already cleaned from previous drivers",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
}
@@ -9003,11 +9063,11 @@ static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi)
tmp = bnx2x_prev_path_get_entry(sc);
if (tmp) {
if (!tmp->aer) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Re-marking AER in path %d/%d/%d",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
} else {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Removing AER indication from path %d/%d/%d",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
tmp->aer = 0;
@@ -9023,7 +9083,7 @@ static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi)
tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node),
RTE_CACHE_LINE_SIZE);
if (!tmp) {
- PMD_DRV_LOG(NOTICE, "Failed to allocate 'bnx2x_prev_list_node'");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to allocate 'bnx2x_prev_list_node'");
return -1;
}
@@ -9048,13 +9108,13 @@ static int bnx2x_do_flr(struct bnx2x_softc *sc)
/* only E2 and onwards support FLR */
if (CHIP_IS_E1x(sc)) {
- PMD_DRV_LOG(WARNING, "FLR not supported in E1H");
+ PMD_DRV_LOG(WARNING, sc, "FLR not supported in E1H");
return -1;
}
/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"FLR not supported by BC_VER: 0x%08x",
sc->devinfo.bc_ver);
return -1;
@@ -9071,7 +9131,7 @@ static int bnx2x_do_flr(struct bnx2x_softc *sc)
}
}
- PMD_DRV_LOG(NOTICE, "PCIE transaction is not cleared, "
+ PMD_DRV_LOG(NOTICE, sc, "PCIE transaction is not cleared, "
"proceeding with reset anyway");
clear:
@@ -9219,7 +9279,7 @@ static int bnx2x_prev_unload_common(struct bnx2x_softc *sc)
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
if (tmp_reg == 0x7) {
- PMD_DRV_LOG(DEBUG, "UNDI previously loaded");
+ PMD_DRV_LOG(DEBUG, sc, "UNDI previously loaded");
prev_undi = TRUE;
/* clear the UNDI indication */
REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
@@ -9238,7 +9298,7 @@ static int bnx2x_prev_unload_common(struct bnx2x_softc *sc)
break;
}
- PMD_DRV_LOG(DEBUG, "BRB still has 0x%08x", tmp_reg);
+ PMD_DRV_LOG(DEBUG, sc, "BRB still has 0x%08x", tmp_reg);
/* reset timer as long as BRB actually gets emptied */
if (prev_brb > tmp_reg) {
@@ -9256,7 +9316,7 @@ static int bnx2x_prev_unload_common(struct bnx2x_softc *sc)
}
if (!timer_count) {
- PMD_DRV_LOG(NOTICE, "Failed to empty BRB");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to empty BRB");
}
}
@@ -9311,7 +9371,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc)
return 0;
}
- PMD_DRV_LOG(INFO, "Could not FLR");
+ PMD_DRV_LOG(INFO, sc, "Could not FLR");
/* Close the MCP request, return failure */
rc = bnx2x_prev_mcp_done(sc);
@@ -9358,7 +9418,7 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
rc = -1;
break;
}
@@ -9378,7 +9438,7 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
} while (--time_counter);
if (!time_counter || rc) {
- PMD_DRV_LOG(NOTICE, "Failed to unload previous driver!");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to unload previous driver!");
rc = -1;
}
@@ -9395,7 +9455,7 @@ bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabl
sc->dcb_state = FALSE;
sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"DCB state [%s:%s]",
dcb_on ? "ON" : "OFF",
(dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" :
@@ -9428,7 +9488,7 @@ static void bnx2x_init_multi_cos(struct bnx2x_softc *sc)
if (cos < sc->max_cos) {
sc->prio_to_cos[pri] = cos;
} else {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"Invalid COS %d for priority %d "
"(max COS is %d), setting to 0", cos, pri,
(sc->max_cos - 1));
@@ -9449,7 +9509,7 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap),
RTE_CACHE_LINE_SIZE);
if (!cap) {
- PMD_DRV_LOG(NOTICE, "Failed to allocate memory");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory");
return -ENOMEM;
}
@@ -9460,7 +9520,7 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
pci_read(sc, PCIR_STATUS, &status, 2);
if (!(status & PCIM_STATUS_CAPPRESENT)) {
#endif
- PMD_DRV_LOG(NOTICE, "PCIe capability reading failed");
+ PMD_DRV_LOG(NOTICE, sc, "PCIe capability reading failed");
return -1;
}
@@ -9480,7 +9540,7 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
sizeof(struct bnx2x_pci_cap),
RTE_CACHE_LINE_SIZE);
if (!cap->next) {
- PMD_DRV_LOG(NOTICE, "Failed to allocate memory");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory");
return -ENOMEM;
}
cap = cap->next;
@@ -9516,25 +9576,25 @@ void bnx2x_load_firmware(struct bnx2x_softc *sc)
? FW_NAME_57711 : FW_NAME_57810;
f = open(fwname, O_RDONLY);
if (f < 0) {
- PMD_DRV_LOG(NOTICE, "Can't open firmware file");
+ PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file");
return;
}
if (fstat(f, &st) < 0) {
- PMD_DRV_LOG(NOTICE, "Can't stat firmware file");
+ PMD_DRV_LOG(NOTICE, sc, "Can't stat firmware file");
close(f);
return;
}
sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE);
if (!sc->firmware) {
- PMD_DRV_LOG(NOTICE, "Can't allocate memory for firmware");
+ PMD_DRV_LOG(NOTICE, sc, "Can't allocate memory for firmware");
close(f);
return;
}
if (read(f, sc->firmware, st.st_size) != st.st_size) {
- PMD_DRV_LOG(NOTICE, "Can't read firmware data");
+ PMD_DRV_LOG(NOTICE, sc, "Can't read firmware data");
close(f);
return;
}
@@ -9542,10 +9602,11 @@ void bnx2x_load_firmware(struct bnx2x_softc *sc)
sc->fw_len = st.st_size;
if (sc->fw_len < FW_HEADER_LEN) {
- PMD_DRV_LOG(NOTICE, "Invalid fw size: %" PRIu64, sc->fw_len);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Invalid fw size: %" PRIu64, sc->fw_len);
return;
}
- PMD_DRV_LOG(DEBUG, "fw_len = %" PRIu64, sc->fw_len);
+ PMD_DRV_LOG(DEBUG, sc, "fw_len = %" PRIu64, sc->fw_len);
}
static void
@@ -9612,11 +9673,11 @@ int bnx2x_attach(struct bnx2x_softc *sc)
{
int rc;
- PMD_DRV_LOG(DEBUG, "Starting attach...");
+ PMD_DRV_LOG(DEBUG, sc, "Starting attach...");
rc = bnx2x_pci_get_caps(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "PCIe caps reading was failed");
+ PMD_DRV_LOG(NOTICE, sc, "PCIe caps reading was failed");
return rc;
}
@@ -9655,7 +9716,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
/* get device info and set params */
if (bnx2x_get_device_info(sc) != 0) {
- PMD_DRV_LOG(NOTICE, "getting device info");
+ PMD_DRV_LOG(NOTICE, sc, "getting device info");
return -ENXIO;
}
@@ -9754,7 +9815,7 @@ bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id,
mb();
- PMD_DRV_LOG(DEBUG, "write 0x%08x to IGU(via GRC) addr 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "write 0x%08x to IGU(via GRC) addr 0x%x",
ctl, igu_addr_ctl);
REG_WR(sc, igu_addr_ctl, ctl);
@@ -9766,7 +9827,7 @@ bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id,
}
if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Unable to finish IGU cleanup: "
"idu_sb_id %d offset %d bit %d (cnt %d)",
idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt);
@@ -9786,7 +9847,7 @@ static void bnx2x_reset_common(struct bnx2x_softc *sc)
{
uint32_t val = 0x1400;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* reset_common */
REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR),
@@ -9820,8 +9881,10 @@ static void bnx2x_common_init_phy(struct bnx2x_softc *sc)
shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
}
+ bnx2x_acquire_phy_lock(sc);
elink_common_init_phy(sc, shmem_base, shmem2_base,
sc->devinfo.chip_id, 0);
+ bnx2x_release_phy_lock(sc);
}
static void bnx2x_pf_disable(struct bnx2x_softc *sc)
@@ -9995,7 +10058,8 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
uint8_t abs_func_id;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "starting common init for func %d", SC_ABS_FUNC(sc));
+ PMD_DRV_LOG(DEBUG, sc,
+ "starting common init for func %d", SC_ABS_FUNC(sc));
/*
* take the RESET lock to protect undi_unload flow from accessing
@@ -10078,12 +10142,12 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "PXP2 CFG failed");
+ PMD_DRV_LOG(NOTICE, sc, "PXP2 CFG failed");
return -1;
}
val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "PXP2 RD_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "PXP2 RD_INIT failed");
return -1;
}
@@ -10205,7 +10269,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
} while (factor-- && (val != 1));
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "ATC_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "ATC_INIT failed");
return -1;
}
}
@@ -10343,7 +10407,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
if (sizeof(union cdu_context) != 1024) {
/* we currently assume that a context is 1024 bytes */
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"please adjust the size of cdu_context(%ld)",
(long)sizeof(union cdu_context));
}
@@ -10405,17 +10469,17 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
/* finish CFC init */
val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "CFC LL_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "CFC LL_INIT failed");
return -1;
}
val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "CFC AC_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "CFC AC_INIT failed");
return -1;
}
val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "CFC CAM_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "CFC CAM_INIT failed");
return -1;
}
REG_WR(sc, CFC_REG_DEBUG0, 0);
@@ -10468,7 +10532,7 @@ static int bnx2x_init_hw_port(struct bnx2x_softc *sc)
uint32_t low, high;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "starting port init for port %d", port);
+ PMD_DRV_LOG(DEBUG, sc, "starting port init for port %d", port);
REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
@@ -10695,7 +10759,7 @@ bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg,
uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
if (val != 0) {
- PMD_DRV_LOG(NOTICE, "%s usage count=%d", msg, val);
+ PMD_DRV_LOG(NOTICE, sc, "%s usage count=%d", msg, val);
return -1;
}
@@ -10787,7 +10851,7 @@ bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func,
int ret = 0;
if (REG_RD(sc, comp_addr)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Cleanup complete was not 0 before sending");
return -1;
}
@@ -10800,8 +10864,8 @@ bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func,
REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
- PMD_DRV_LOG(NOTICE, "FW final cleanup did not succeed");
- PMD_DRV_LOG(DEBUG, "At timeout completion address contained %x",
+ PMD_DRV_LOG(NOTICE, sc, "FW final cleanup did not succeed");
+ PMD_DRV_LOG(DEBUG, sc, "At timeout completion address contained %x",
(REG_RD(sc, comp_addr)));
rte_panic("FLR cleanup failed");
return -1;
@@ -10917,28 +10981,30 @@ static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
__rte_unused uint32_t val;
val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
- PMD_DRV_LOG(DEBUG, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);
val = REG_RD(sc, PBF_REG_DISABLE_PF);
- PMD_DRV_LOG(DEBUG, "PBF_REG_DISABLE_PF is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "PBF_REG_DISABLE_PF is 0x%x", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
- PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
- PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
- PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);
val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
- PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc,
+ "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);
val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
- PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc,
+ "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);
val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
- PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
val);
}
@@ -10982,7 +11048,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc)
/* Verify no pending pci transactions */
if (bnx2x_is_pcie_pending(sc)) {
- PMD_DRV_LOG(NOTICE, "PCIE Transactions still pending");
+ PMD_DRV_LOG(NOTICE, sc, "PCIE Transactions still pending");
}
/* Debug */
@@ -11009,13 +11075,13 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
int main_mem_width, rc;
uint32_t i;
- PMD_DRV_LOG(DEBUG, "starting func init for func %d", func);
+ PMD_DRV_LOG(DEBUG, sc, "starting func init for func %d", func);
/* FLR cleanup */
if (!CHIP_IS_E1x(sc)) {
rc = bnx2x_pf_flr_clnup(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "FLR cleanup failed!");
+ PMD_DRV_LOG(NOTICE, sc, "FLR cleanup failed!");
return rc;
}
}
@@ -11262,7 +11328,7 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
val = REG_RD(sc, main_mem_prty_clr);
if (val) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Parity errors in HC block during function init (0x%x)!",
val);
}
@@ -11297,10 +11363,12 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
static void bnx2x_link_reset(struct bnx2x_softc *sc)
{
if (!BNX2X_NOMCP(sc)) {
+ bnx2x_acquire_phy_lock(sc);
elink_lfa_reset(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
} else {
if (!CHIP_REV_IS_SLOW(sc)) {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"Bootcode is missing - cannot reset link");
}
}
@@ -11330,7 +11398,7 @@ static void bnx2x_reset_port(struct bnx2x_softc *sc)
/* Check for BRB port occupancy */
val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4);
if (val) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"BRB1 is not empty, %d blocks are occupied", val);
}
}
@@ -11524,10 +11592,10 @@ static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
int ret;
int data_begin = cut_gzip_prefix(zbuf, len);
- PMD_DRV_LOG(DEBUG, "ecore_gunzip %d", len);
+ PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len);
if (data_begin <= 0) {
- PMD_DRV_LOG(NOTICE, "bad gzip prefix");
+ PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix");
return -1;
}
@@ -11539,19 +11607,19 @@ static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
ret = inflateInit2(&zlib_stream, -MAX_WBITS);
if (ret != Z_OK) {
- PMD_DRV_LOG(NOTICE, "zlib inflateInit2 error");
+ PMD_DRV_LOG(NOTICE, sc, "zlib inflateInit2 error");
return ret;
}
ret = inflate(&zlib_stream, Z_FINISH);
if ((ret != Z_STREAM_END) && (ret != Z_OK)) {
- PMD_DRV_LOG(NOTICE, "zlib inflate error: %d %s", ret,
+ PMD_DRV_LOG(NOTICE, sc, "zlib inflate error: %d %s", ret,
zlib_stream.msg);
}
sc->gz_outlen = zlib_stream.total_out;
if (sc->gz_outlen & 0x3) {
- PMD_DRV_LOG(NOTICE, "firmware is not aligned. gz_outlen == %d",
+ PMD_DRV_LOG(NOTICE, sc, "firmware is not aligned. gz_outlen == %d",
sc->gz_outlen);
}
sc->gz_outlen >>= 2;
@@ -11670,7 +11738,7 @@ void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
int i = 0;
__rte_unused uint32_t ext_phy_type;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (sc->link_vars.phy_flags & PHY_XGXS_FLAG)
ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(REG_RD(sc,
sc->
@@ -11689,97 +11757,102 @@ void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
dev_info.port_hw_config
[0].external_phy_config)));
- PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
/* Hardware chip info. */
- PMD_INIT_LOG(DEBUG, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
- PMD_INIT_LOG(DEBUG, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
+ PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
+ PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
(CHIP_METAL(sc) >> 4));
/* Bus info. */
- PMD_INIT_LOG(DEBUG, "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
switch (sc->devinfo.pcie_link_speed) {
case 1:
- PMD_INIT_LOG(DEBUG, "%23s", "2.5 Gbps");
+ PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps");
break;
case 2:
- PMD_INIT_LOG(DEBUG, "%21s", "5 Gbps");
+ PMD_DRV_LOG(INFO, sc, "%21s", "5 Gbps");
break;
case 4:
- PMD_INIT_LOG(DEBUG, "%21s", "8 Gbps");
+ PMD_DRV_LOG(INFO, sc, "%21s", "8 Gbps");
break;
default:
- PMD_INIT_LOG(DEBUG, "%33s", "Unknown link speed");
+ PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed");
}
/* Device features. */
- PMD_INIT_LOG(DEBUG, "%12s : ", "Flags");
+ PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags");
/* Miscellaneous flags. */
if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) {
- PMD_INIT_LOG(DEBUG, "%18s", "MSI");
+ PMD_DRV_LOG(INFO, sc, "%18s", "MSI");
i++;
}
if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) {
if (i > 0)
- PMD_INIT_LOG(DEBUG, "|");
- PMD_INIT_LOG(DEBUG, "%20s", "MSI-X");
+ PMD_DRV_LOG(INFO, sc, "|");
+ PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X");
i++;
}
if (IS_PF(sc)) {
- PMD_INIT_LOG(DEBUG, "%12s : ", "Queues");
+ PMD_DRV_LOG(INFO, sc, "%12s : ", "Queues");
switch (sc->sp->rss_rdata.rss_mode) {
case ETH_RSS_MODE_DISABLED:
- PMD_INIT_LOG(DEBUG, "%19s", "None");
+ PMD_DRV_LOG(INFO, sc, "%19s", "None");
break;
case ETH_RSS_MODE_REGULAR:
- PMD_INIT_LOG(DEBUG, "%18s : %d", "RSS", sc->num_queues);
+ PMD_DRV_LOG(INFO, sc,
+ "%18s : %d", "RSS", sc->num_queues);
break;
default:
- PMD_INIT_LOG(DEBUG, "%22s", "Unknown");
+ PMD_DRV_LOG(INFO, sc, "%22s", "Unknown");
break;
}
}
/* RTE and Driver versions */
- PMD_INIT_LOG(DEBUG, "%12s : %s", "DPDK",
- rte_version());
- PMD_INIT_LOG(DEBUG, "%12s : %s", "Driver",
- bnx2x_pmd_version());
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK",
+ rte_version());
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver",
+ bnx2x_pmd_version());
/* Firmware versions and device features. */
- PMD_INIT_LOG(DEBUG, "%12s : %d.%d.%d",
+ PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d",
"Firmware",
BNX2X_5710_FW_MAJOR_VERSION,
BNX2X_5710_FW_MINOR_VERSION,
BNX2X_5710_FW_REVISION_VERSION);
- PMD_INIT_LOG(DEBUG, "%12s : %s",
+ PMD_DRV_LOG(INFO, sc, "%12s : %s",
"Bootcode", sc->devinfo.bc_ver_str);
- PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
- PMD_INIT_LOG(DEBUG, "%12s : %u", "Bnx2x Func", sc->pcie_func);
- PMD_INIT_LOG(DEBUG, "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "DMAE Is",
+ PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func);
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is",
(sc->dmae_ready ? "Ready" : "Not Ready"));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
- PMD_INIT_LOG(DEBUG, "%12s : %u", "MTU", sc->mtu);
- PMD_INIT_LOG(DEBUG, "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
- PMD_INIT_LOG(DEBUG, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr",
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
+ PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu);
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
+ PMD_DRV_LOG(INFO, sc, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr",
sc->link_params.mac_addr[0],
sc->link_params.mac_addr[1],
sc->link_params.mac_addr[2],
sc->link_params.mac_addr[3],
sc->link_params.mac_addr[4],
sc->link_params.mac_addr[5]);
- PMD_INIT_LOG(DEBUG, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "State", get_state(sc->state));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "State", get_state(sc->state));
if (sc->recovery_state)
- PMD_INIT_LOG(DEBUG, "%12s : %s", "Recovery",
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery",
get_recovery_state(sc->recovery_state));
- PMD_INIT_LOG(DEBUG, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
+ PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
sc->cq_spq_left, sc->eq_spq_left);
- PMD_INIT_LOG(DEBUG, "%12s : %x", "Switch", sc->link_params.switch_cfg);
- PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %x", "Switch", sc->link_params.switch_cfg);
+ PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
}
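The bnx2x.c hunks above bracket every elink call with bnx2x_acquire_phy_lock()/bnx2x_release_phy_lock(), whose bodies are not shown in this patch excerpt. A minimal sketch of one plausible implementation, assuming the helpers simply wrap the BNX2X_PHY_LOCK()/BNX2X_PHY_UNLOCK() macros added to bnx2x.h below (the real helpers may do more, e.g. also take a shared hardware lock):

static inline void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc)
{
	BNX2X_PHY_LOCK(sc);	/* rte_spinlock_lock(&sc->port.phy_mtx) */
}

static inline void bnx2x_release_phy_lock(struct bnx2x_softc *sc)
{
	BNX2X_PHY_UNLOCK(sc);	/* rte_spinlock_unlock(&sc->port.phy_mtx) */
}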
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 0f6024fb..74780725 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -725,6 +725,13 @@ struct bnx2x_port {
uint32_t phy_addr;
+ /* Used to synchronize phy accesses. */
+ rte_spinlock_t phy_mtx;
+ char phy_mtx_name[32];
+
+#define BNX2X_PHY_LOCK(sc) rte_spinlock_lock(&sc->port.phy_mtx)
+#define BNX2X_PHY_UNLOCK(sc) rte_spinlock_unlock(&sc->port.phy_mtx)
+
/*
* MCP scratchpad address for port specific statistics.
* The device is responsible for writing statistics
@@ -803,6 +810,10 @@ struct bnx2x_mf_info {
/* Device information data structure. */
struct bnx2x_devinfo {
+#if 1
+#define NAME_SIZE 128
+ char name[NAME_SIZE];
+#endif
/* PCIe info */
uint16_t vendor_id;
uint16_t device_id;
@@ -820,6 +831,7 @@ struct bnx2x_devinfo {
#define CHIP_ID(sc) ((sc)->devinfo.chip_id & 0xffff0000)
#define CHIP_NUM(sc) ((sc)->devinfo.chip_id >> 16)
/* device ids */
+#define CHIP_NUM_57710 0x164e
#define CHIP_NUM_57711 0x164f
#define CHIP_NUM_57711E 0x1650
#define CHIP_NUM_57712 0x1662
@@ -861,6 +873,8 @@ struct bnx2x_devinfo {
#define CHIP_METAL(sc) ((sc->devinfo.chip_id) & 0x00000ff0)
#define CHIP_BOND_ID(sc) ((sc->devinfo.chip_id) & 0x0000000f)
+#define CHIP_IS_E1(sc) (CHIP_NUM(sc) == CHIP_NUM_57710)
+#define CHIP_IS_57710(sc) (CHIP_NUM(sc) == CHIP_NUM_57710)
#define CHIP_IS_57711(sc) (CHIP_NUM(sc) == CHIP_NUM_57711)
#define CHIP_IS_57711E(sc) (CHIP_NUM(sc) == CHIP_NUM_57711E)
#define CHIP_IS_E1H(sc) ((CHIP_IS_57711(sc)) || \
@@ -1418,7 +1432,7 @@ struct bnx2x_func_init_params {
static inline void
bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val)
{
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
rte_write8(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
@@ -1428,10 +1442,10 @@ bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val)
{
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 2) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 16-bit write to 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 16-bit write to 0x%08lx",
(unsigned long)offset);
#endif
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%04x",
(unsigned long)offset, val);
rte_write16(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
@@ -1442,11 +1456,11 @@ bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
{
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 4) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 32-bit write to 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 32-bit write to 0x%08lx",
(unsigned long)offset);
#endif
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
rte_write32(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
@@ -1457,7 +1471,7 @@ bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
uint8_t val;
val = rte_read8((uint8_t *)sc->bar[BAR0].base_addr + offset);
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
return val;
@@ -1470,12 +1484,12 @@ bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 2) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 16-bit read from 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 16-bit read from 0x%08lx",
(unsigned long)offset);
#endif
val = rte_read16(((uint8_t *)sc->bar[BAR0].base_addr + offset));
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
return val;
@@ -1488,12 +1502,12 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 4) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 32-bit read from 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 32-bit read from 0x%08lx",
(unsigned long)offset);
#endif
val = rte_read32(((uint8_t *)sc->bar[BAR0].base_addr + offset));
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
return val;
@@ -1970,7 +1984,7 @@ bnx2x_set_rx_mode(struct bnx2x_softc *sc)
bnx2x_vf_set_rx_mode(sc);
}
} else {
- PMD_DRV_LOG(NOTICE, "Card is not ready to change mode");
+ PMD_DRV_LOG(NOTICE, sc, "Card is not ready to change mode");
}
}
@@ -1978,7 +1992,7 @@ static inline int pci_read(struct bnx2x_softc *sc, size_t addr,
void *val, uint8_t size)
{
if (rte_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
- PMD_DRV_LOG(ERR, "Can't read from PCI config space");
+ PMD_DRV_LOG(ERR, sc, "Can't read from PCI config space");
return ENXIO;
}
@@ -1991,7 +2005,7 @@ static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
if (rte_pci_write_config(sc->pci_dev, &val16,
sizeof(val16), addr) <= 0) {
- PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ PMD_DRV_LOG(ERR, sc, "Can't write to PCI config space");
return ENXIO;
}
@@ -2003,7 +2017,7 @@ static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val)
uint32_t val32 = val;
if (rte_pci_write_config(sc->pci_dev, &val32,
sizeof(val32), addr) <= 0) {
- PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ PMD_DRV_LOG(ERR, sc, "Can't write to PCI config space");
return ENXIO;
}
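The spinlock added to struct bnx2x_port needs a one-time initialization before the first BNX2X_PHY_LOCK(); the init call site is not in the hunks shown here, so the snippet below is only an illustration of the expected setup (the name format string is an assumption):

	rte_spinlock_init(&sc->port.phy_mtx);
	snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
		 "bnx2x%d_phy_lock", SC_ABS_FUNC(sc));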
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 575271a8..0057843b 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -85,7 +85,7 @@ bnx2x_link_update(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
struct rte_eth_link link;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
bnx2x_link_status_update(sc);
memset(&link, 0, sizeof(link));
@@ -129,9 +129,11 @@ bnx2x_interrupt_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
+ PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
bnx2x_interrupt_action(dev);
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
rte_intr_enable(&sc->pci_dev->intr_handle);
}
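Setting periodic_flags to PERIODIC_STOP before bnx2x_interrupt_action() and back to PERIODIC_GO afterwards keeps the alarm-driven callout from running concurrently with interrupt processing; the callout already honors the flag, as shown earlier in this patch:

	/* consumed in bnx2x_periodic_callout() */
	if ((sc->state != BNX2X_STATE_OPEN) ||
	    (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
		PMD_DRV_LOG(INFO, sc, "periodic callout exit (state=0x%x)",
			    sc->state);
		return;
	}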
@@ -147,8 +149,8 @@ static void bnx2x_periodic_start(void *param)
ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
bnx2x_periodic_start, (void *)dev);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to start periodic"
- " timer rc %d", ret);
+ PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
+ " timer rc %d", ret);
assert(false && "Unable to start periodic timer");
}
}
@@ -176,34 +178,34 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
- PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
+ PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
return -EINVAL;
}
sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
if (sc->num_queues > mp_ncpus) {
- PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
+ PMD_DRV_LOG(ERR, sc, "The number of queues is more than number of CPUs");
return -EINVAL;
}
- PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
+ PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d",
sc->num_queues, sc->mtu);
/* allocate ilt */
if (bnx2x_alloc_ilt_mem(sc) != 0) {
- PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
+ PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem was failed");
return -ENXIO;
}
/* allocate the host hardware/software hsi structures */
if (bnx2x_alloc_hsi_mem(sc) != 0) {
- PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
+ PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed");
bnx2x_free_ilt_mem(sc);
return -ENXIO;
}
@@ -217,7 +219,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* start the periodic callout */
if (sc->periodic_flags & PERIODIC_STOP)
@@ -225,7 +227,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
ret = bnx2x_init(sc);
if (ret) {
- PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
+ PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret);
return -1;
}
@@ -234,12 +236,12 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
bnx2x_interrupt_handler, (void *)dev);
if (rte_intr_enable(&sc->pci_dev->intr_handle))
- PMD_DRV_LOG(ERR, "rte_intr_enable failed");
+ PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
}
ret = bnx2x_dev_rx_init(dev);
if (ret != 0) {
- PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
+ PMD_DRV_LOG(DEBUG, sc, "bnx2x_dev_rx_init returned error code");
return -3;
}
@@ -255,7 +257,7 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (IS_PF(sc)) {
rte_intr_disable(&sc->pci_dev->intr_handle);
@@ -268,7 +270,7 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
if (ret) {
- PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
+ PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret);
return;
}
@@ -280,7 +282,7 @@ bnx2x_dev_close(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (IS_VF(sc))
bnx2x_vf_close(sc);
@@ -300,7 +302,7 @@ bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_PROMISC;
if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
@@ -312,7 +314,7 @@ bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
@@ -324,7 +326,7 @@ bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
@@ -336,7 +338,7 @@ bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_PROMISC;
@@ -346,7 +348,9 @@ bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
- PMD_INIT_FUNC_TRACE();
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE(sc);
return bnx2x_link_update(dev);
}
@@ -361,7 +365,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
- PMD_DRV_LOG(ERR, "PF indicated channel is down."
+ PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
"VF device is no longer operational");
dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
@@ -377,7 +381,7 @@ bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
uint64_t brb_drops;
uint64_t brb_truncates;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
@@ -568,27 +572,35 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
int ret = 0;
struct rte_pci_device *pci_dev;
+ struct rte_pci_addr pci_addr;
struct bnx2x_softc *sc;
- PMD_INIT_FUNC_TRACE();
+ /* Extract key data structures */
+ sc = eth_dev->data->dev_private;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
+
+ snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
+ pci_addr.bus, pci_addr.devid, pci_addr.function,
+ eth_dev->data->port_id);
+
+ PMD_INIT_FUNC_TRACE(sc);
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- sc = eth_dev->data->dev_private;
sc->pcie_bus = pci_dev->addr.bus;
sc->pcie_device = pci_dev->addr.devid;
- if (is_vf)
- sc->flags = BNX2X_IS_VF_FLAG;
-
sc->devinfo.vendor_id = pci_dev->id.vendor_id;
sc->devinfo.device_id = pci_dev->id.device_id;
sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
+ if (is_vf)
+ sc->flags = BNX2X_IS_VF_FLAG;
+
sc->pcie_func = pci_dev->addr.function;
sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
if (is_vf)
@@ -616,7 +628,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
sc->pci_dev = pci_dev;
ret = bnx2x_attach(sc);
if (ret) {
- PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
+ PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret);
return ret;
}
@@ -625,21 +637,21 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
bnx2x_periodic_start, (void *)eth_dev);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to start periodic"
- " timer rc %d", ret);
+ PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
+ " timer rc %d", ret);
return -EINVAL;
}
}
eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
- PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
+ PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d",
sc->pcie_bus, sc->pcie_device);
- PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
+ PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p",
sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
- PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
+ PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d",
PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
- PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
+ PMD_DRV_LOG(INFO, sc, "portID=%d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
if (IS_VF(sc)) {
@@ -679,14 +691,16 @@ out:
static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
- PMD_INIT_FUNC_TRACE();
+ struct bnx2x_softc *sc = eth_dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE(sc);
return bnx2x_common_dev_init(eth_dev, 0);
}
static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
- PMD_INIT_FUNC_TRACE();
+ struct bnx2x_softc *sc = eth_dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE(sc);
return bnx2x_common_dev_init(eth_dev, 1);
}
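The devinfo.name string built in bnx2x_common_dev_init() is what the reworked log macros print in every message. As a rough illustration (the PCI address and port id are made up): PCI_SHORT_PRI_FMT expands to "bus:devid.function" in hex, so a device at 03:00.0 probed as port 0 is tagged roughly as "03:00.0:dpdk-port-0":

	char name[NAME_SIZE];

	snprintf(name, sizeof(name), PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 0x03, 0x00, 0, 0u);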
diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h
index 9e232a9b..753bccdf 100644
--- a/drivers/net/bnx2x/bnx2x_logs.h
+++ b/drivers/net/bnx2x/bnx2x_logs.h
@@ -9,11 +9,11 @@
#define _PMD_LOGS_H_
extern int bnx2x_logtype_init;
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, bnx2x_logtype_init, \
- "%s(): " fmt "\n", __func__, ##args)
+#define PMD_INIT_LOG(level, sc, fmt, args...) \
+ RTE_LOG(level, PMD, \
+ "[bnx2x_pmd: %s] %s() " fmt "\n", (sc)->devinfo.name, __func__, ##args)
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#define PMD_INIT_FUNC_TRACE(sc) PMD_INIT_LOG(DEBUG, sc, " >>")
#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -37,18 +37,19 @@ extern int bnx2x_logtype_init;
#endif
extern int bnx2x_logtype_driver;
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \
- "%s(): " fmt, __func__, ## args)
+#define PMD_DRV_LOG_RAW(level, sc, fmt, args...) \
+ RTE_LOG(level, PMD, "[%s:%d(%s)] " fmt, __func__, __LINE__, \
+ (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args)
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+#define PMD_DRV_LOG(level, sc, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, sc, fmt "\n", ## args)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
-#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(%s): " fmt "\n", __func__, \
+ (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args)
#else
-#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) do { } while(0)
+#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) do { } while (0)
#endif
#endif /* _PMD_LOGS_H_ */
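With the reworked macros every call site passes the softc, so messages from different adapters on the same host can be told apart. A minimal usage sketch; the prefix in the comment shows the expected output format and is illustrative, not captured from a run:

	PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d",
		    sc->pcie_bus, sc->pcie_device);
	/* -> PMD: [bnx2x_common_dev_init:650(03:00.0:dpdk-port-0)] pcie_bus=3, pcie_device=0 */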
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index d9a4127d..ca28aacc 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -12,19 +12,8 @@ static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
uint16_t queue_id, uint32_t ring_size, int socket_id)
{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->device->driver->name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
- RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE);
+ return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
+ ring_size, BNX2X_PAGE_SIZE, socket_id);
}
static void
@@ -76,7 +65,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (NULL == rxq) {
- PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
return -ENOMEM;
}
rxq->sc = sc;
@@ -92,7 +81,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
sc->rx_ring_size = USABLE_RX_BD(rxq);
rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
- PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
+ PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, "
"total_bd=%lu, rx_pages=%u, cq_pages=%u",
queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
(unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
@@ -275,7 +264,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_free_thresh = min(txq->tx_free_thresh,
txq->nb_tx_desc - BDS_PER_TX_PKT);
- PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+ PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, tx_pages=%u",
queue_idx, nb_desc, txq->tx_free_thresh,
(unsigned long)USABLE_TX_BD(txq),
@@ -301,7 +290,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
- /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ /* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
/* Link TX pages */
@@ -310,7 +299,9 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
- /* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
+ /* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu",
+ * (TOTAL_TX_BD_PER_PAGE * i - 1));
+ */
}
txq->queue_id = queue_idx;
@@ -461,9 +452,10 @@ bnx2x_dev_rx_init(struct rte_eth_dev *dev)
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
+ struct bnx2x_softc *sc = dev->data->dev_private;
uint8_t i;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
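
In the hunk above, ring_dma_zone_reserve() is reduced to a call into the ethdev helper rte_eth_dma_zone_reserve(), which formats the memzone name from port and queue id and performs the lookup/reserve that the removed code did by hand. A hedged sketch of such a call, assuming the DPDK 18.x rte_ethdev_driver.h layout and with a made-up ring name, is:

    #include <rte_ethdev_driver.h>	/* rte_eth_dma_zone_reserve(), 18.x header assumed */

    /* Sketch only: reserve (or re-use) a DMA-able memzone for a
     * descriptor ring, page-aligned like BNX2X_PAGE_SIZE above.
     * "example_ring" is a placeholder name for illustration.
     */
    static const struct rte_memzone *
    ring_zone(struct rte_eth_dev *dev, uint16_t queue_id,
    	  uint32_t ring_size, int socket_id)
    {
    	return rte_eth_dma_zone_reserve(dev, "example_ring", queue_id,
    					ring_size, 4096, socket_id);
    }
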
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index edc86ccc..1cd97259 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -82,7 +82,7 @@ bnx2x_storm_stats_post(struct bnx2x_softc *sc)
sc->fw_stats_req->hdr.drv_stats_counter =
htole16(sc->stats_counter++);
- PMD_DEBUG_PERIODIC_LOG(DEBUG,
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
"sending statistics ramrod %d",
le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
@@ -154,7 +154,7 @@ bnx2x_stats_comp(struct bnx2x_softc *sc)
while (*stats_comp != DMAE_COMP_VAL) {
if (!cnt) {
- PMD_DRV_LOG(ERR, "Timeout waiting for stats finished");
+ PMD_DRV_LOG(ERR, sc, "Timeout waiting for stats finished");
break;
}
@@ -189,7 +189,7 @@ bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
}
/* sanity */
if (!sc->port.pmf || !sc->port.port_stx) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -239,7 +239,7 @@ bnx2x_port_stats_init(struct bnx2x_softc *sc)
/* sanity */
if (!sc->link_vars.link_up || !sc->port.pmf) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -463,7 +463,7 @@ bnx2x_func_stats_init(struct bnx2x_softc *sc)
/* sanity */
if (!sc->func_stx) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -797,12 +797,12 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc)
break;
case ELINK_MAC_TYPE_NONE: /* unreached */
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats updated by DMAE but no MAC active");
return -1;
default: /* unreached */
- PMD_DRV_LOG(ERR, "stats update failed, unknown MAC type");
+ PMD_DRV_LOG(ERR, sc, "stats update failed, unknown MAC type");
}
ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
@@ -837,7 +837,7 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc)
nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
if (nig_timer_max != estats->nig_timer_max) {
estats->nig_timer_max = nig_timer_max;
- PMD_DRV_LOG(ERR, "invalid NIG timer max (%u)",
+ PMD_DRV_LOG(ERR, sc, "invalid NIG timer max (%u)",
estats->nig_timer_max);
}
}
@@ -859,7 +859,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
/* are storm stats valid? */
if (le16toh(counters->xstats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by xstorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->xstats_counter), sc->stats_counter);
@@ -867,7 +867,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
}
if (le16toh(counters->ustats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by ustorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->ustats_counter), sc->stats_counter);
@@ -875,7 +875,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
}
if (le16toh(counters->cstats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by cstorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->cstats_counter), sc->stats_counter);
@@ -883,7 +883,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
}
if (le16toh(counters->tstats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by tstorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->tstats_counter), sc->stats_counter);
@@ -929,12 +929,13 @@ bnx2x_storm_stats_update(struct bnx2x_softc *sc)
uint32_t diff;
- /* PMD_DRV_LOG(DEBUG,
+ /* PMD_DRV_LOG(DEBUG, sc,
"queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x",
i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
xclient->mcast_pkts_sent);
- PMD_DRV_LOG(DEBUG, "---------------"); */
+ PMD_DRV_LOG(DEBUG, sc, "---------------");
+ */
UPDATE_QSTAT(tclient->rcv_bcast_bytes,
total_broadcast_bytes_received);
@@ -1288,7 +1289,7 @@ void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
bnx2x_stats_stm[state][event].action(sc);
if (event != STATS_EVENT_UPDATE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"state %d -> event %d -> state %d",
state, event, sc->stats_state);
}
@@ -1302,7 +1303,7 @@ bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
/* sanity */
if (!sc->port.pmf || !sc->port.port_stx) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -1474,7 +1475,7 @@ bnx2x_stats_init(struct bnx2x_softc *sc)
sc->func_stx = 0;
}
- PMD_DRV_LOG(DEBUG, "port_stx 0x%x func_stx 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "port_stx 0x%x func_stx 0x%x",
sc->port.port_stx, sc->func_stx);
/* pmf should retrieve port statistics from SP on a non-init*/
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index 50099d46..048bf126 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -37,12 +37,12 @@ bnx2x_check_bull(struct bnx2x_softc *sc)
if (bull->crc == bnx2x_vf_crc(bull))
break;
- PMD_DRV_LOG(ERR, "bad crc on bulletin board. contained %x computed %x",
+ PMD_DRV_LOG(ERR, sc, "bad crc on bulletin board. contained %x computed %x",
bull->crc, bnx2x_vf_crc(bull));
++tries;
}
if (tries == BNX2X_VF_BULLETIN_TRIES) {
- PMD_DRV_LOG(ERR, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
+ PMD_DRV_LOG(ERR, sc, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
tries);
return FALSE;
}
@@ -82,7 +82,7 @@ bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
rte_spinlock_lock(&sc->vf2pf_lock);
- PMD_DRV_LOG(DEBUG, "Preparing %d tlv for sending", type);
+ PMD_DRV_LOG(DEBUG, sc, "Preparing %d tlv for sending", type);
memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
@@ -97,7 +97,7 @@ static void
bnx2x_vf_finalize(struct bnx2x_softc *sc,
__rte_unused struct vf_first_tlv *first_tlv)
{
- PMD_DRV_LOG(DEBUG, "done sending [%d] tlv over vf pf channel",
+ PMD_DRV_LOG(DEBUG, sc, "done sending [%d] tlv over vf pf channel",
first_tlv->tl.type);
rte_spinlock_unlock(&sc->vf2pf_lock);
@@ -116,14 +116,14 @@ bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
uint8_t i;
if (*status) {
- PMD_DRV_LOG(ERR, "status should be zero before message"
+ PMD_DRV_LOG(ERR, sc, "status should be zero before message"
" to pf was sent");
return -EINVAL;
}
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
- PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
+ PMD_DRV_LOG(ERR, sc, "channel is down. Aborting message sending");
return -EINVAL;
}
@@ -143,11 +143,11 @@ bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
}
if (!*status) {
- PMD_DRV_LOG(ERR, "Response from PF timed out");
+ PMD_DRV_LOG(ERR, sc, "Response from PF timed out");
return -EAGAIN;
}
- PMD_DRV_LOG(DEBUG, "Response from PF was received");
+ PMD_DRV_LOG(DEBUG, sc, "Response from PF was received");
return 0;
}
@@ -195,7 +195,7 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
int rc;
do {
- PMD_DRV_LOG(DEBUG, "trying to get resources");
+ PMD_DRV_LOG(DEBUG, sc, "trying to get resources");
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc)
@@ -207,11 +207,11 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
/* check PF to request acceptance */
if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(DEBUG, "resources obtained successfully");
+ PMD_DRV_LOG(DEBUG, sc, "resources obtained successfully");
res_obtained = true;
} else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES &&
tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"PF cannot allocate requested amount of resources");
res_query = &sc->vf2pf_mbox->query[0].acquire.res_query;
@@ -227,7 +227,7 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs));
} else {
- PMD_DRV_LOG(ERR, "Failed to get the requested "
+ PMD_DRV_LOG(ERR, sc, "Failed to get the requested "
"amount of resources: %d.",
sc_resp->status);
return -EINVAL;
@@ -296,7 +296,7 @@ int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_
sc->doorbell_size = sc_resp.db_size;
sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG | BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG;
- PMD_DRV_LOG(DEBUG, "status block count = %d, base status block = %x",
+ PMD_DRV_LOG(DEBUG, sc, "status block count = %d, base status block = %x",
sc->igu_sb_cnt, sc->igu_base_sb);
strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver));
@@ -333,7 +333,7 @@ bnx2x_vf_close(struct bnx2x_softc *sc)
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
- PMD_DRV_LOG(ERR, "Failed to release VF");
+ PMD_DRV_LOG(ERR, sc, "Failed to release VF");
bnx2x_vf_finalize(sc, &query->first_tlv);
}
@@ -367,12 +367,12 @@ bnx2x_vf_init(struct bnx2x_softc *sc)
if (rc)
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to init VF");
+ PMD_DRV_LOG(ERR, sc, "Failed to init VF");
rc = -EINVAL;
goto out;
}
- PMD_DRV_LOG(DEBUG, "VF was initialized");
+ PMD_DRV_LOG(DEBUG, sc, "VF was initialized");
out:
bnx2x_vf_finalize(sc, &query->first_tlv);
return rc;
@@ -403,7 +403,7 @@ bnx2x_vf_unload(struct bnx2x_softc *sc)
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad reply for vf_q %d teardown", i);
bnx2x_vf_finalize(sc, &query_op->first_tlv);
@@ -423,7 +423,7 @@ bnx2x_vf_unload(struct bnx2x_softc *sc)
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad reply from PF for close message");
bnx2x_vf_finalize(sc, &query->first_tlv);
@@ -450,7 +450,7 @@ bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
rxq = sc->rx_queues[fp->index];
if (!rxq) {
- PMD_DRV_LOG(ERR, "RX queue %d is NULL", fp->index);
+ PMD_DRV_LOG(ERR, sc, "RX queue %d is NULL", fp->index);
return;
}
@@ -474,7 +474,7 @@ bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
txq = sc->tx_queues[fp->index];
if (!txq) {
- PMD_DRV_LOG(ERR, "TX queue %d is NULL", fp->index);
+ PMD_DRV_LOG(ERR, sc, "TX queue %d is NULL", fp->index);
return;
}
@@ -511,7 +511,7 @@ bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int lead
if (rc)
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to setup VF queue[%d]",
+ PMD_DRV_LOG(ERR, sc, "Failed to setup VF queue[%d]",
fp->index);
rc = -EINVAL;
}
@@ -566,7 +566,7 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
}
if (BNX2X_VF_STATUS_SUCCESS != reply->status) {
- PMD_DRV_LOG(ERR, "Bad reply from PF for SET MAC message: %d",
+ PMD_DRV_LOG(ERR, sc, "Bad reply from PF for SET MAC message: %d",
reply->status);
rc = -EINVAL;
}
@@ -608,7 +608,7 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc,
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure RSS");
+ PMD_DRV_LOG(ERR, sc, "Failed to configure RSS");
rc = -EINVAL;
}
out:
@@ -652,7 +652,7 @@ bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
break;
default:
- PMD_DRV_LOG(ERR, "BAD rx mode (%d)", sc->rx_mode);
+ PMD_DRV_LOG(ERR, sc, "BAD rx mode (%d)", sc->rx_mode);
rc = -EINVAL;
goto out;
}
@@ -666,7 +666,7 @@ bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to set RX mode");
+ PMD_DRV_LOG(ERR, sc, "Failed to set RX mode");
rc = -EINVAL;
}
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 57085ebb..1192e5dd 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -500,6 +500,18 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
#define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+ /* Set non-default values for TXFIR in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_IFIR_MASK 0x00F00000
+ #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT 20
+
+ /* Set non-default values for IPREDRIVER in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK 0x0F000000
+ #define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT 24
+
+ /* Set non-default values for POST2 in SFP mode. */
+ #define PORT_HW_CFG_TX_DRV_POST2_MASK 0xF0000000
+ #define PORT_HW_CFG_TX_DRV_POST2_SHIFT 28
+
uint32_t reserved0[5]; /* 0x17c */
uint32_t aeu_int_mask; /* 0x190 */
@@ -783,6 +795,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858 0x00001200
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
@@ -2532,7 +2545,12 @@ struct shmem2_region {
uint32_t drv_func_info_addr; /* Offset 0x14C */
uint32_t drv_func_info_size; /* Offset 0x150 */
uint32_t link_attr_sync[PORT_MAX]; /* Offset 0x154 */
- #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_ATTR_84858 0x00000002
+ #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
+ #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
+
+ uint32_t link_change_count[PORT_MAX]; /* Offset 0x160-0x164 */
};
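
The new PORT_HW_CFG_TX_DRV_IFIR/IPREDRIVER/POST2 masks and shifts carve three more 4-bit TX-driver tuning fields out of the same 32-bit shared-memory word. Reading a field is the usual mask-then-shift; a small self-contained example using the values from the hunk above (the register value itself is made up for illustration) is:

    #include <stdint.h>
    #include <stdio.h>

    /* Values copied from the ecore_hsi.h hunk above. */
    #define PORT_HW_CFG_TX_DRV_IFIR_MASK	0x00F00000
    #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT	20
    #define PORT_HW_CFG_TX_DRV_POST2_MASK	0xF0000000
    #define PORT_HW_CFG_TX_DRV_POST2_SHIFT	28

    int main(void)
    {
    	/* Illustrative value only; real code reads it from shared memory. */
    	uint32_t tx_drv = 0x5A300000;
    	uint32_t ifir  = (tx_drv & PORT_HW_CFG_TX_DRV_IFIR_MASK) >>
    			 PORT_HW_CFG_TX_DRV_IFIR_SHIFT;
    	uint32_t post2 = (tx_drv & PORT_HW_CFG_TX_DRV_POST2_MASK) >>
    			 PORT_HW_CFG_TX_DRV_POST2_SHIFT;

    	printf("ifir=0x%x post2=0x%x\n", ifir, post2);
    	return 0;
    }
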
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index f2de07e5..97dfe69b 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -741,7 +741,7 @@ static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
if (dis_mask) {
REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
dis_mask);
- ECORE_MSG("Setting parity mask "
+ ECORE_MSG(sc, "Setting parity mask "
"for %s to\t\t0x%x",
ecore_blocks_parity_data[i].name, dis_mask);
}
@@ -776,7 +776,7 @@ static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
reg_val = REG_RD(sc, ecore_blocks_parity_data[i].
sts_clr_addr);
if (reg_val & reg_mask)
- ECORE_MSG("Parity errors in %s: 0x%x",
+ ECORE_MSG(sc, "Parity errors in %s: 0x%x",
ecore_blocks_parity_data[i].name,
reg_val & reg_mask);
}
@@ -785,7 +785,7 @@ static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
/* Check if there were parity attentions in MCP */
reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP);
if (reg_val & mcp_aeu_bits)
- ECORE_MSG("Parity error in MCP: 0x%x",
+ ECORE_MSG(sc, "Parity error in MCP: 0x%x",
reg_val & mcp_aeu_bits);
/* Clear parity attentions in MCP:
diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h
index 2b003afb..733ad1aa 100644
--- a/drivers/net/bnx2x/ecore_init_ops.h
+++ b/drivers/net/bnx2x/ecore_init_ops.h
@@ -424,20 +424,20 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
uint32_t val, i;
if (r_order > MAX_RD_ORD) {
- ECORE_MSG("read order of %d order adjusted to %d",
+ ECORE_MSG(sc, "read order of %d order adjusted to %d",
r_order, MAX_RD_ORD);
r_order = MAX_RD_ORD;
}
if (w_order > MAX_WR_ORD) {
- ECORE_MSG("write order of %d order adjusted to %d",
+ ECORE_MSG(sc, "write order of %d order adjusted to %d",
w_order, MAX_WR_ORD);
w_order = MAX_WR_ORD;
}
if (CHIP_REV_IS_FPGA(sc)) {
- ECORE_MSG("write order adjusted to 1 for FPGA");
+ ECORE_MSG(sc, "write order adjusted to 1 for FPGA");
w_order = 0;
}
- ECORE_MSG("read order %d write order %d", r_order, w_order);
+ ECORE_MSG(sc, "read order %d write order %d", r_order, w_order);
for (i = 0; i < NUM_RD_Q-1; i++) {
REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h
index ae8a93bb..d69e857b 100644
--- a/drivers/net/bnx2x/ecore_reg.h
+++ b/drivers/net/bnx2x/ecore_reg.h
@@ -1967,6 +1967,7 @@
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_NVRAM 12
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 0c8685c6..ab730abe 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -53,14 +53,14 @@ ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
o->execute = exec;
o->get = get;
- ECORE_MSG("Setup the execution queue with the chunk length of %d",
+ ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
exe_len);
}
static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
struct ecore_exeq_elem *elem)
{
- ECORE_MSG("Deleting an exe_queue element");
+ ECORE_MSG(sc, "Deleting an exe_queue element");
ECORE_FREE(sc, elem, sizeof(*elem));
}
@@ -106,7 +106,7 @@ static int ecore_exe_queue_add(struct bnx2x_softc *sc,
/* Check if this request is ok */
rc = o->validate(sc, o->owner, elem);
if (rc) {
- ECORE_MSG("Preamble failed: %d", rc);
+ ECORE_MSG(sc, "Preamble failed: %d", rc);
goto free_and_exit;
}
}
@@ -176,8 +176,8 @@ static int ecore_exe_queue_step(struct bnx2x_softc *sc,
*/
if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- ECORE_MSG
- ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
+ ECORE_MSG(sc,
+ "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
__ecore_exe_queue_reset_pending(sc, o);
} else {
return ECORE_PENDING;
@@ -240,7 +240,7 @@ static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct
bnx2x_softc *sc
__rte_unused)
{
- ECORE_MSG("Allocating a new exe_queue element");
+ ECORE_MSG(sc, "Allocating a new exe_queue element");
return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}
@@ -290,14 +290,14 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
if (CHIP_REV_IS_EMUL(sc))
cnt *= 20;
- ECORE_MSG("waiting for state to become %d", state);
+ ECORE_MSG(sc, "waiting for state to become %d", state);
ECORE_MIGHT_SLEEP();
while (cnt--) {
bnx2x_intr_legacy(sc, 1);
if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
- ECORE_MSG("exit (cnt %d)", 5000 - cnt);
+ ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
return ECORE_SUCCESS;
}
@@ -309,7 +309,7 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
}
/* timeout! */
- PMD_DRV_LOG(ERR, "timeout waiting for state %d", state);
+ PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
@@ -370,11 +370,11 @@ static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
struct ecore_vlan_mac_obj *o)
{
if (o->head_reader) {
- ECORE_MSG("vlan_mac_lock writer - There are readers; Busy");
+ ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
return ECORE_BUSY;
}
- ECORE_MSG("vlan_mac_lock writer - Taken");
+ ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
return ECORE_SUCCESS;
}
@@ -394,13 +394,13 @@ static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
int rc;
unsigned long ramrod_flags = o->saved_ramrod_flags;
- ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu",
+ ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu",
ramrod_flags);
o->head_exe_request = FALSE;
o->saved_ramrod_flags = 0;
rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
if (rc != ECORE_SUCCESS) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"execution of pending commands failed with rc %d",
rc);
#ifdef ECORE_STOP_ON_ERROR
@@ -425,7 +425,7 @@ static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
{
o->head_exe_request = TRUE;
o->saved_ramrod_flags = ramrod_flags;
- ECORE_MSG("Placing pending execution with ramrod flags %lu",
+ ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu",
ramrod_flags);
}
@@ -446,8 +446,8 @@ static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
* executed. If so, execute again. [Ad infinitum]
*/
while (o->head_exe_request) {
- ECORE_MSG
- ("vlan_mac_lock - writer release encountered a pending request");
+ ECORE_MSG(sc,
+ "vlan_mac_lock - writer release encountered a pending request");
__ecore_vlan_mac_h_exec_pending(sc, o);
}
}
@@ -483,7 +483,8 @@ static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
{
/* If we got here, we're holding lock --> no WRITER exists */
o->head_reader++;
- ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader);
+ ECORE_MSG(sc,
+ "vlan_mac_lock - locked reader - number %d", o->head_reader);
return ECORE_SUCCESS;
}
@@ -522,14 +523,14 @@ static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
struct ecore_vlan_mac_obj *o)
{
if (!o->head_reader) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
} else {
o->head_reader--;
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"vlan_mac_lock - decreased readers to %d",
o->head_reader);
}
@@ -538,7 +539,7 @@ static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
* was last - if so we need to execute the command.
*/
if (!o->head_reader && o->head_exe_request) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"vlan_mac_lock - reader release encountered a pending request");
/* Writer release will do the trick */
@@ -581,10 +582,10 @@ static int ecore_get_n_elements(struct bnx2x_softc *sc,
uint8_t *next = base;
int counter = 0, read_lock;
- ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
read_lock = ecore_vlan_mac_h_read_lock(sc, o);
if (read_lock != ECORE_SUCCESS)
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"get_n_elements failed to get vlan mac reader lock; Access without lock");
/* traverse list */
@@ -593,15 +594,15 @@ static int ecore_get_n_elements(struct bnx2x_softc *sc,
if (counter < n) {
ECORE_MEMCPY(next, &pos->u, size);
counter++;
- ECORE_MSG
- ("copied element number %d to address %p element was:",
+ ECORE_MSG
+ (sc, "copied element number %d to address %p element was:",
counter, next);
next += stride + size;
}
}
if (read_lock == ECORE_SUCCESS) {
- ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
ecore_vlan_mac_h_read_unlock(sc, o);
}
@@ -615,7 +616,7 @@ static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
{
struct ecore_vlan_mac_registry_elem *pos;
- ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
+ ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
@@ -644,7 +645,7 @@ static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_sof
{
struct ecore_vlan_mac_registry_elem *pos;
- ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
+ ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
@@ -722,7 +723,7 @@ static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
if (index > ECORE_LLH_CAM_MAX_PF_LINE)
return;
- ECORE_MSG("Going to %s LLH configuration at entry %d",
+ ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
(add ? "ADD" : "DELETE"), index);
if (add) {
@@ -838,7 +839,7 @@ static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
&rule_entry->mac.header);
- ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
+ ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
(add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
mac[4], mac[5], raw->cl_id);
@@ -943,7 +944,7 @@ static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc
ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
cfg_entry);
- ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
+ ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
(add ? "setting" : "clearing"),
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
o->raw.cl_id, cam_offset);
@@ -1088,8 +1089,8 @@ static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
/* Check the registry */
rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
if (rc) {
- ECORE_MSG
- ("ADD command is not allowed considering current registry state.");
+ ECORE_MSG(sc,
+ "ADD command is not allowed considering current registry state.");
return rc;
}
@@ -1097,7 +1098,7 @@ static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
* MAC/VLAN/VLAN-MAC. Return an error if there is.
*/
if (exeq->get(exeq, elem)) {
- ECORE_MSG("There is a pending ADD command already");
+ ECORE_MSG(sc, "There is a pending ADD command already");
return ECORE_EXISTS;
}
@@ -1136,8 +1137,8 @@ static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
*/
pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
if (!pos) {
- ECORE_MSG
- ("DEL command is not allowed considering current registry state");
+ ECORE_MSG(sc,
+ "DEL command is not allowed considering current registry state");
return ECORE_EXISTS;
}
@@ -1149,13 +1150,13 @@ static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
/* Check for MOVE commands */
query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
if (exeq->get(exeq, &query_elem)) {
- PMD_DRV_LOG(ERR, "There is a pending MOVE command already");
+ PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
return ECORE_INVAL;
}
/* Check for DEL commands */
if (exeq->get(exeq, elem)) {
- ECORE_MSG("There is a pending DEL command already");
+ ECORE_MSG(sc, "There is a pending DEL command already");
return ECORE_EXISTS;
}
@@ -1163,7 +1164,7 @@ static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
&elem->cmd_data.vlan_mac.vlan_mac_flags) ||
o->put_credit(o))) {
- PMD_DRV_LOG(ERR, "Failed to return a credit");
+ PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
return ECORE_INVAL;
}
@@ -1196,8 +1197,8 @@ static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
* state.
*/
if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
- ECORE_MSG
- ("MOVE command is not allowed considering current registry state");
+ ECORE_MSG(sc,
+ "MOVE command is not allowed considering current registry state");
return ECORE_INVAL;
}
@@ -1210,21 +1211,21 @@ static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
/* Check DEL on source */
query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
if (src_exeq->get(src_exeq, &query_elem)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"There is a pending DEL command on the source queue already");
return ECORE_INVAL;
}
/* Check MOVE on source */
if (src_exeq->get(src_exeq, elem)) {
- ECORE_MSG("There is a pending MOVE command already");
+ ECORE_MSG(sc, "There is a pending MOVE command already");
return ECORE_EXISTS;
}
/* Check ADD on destination */
query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
if (dest_exeq->get(dest_exeq, &query_elem)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"There is a pending ADD command on the destination queue already");
return ECORE_INVAL;
}
@@ -1329,7 +1330,7 @@ static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
- ECORE_MSG("vlan_mac_execute_step - trying to take writer lock");
+ ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
rc = __ecore_vlan_mac_h_write_trylock(sc, o);
if (rc != ECORE_SUCCESS) {
@@ -1426,17 +1427,17 @@ static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
&pos->cmd_data.vlan_mac.vlan_mac_flags)) {
if ((query.cmd_data.vlan_mac.cmd ==
ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to return the credit for the optimized ADD command");
return ECORE_INVAL;
} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to recover the credit from the optimized DEL command");
return ECORE_INVAL;
}
}
- ECORE_MSG("Optimizing %s command",
+ ECORE_MSG(sc, "Optimizing %s command",
(elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
"ADD" : "DEL");
@@ -1486,7 +1487,7 @@ static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
return ECORE_INVAL;
}
- ECORE_MSG("Got cam offset %d", reg_elem->cam_offset);
+ ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);
/* Set a VLAN-MAC data */
ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
@@ -1695,8 +1696,8 @@ int ecore_config_vlan_mac(struct bnx2x_softc *sc,
rc = ECORE_PENDING;
if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- ECORE_MSG
- ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
+ ECORE_MSG(sc,
+ "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
raw->clear_pending(raw);
}
@@ -1775,7 +1776,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
*vlan_mac_flags) {
rc = exeq->remove(sc, exeq->owner, exeq_pos);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to remove command");
+ PMD_DRV_LOG(ERR, sc, "Failed to remove command");
ECORE_SPIN_UNLOCK_BH(&exeq->lock);
return rc;
}
@@ -1800,7 +1801,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
- ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
read_lock = ecore_vlan_mac_h_read_lock(sc, o);
if (read_lock != ECORE_SUCCESS)
return read_lock;
@@ -1812,7 +1813,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = ecore_config_vlan_mac(sc, &p);
if (rc < 0) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to add a new DEL command");
ecore_vlan_mac_h_read_unlock(sc, o);
return rc;
@@ -1820,7 +1821,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
}
}
- ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
ecore_vlan_mac_h_read_unlock(sc, o);
p.ramrod_flags = *ramrod_flags;
@@ -2007,7 +2008,7 @@ static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
mac_filters->unmatched_unicast | mask :
mac_filters->unmatched_unicast & ~mask;
- ECORE_MSG("drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x"
+ ECORE_MSG(sc, "drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x"
"accp_mcast 0x%xaccp_bcast 0x%x",
mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
@@ -2153,8 +2154,8 @@ static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
*/
ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
- ECORE_MSG
- ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
+ ECORE_MSG
+ (sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
/* No need for an explicit memory barrier here as long we would
@@ -2207,7 +2208,7 @@ int ecore_config_rx_mode(struct bnx2x_softc *sc,
return rc;
}
} else {
- ECORE_MSG("ERROR: config_rx_mode is NULL");
+ ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
return -1;
}
@@ -2288,7 +2289,7 @@ static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
if (!new_cmd)
return ECORE_NOMEM;
- ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d",
+ ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
cmd, macs_list_len);
ECORE_LIST_INIT(&new_cmd->data.macs_head);
@@ -2324,7 +2325,7 @@ static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
default:
ECORE_FREE(sc, new_cmd, total_sz);
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -2436,11 +2437,11 @@ static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return;
}
- ECORE_MSG("%s bin %d",
+ ECORE_MSG(sc, "%s bin %d",
((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
"Setting" : "Clearing"), bin);
@@ -2475,7 +2476,7 @@ static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG("About to configure a bin %d", cur_bin);
+ ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
/* Break if we reached the maximum number
* of rules.
@@ -2507,8 +2508,8 @@ static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG
- ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
+ ECORE_MSG
+ (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
@@ -2543,7 +2544,7 @@ static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
cmd_pos->data.macs_num--;
- ECORE_MSG("Deleting MAC. %d left,cnt is %d",
+ ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d",
cmd_pos->data.macs_num, cnt);
/* Break if we reached the maximum
@@ -2602,7 +2603,8 @@ static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type);
+ PMD_DRV_LOG(ERR, sc,
+ "Unknown command: %d", cmd_pos->type);
return ECORE_INVAL;
}
@@ -2639,8 +2641,8 @@ static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG
- ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
+ ECORE_MSG
+ (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
}
@@ -2660,7 +2662,8 @@ static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1);
+ ECORE_MSG(sc,
+ "Deleting MAC. %d left", p->mcast_list_len - i - 1);
}
*line_idx = cnt;
@@ -2686,7 +2689,7 @@ static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
struct ecore_mcast_obj *o = p->mcast_obj;
int cnt = start_cnt;
- ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len);
+ ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);
switch (cmd) {
case ECORE_MCAST_CMD_ADD:
@@ -2702,7 +2705,7 @@ static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -2747,7 +2750,7 @@ static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -2933,8 +2936,8 @@ static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
ECORE_57711_SET_MC_FILTER(mc_filter, bit);
- ECORE_MSG
- ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
+ ECORE_MSG
+ (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
bit);
@@ -2954,7 +2957,7 @@ static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
for (bit = ecore_mcast_get_next_bin(o, 0);
bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
ECORE_57711_SET_MC_FILTER(mc_filter, bit);
- ECORE_MSG("About to set bin %d", bit);
+ ECORE_MSG(sc, "About to set bin %d", bit);
}
}
@@ -2985,7 +2988,7 @@ static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
break;
case ECORE_MCAST_CMD_DEL:
- ECORE_MSG("Invalidating multicast MACs configuration");
+ ECORE_MSG(sc, "Invalidating multicast MACs configuration");
/* clear the registry */
ECORE_MEMSET(o->registry.aprox_match.vec, 0,
@@ -2997,7 +3000,7 @@ static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -3048,8 +3051,8 @@ int ecore_config_mcast(struct bnx2x_softc *sc,
if ((!p->mcast_list_len) && (!o->check_sched(o)))
return ECORE_SUCCESS;
- ECORE_MSG
- ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
+ ECORE_MSG
+ (sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
/* Enqueue the current command to the pending list if we can't complete
@@ -3478,7 +3481,7 @@ static int ecore_setup_rss(struct bnx2x_softc *sc,
ECORE_MEMSET(data, 0, sizeof(*data));
- ECORE_MSG("Configuring RSS");
+ ECORE_MSG(sc, "Configuring RSS");
/* Set an echo field */
data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
@@ -3492,7 +3495,7 @@ static int ecore_setup_rss(struct bnx2x_softc *sc,
data->rss_mode = rss_mode;
- ECORE_MSG("rss_mode=%d", rss_mode);
+ ECORE_MSG(sc, "rss_mode=%d", rss_mode);
/* RSS capabilities */
if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
@@ -3532,7 +3535,7 @@ static int ecore_setup_rss(struct bnx2x_softc *sc,
/* RSS engine ID */
data->rss_engine_id = o->engine_id;
- ECORE_MSG("rss_engine_id=%d", data->rss_engine_id);
+ ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
/* Indirection table */
ECORE_MEMCPY(data->indirection_table, p->ind_table,
@@ -3627,15 +3630,15 @@ int ecore_queue_state_change(struct bnx2x_softc *sc,
/* Check that the requested transition is legal */
rc = o->check_transition(sc, o, params);
if (rc) {
- PMD_DRV_LOG(ERR, "check transition returned an error. rc %d",
+ PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d",
rc);
return ECORE_INVAL;
}
/* Set "pending" bit */
- ECORE_MSG("pending bit was=%lx", o->pending);
+ ECORE_MSG(sc, "pending bit was=%lx", o->pending);
pending_bit = o->set_pending(o, params);
- ECORE_MSG("pending bit now=%lx", o->pending);
+ ECORE_MSG(sc, "pending bit now=%lx", o->pending);
/* Don't send a command if only driver cleanup was requested */
if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
@@ -3702,7 +3705,7 @@ static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
unsigned long cur_pending = o->pending;
if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
cur_pending, o->next_state);
@@ -3713,15 +3716,15 @@ static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
/* >= because tx only must always be smaller than cos since the
* primary connection supports COS 0
*/
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"illegal value for next tx_only: %d. max cos was %d",
o->next_tx_only, o->max_cos);
- ECORE_MSG("Completing command %d for queue %d, setting state to %d",
+ ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
if (o->next_tx_only) /* print num tx-only if any exist */
- ECORE_MSG("primary cid %d: num tx-only cons %d",
+ ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
o->state = o->next_state;
@@ -3782,7 +3785,7 @@ static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
- ECORE_MSG("flags: active %d, cos %d, stats en %d",
+ ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
gen_data->activate_flg, gen_data->cos,
gen_data->statistics_en_flg);
}
@@ -3923,7 +3926,7 @@ static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queu
ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
&data->tx, &cmd_params->params.tx_only.flags);
- ECORE_MSG("cid %d, tx bd page lo %x hi %x",
+ ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
cmd_params->q_obj->cids[0],
data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
}
@@ -3973,9 +3976,9 @@ static int ecore_q_init(struct bnx2x_softc *sc,
/* Set CDU context validation values */
for (cos = 0; cos < o->max_cos; cos++) {
- ECORE_MSG("setting context validation. cid %d, cos %d",
+ ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
o->cids[cos], cos);
- ECORE_MSG("context pointer %p", init->cxts[cos]);
+ ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
}
@@ -4059,15 +4062,15 @@ static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue
if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
- ECORE_MSG("sending forward tx-only ramrod");
+ ECORE_MSG(sc, "sending forward tx-only ramrod");
if (cid_index >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_index);
return ECORE_INVAL;
}
- ECORE_MSG("parameters received: cos: %d sp-id: %d",
+ ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
tx_only_params->gen_params.cos,
tx_only_params->gen_params.spcl_id);
@@ -4077,8 +4080,8 @@ static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue
/* Fill the ramrod data */
ecore_q_fill_setup_tx_only(sc, params, rdata);
- ECORE_MSG
- ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
+ ECORE_MSG
+ (sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
@@ -4173,7 +4176,7 @@ static int ecore_q_send_update(struct bnx2x_softc *sc,
uint8_t cid_index = update_params->cid_index;
if (cid_index >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_index);
return ECORE_INVAL;
}
@@ -4267,7 +4270,7 @@ static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
uint8_t cid_idx = params->params.cfc_del.cid_index;
if (cid_idx >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_idx);
return ECORE_INVAL;
}
@@ -4283,7 +4286,7 @@ static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_sta
uint8_t cid_index = params->params.terminate.cid_index;
if (cid_index >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_index);
return ECORE_INVAL;
}
@@ -4327,7 +4330,7 @@ static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_s
case ECORE_Q_CMD_EMPTY:
return ecore_q_send_empty(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -4350,7 +4353,7 @@ static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
case ECORE_Q_CMD_EMPTY:
return ecore_queue_send_cmd_cmn(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -4373,7 +4376,7 @@ static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
case ECORE_Q_CMD_EMPTY:
return ecore_queue_send_cmd_cmn(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -4416,7 +4419,7 @@ static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
* the previous one.
*/
if (o->pending) {
- PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx",
+ PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx",
o->pending);
return ECORE_BUSY;
}
@@ -4543,19 +4546,19 @@ static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Illegal state: %d", state);
+ PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
}
/* Transition is assured */
if (next_state != ECORE_Q_STATE_MAX) {
- ECORE_MSG("Good state transition: %d(%d)->%d",
+ ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
state, cmd, next_state);
o->next_state = next_state;
o->next_tx_only = next_tx_only;
return ECORE_SUCCESS;
}
- ECORE_MSG("Bad state transition request: %d %d", state, cmd);
+ ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
return ECORE_INVAL;
}
@@ -4606,18 +4609,18 @@ static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Illegal state: %d", state);
+ PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
}
/* Transition is assured */
if (next_state != ECORE_Q_STATE_MAX) {
- ECORE_MSG("Good state transition: %d(%d)->%d",
+ ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
state, cmd, next_state);
o->next_state = next_state;
return ECORE_SUCCESS;
}
- ECORE_MSG("Bad state transition request: %d %d", state, cmd);
+ ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
return ECORE_INVAL;
}
@@ -4697,14 +4700,14 @@ ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
unsigned long cur_pending = o->pending;
if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
o->next_state);
return ECORE_INVAL;
}
- ECORE_MSG("Completing command %d for func %d, setting state to %d",
+ ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
cmd, ECORE_FUNC_ID(sc), o->next_state);
o->state = o->next_state;
@@ -4827,18 +4830,19 @@ static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown state: %d", state);
+ PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
}
/* Transition is assured */
if (next_state != ECORE_F_STATE_MAX) {
- ECORE_MSG("Good function state transition: %d(%d)->%d",
+ ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
state, cmd, next_state);
o->next_state = next_state;
return ECORE_SUCCESS;
}
- ECORE_MSG("Bad function state transition request: %d %d", state, cmd);
+ ECORE_MSG(sc,
+ "Bad function state transition request: %d %d", state, cmd);
return ECORE_INVAL;
}
@@ -4928,13 +4932,13 @@ static int ecore_func_hw_init(struct bnx2x_softc *sc,
const struct ecore_func_sp_drv_ops *drv = o->drv;
int rc = 0;
- ECORE_MSG("function %d load_code %x",
+ ECORE_MSG(sc, "function %d load_code %x",
ECORE_ABS_FUNC_ID(sc), load_code);
/* Prepare FW */
rc = drv->init_fw(sc);
if (rc) {
- PMD_DRV_LOG(ERR, "Error loading firmware");
+ PMD_DRV_LOG(ERR, sc, "Error loading firmware");
goto init_err;
}
@@ -4965,7 +4969,7 @@ static int ecore_func_hw_init(struct bnx2x_softc *sc,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP",
+ PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
load_code);
rc = ECORE_INVAL;
}
@@ -5041,7 +5045,7 @@ static int ecore_func_hw_reset(struct bnx2x_softc *sc,
struct ecore_func_sp_obj *o = params->f_obj;
const struct ecore_func_sp_drv_ops *drv = o->drv;
- ECORE_MSG("function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
+ ECORE_MSG(sc, "function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
reset_phase);
switch (reset_phase) {
@@ -5055,7 +5059,7 @@ static int ecore_func_hw_reset(struct bnx2x_softc *sc,
ecore_func_reset_func(sc, drv);
break;
default:
- PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP",
+ PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
reset_phase);
break;
}
@@ -5146,7 +5150,7 @@ static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
*/
- ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
+ ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
rdata->vif_id,
rdata->afex_default_vlan, rdata->allowed_priorities);
@@ -5184,8 +5188,8 @@ inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
* (inside ecore_sp_post()).
*/
- ECORE_MSG
- ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
+ ECORE_MSG
+ (sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
rdata->afex_vif_list_command, rdata->vif_list_index,
rdata->func_bit_map, rdata->func_to_clear);
@@ -5256,7 +5260,7 @@ static int ecore_func_send_cmd(struct bnx2x_softc *sc,
case ECORE_F_CMD_SWITCH_UPDATE:
return ecore_func_send_switch_update(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -5317,7 +5321,7 @@ int ecore_func_state_change(struct bnx2x_softc *sc,
}
if (rc == ECORE_BUSY) {
ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"timeout waiting for previous ramrod completion");
return rc;
}
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index 6b65a496..f295bf5a 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -215,8 +215,8 @@ ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
} while (0)
-#define ECORE_MSG(m, ...) \
- PMD_DRV_LOG(DEBUG, m, ##__VA_ARGS__)
+#define ECORE_MSG(sc, m, ...) \
+ PMD_DRV_LOG(DEBUG, sc, m, ##__VA_ARGS__)
typedef struct _ecore_list_entry_t
{
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index b63fd23e..d5693668 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -18,166 +18,159 @@
#include "ecore_hsi.h"
#include "ecore_reg.h"
-static elink_status_t elink_link_reset(struct elink_params *params,
- struct elink_vars *vars,
- uint8_t reset_ext_phy);
-static elink_status_t elink_check_half_open_conn(struct elink_params *params,
- struct elink_vars *vars,
- uint8_t notify);
-static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
- struct elink_params *params);
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
-#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
-#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
-#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
-#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+ #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+ #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+ #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+ #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
#define MDIO_REG_BANK_CL73_IEEEB1 0x10
-#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
-#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
-#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
-#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
-#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
-#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
-#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
-#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
-#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
-#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
-#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
-#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
-#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
-#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
-#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
-#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
+ #define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+ #define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+ #define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+ #define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+ #define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+ #define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+ #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+ #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+ #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+ #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+ #define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+ #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+ #define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+ #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+ #define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+ #define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
#define MDIO_REG_BANK_RX0 0x80b0
-#define MDIO_RX0_RX_STATUS 0x10
-#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
-#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
-#define MDIO_RX0_RX_EQ_BOOST 0x1c
-#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
-#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+ #define MDIO_RX0_RX_STATUS 0x10
+ #define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+ #define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+ #define MDIO_RX0_RX_EQ_BOOST 0x1c
+ #define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+ #define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
#define MDIO_REG_BANK_RX1 0x80c0
-#define MDIO_RX1_RX_EQ_BOOST 0x1c
-#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
-#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+ #define MDIO_RX1_RX_EQ_BOOST 0x1c
+ #define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+ #define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
#define MDIO_REG_BANK_RX2 0x80d0
-#define MDIO_RX2_RX_EQ_BOOST 0x1c
-#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
-#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+ #define MDIO_RX2_RX_EQ_BOOST 0x1c
+ #define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+ #define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
#define MDIO_REG_BANK_RX3 0x80e0
-#define MDIO_RX3_RX_EQ_BOOST 0x1c
-#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
-#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+ #define MDIO_RX3_RX_EQ_BOOST 0x1c
+ #define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+ #define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
#define MDIO_REG_BANK_RX_ALL 0x80f0
-#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
-#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
-#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+ #define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+ #define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+ #define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
#define MDIO_REG_BANK_TX0 0x8060
-#define MDIO_TX0_TX_DRIVER 0x17
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
-#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
-#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
-#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+ #define MDIO_TX0_TX_DRIVER 0x17
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+ #define MDIO_TX0_TX_DRIVER_ICBUF1T 1
#define MDIO_REG_BANK_TX1 0x8070
-#define MDIO_TX1_TX_DRIVER 0x17
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
-#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
-#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
-#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+ #define MDIO_TX1_TX_DRIVER 0x17
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+ #define MDIO_TX0_TX_DRIVER_ICBUF1T 1
#define MDIO_REG_BANK_TX2 0x8080
-#define MDIO_TX2_TX_DRIVER 0x17
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
-#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
-#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
-#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+ #define MDIO_TX2_TX_DRIVER 0x17
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+ #define MDIO_TX0_TX_DRIVER_ICBUF1T 1
#define MDIO_REG_BANK_TX3 0x8090
-#define MDIO_TX3_TX_DRIVER 0x17
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
-#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
-#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
-#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
-#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
-#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
-#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+ #define MDIO_TX3_TX_DRIVER 0x17
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+ #define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+ #define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+ #define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+ #define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+ #define MDIO_TX0_TX_DRIVER_ICBUF1T 1
#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
-#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+ #define MDIO_BLOCK0_XGXS_CONTROL 0x10
#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
-#define MDIO_BLOCK1_LANE_CTRL0 0x15
-#define MDIO_BLOCK1_LANE_CTRL1 0x16
-#define MDIO_BLOCK1_LANE_CTRL2 0x17
-#define MDIO_BLOCK1_LANE_PRBS 0x19
+ #define MDIO_BLOCK1_LANE_CTRL0 0x15
+ #define MDIO_BLOCK1_LANE_CTRL1 0x16
+ #define MDIO_BLOCK1_LANE_CTRL2 0x17
+ #define MDIO_BLOCK1_LANE_PRBS 0x19
#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
-#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
-#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
-#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
-#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
-#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
-#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
-#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
-#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
-#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+ #define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+ #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+ #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+ #define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+ #define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+ #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+ #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+ #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+ #define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
#define MDIO_REG_BANK_GP_STATUS 0x8120
#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
-#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
+
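
The 6-bit speed field above is isolated with ACTUAL_SPEED_MASK and then compared against the speed codes; a minimal host-side sketch of that decode, assuming the status word has already been read (only two speed codes are handled and the value is hard-coded rather than fetched over MDIO):

#include <stdint.h>
#include <stdio.h>

#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK		0x3f00
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G		0x0200
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR	0x0F00

/* Map the masked speed field to Mbps (subset of codes only). */
static int gp_status_to_mbps(uint16_t gp_status)
{
	switch (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK) {
	case MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G:
		return 1000;
	case MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR:
		return 10000;
	default:
		return -1;	/* code not handled in this sketch */
	}
}

int main(void)
{
	uint16_t gp_status = 0x0F04;	/* pretend: 10G-KR, link up */

	printf("speed: %d Mbps\n", gp_status_to_mbps(gp_status));
	return 0;
}
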
#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
@@ -320,6 +313,7 @@ bit15=link, bit12=duplex, bits11:10=speed, bit14=acknowledge.
The other bits are reserved and should be zero */
#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
+
#define MDIO_PMA_DEVAD 0x1
/*ieee*/
#define MDIO_PMA_REG_CTRL 0x0
@@ -328,7 +322,7 @@ The other bits are reserved and should be zero */
#define MDIO_PMA_REG_TX_DISABLE 0x0009
#define MDIO_PMA_REG_RX_SD 0xa
/*bnx2x*/
-#define MDIO_PMA_REG_BNX2X_CTRL 0x0096
+#define MDIO_PMA_REG_BCM_CTRL 0x0096
#define MDIO_PMA_REG_FEC_CTRL 0x00ab
#define MDIO_PMA_LASI_RXCTRL 0x9000
#define MDIO_PMA_LASI_TXCTRL 0x9001
@@ -343,8 +337,8 @@ The other bits are reserved and should be zero */
#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
#define MDIO_PMA_REG_MISC_CTRL 0xca0a
#define MDIO_PMA_REG_GEN_CTRL 0xca10
-#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
-#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+ #define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+ #define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
#define MDIO_PMA_REG_ROM_VER1 0xca19
@@ -358,21 +352,21 @@ The other bits are reserved and should be zero */
#define MDIO_PMA_REG_MISC_CTRL1 0xca85
#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
-#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
-#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
-#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
-#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
-#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
-#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+ #define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
-#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+ #define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
@@ -404,6 +398,8 @@ The other bits are reserved and should be zero */
#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
#define MDIO_WIS_DEVAD 0x2
/*bnx2x*/
#define MDIO_WIS_REG_LASI_CNTL 0x9002
@@ -415,13 +411,15 @@ The other bits are reserved and should be zero */
#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
-#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+ #define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
-#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
-#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
-#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+ #define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+ #define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+ #define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
#define MDIO_XS_DEVAD 0x4
#define MDIO_XS_REG_STATUS 0x0001
#define MDIO_XS_PLL_SEQUENCER 0x8000
@@ -439,12 +437,12 @@ The other bits are reserved and should be zero */
/*ieee*/
#define MDIO_AN_REG_CTRL 0x0000
#define MDIO_AN_REG_STATUS 0x0001
-#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+ #define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
#define MDIO_AN_REG_ADV_PAUSE 0x0010
-#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
-#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
-#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
-#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+ #define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+ #define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+ #define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+ #define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
#define MDIO_AN_REG_ADV 0x0011
#define MDIO_AN_REG_ADV2 0x0012
#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
@@ -465,13 +463,16 @@ The other bits are reserved and should be zero */
#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
-#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
+ #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB 0xffe2
+ #define BNX2X84858_PHY_ID 0x600d
+#define MDIO_AN_REG_848xx_ID_LSB 0xffe3
#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0
-#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
+ #define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008
#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
@@ -480,62 +481,62 @@ The other bits are reserved and should be zero */
/* BNX2X84823 only */
#define MDIO_CTL_DEVAD 0x1e
#define MDIO_CTL_REG_84823_MEDIA 0x401a
-#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ #define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
/* These pins configure the BNX2X84823 interface to MAC after reset. */
-#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
-#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ #define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+ #define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
/* These pins configure the BNX2X84823 interface to Line after reset. */
-#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
-#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
-#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ #define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+ #define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+ #define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
/* When this pin is active high during reset, the 10GBASE-T core is powered
 * down. When it is active low, the 10GBASE-T is powered up
*/
-#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
-#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
-#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
-#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
-#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+ #define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+ #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+ #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+ #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+ #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+ #define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
-#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+ #define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+ #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BNX2X84833 only */
#define MDIO_84833_TOP_CFG_FW_REV 0x400f
-#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
-#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
-#define MDIO_84833_SUPER_ISOLATE 0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
-
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
-#define PHY84833_CMD_GET_EEE_MODE 0x8008
-#define PHY84833_CMD_SET_EEE_MODE 0x8009
-#define PHY84833_CMD_GET_CURRENT_TEMP 0x8031
-/* Mailbox status set used by 84833. */
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* This is the mailbox register set used by the 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
+
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001
+#define PHY848xx_CMD_GET_EEE_MODE 0x8008
+#define PHY848xx_CMD_SET_EEE_MODE 0x8009
+#define PHY848xx_CMD_GET_CURRENT_TEMP 0x8031
+/* Mailbox status set used by 84833 only */
#define PHY84833_STATUS_CMD_RECEIVED 0x0001
#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
@@ -545,6 +546,19 @@ The other bits are reserved and should be zero */
#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+/* Mailbox Process */
+#define PHY84833_MB_PROCESS1 1
+#define PHY84833_MB_PROCESS2 2
+#define PHY84833_MB_PROCESS3 3
+
+
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED 0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb
+
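
The scratch registers above act as a firmware mailbox: arguments are written to the DATA scratch registers, a command code to CMD_HDLR_COMMAND, and CMD_HDLR_STATUS is polled until the PHY firmware reports pass or error. A simplified, self-contained sketch of that handshake; the register-file stub, the mdio_read/mdio_write helpers and the polling budget are assumptions for illustration, and the real driver additionally checks the open/busy status codes and goes through its clause-45 access routines:

#include <stdint.h>
#include <stdio.h>

#define MDIO_848xx_CMD_HDLR_COMMAND		0x4005
#define MDIO_848xx_CMD_HDLR_STATUS		0x4037
#define MDIO_848xx_CMD_HDLR_DATA1		0x4038
#define PHY848xx_CMD_GET_CURRENT_TEMP		0x8031
#define PHY84833_STATUS_CMD_COMPLETE_PASS	0x0004
#define PHY84833_STATUS_CMD_COMPLETE_ERROR	0x0008

/* Stand-in register file so the sketch runs without hardware. */
static uint16_t regs[0x10000];

static void mdio_write(uint16_t reg, uint16_t val)
{
	regs[reg] = val;
	/* Pretend the PHY firmware answers a command instantly. */
	if (reg == MDIO_848xx_CMD_HDLR_COMMAND)
		regs[MDIO_848xx_CMD_HDLR_STATUS] =
			PHY84833_STATUS_CMD_COMPLETE_PASS;
}

static uint16_t mdio_read(uint16_t reg)
{
	return regs[reg];
}

/* Mailbox handshake: load an argument, issue the command, poll status. */
static int phy848xx_run_cmd(uint16_t cmd, uint16_t arg)
{
	int budget = 1000;	/* arbitrary polling budget for the sketch */

	mdio_write(MDIO_848xx_CMD_HDLR_DATA1, arg);
	mdio_write(MDIO_848xx_CMD_HDLR_COMMAND, cmd);

	while (budget--) {
		uint16_t status = mdio_read(MDIO_848xx_CMD_HDLR_STATUS);

		if (status == PHY84833_STATUS_CMD_COMPLETE_PASS)
			return 0;
		if (status == PHY84833_STATUS_CMD_COMPLETE_ERROR)
			return -1;
	}
	return -1;	/* timed out */
}

int main(void)
{
	printf("cmd status: %d\n",
	       phy848xx_run_cmd(PHY848xx_CMD_GET_CURRENT_TEMP, 0));
	return 0;
}
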
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
@@ -553,8 +567,8 @@ The other bits are reserved and should be zero */
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
-#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
-#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+ #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+ #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
#define MDIO_WC_REG_PCS_STATUS2 0x0021
#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096
#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
@@ -570,6 +584,8 @@ The other bits are reserved and should be zero */
#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET 0x01
+#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK 0x000e
#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
@@ -585,7 +601,9 @@ The other bits are reserved and should be zero */
#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXSBLK2_LANE_RESET 0x810a
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
@@ -599,35 +617,35 @@ The other bits are reserved and should be zero */
#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
-#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
-#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
-#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
-#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
+ #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000
+ #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100
+ #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010
+ #define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1
#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
-#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+ #define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
#define MDIO_WC_REG_DSC1B0_UC_CTRL 0x820e
#define MDIO_WC_REG_DSC1B0_UC_CTRL_RDY4CMD (1<<7)
#define MDIO_WC_REG_DSC_SMC 0x8213
#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
-#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
-#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
-#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
-#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
-#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
-#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
-#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+ #define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+ #define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+ #define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+ #define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+ #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+ #define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+ #define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2
#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
@@ -689,8 +707,8 @@ The other bits are reserved and should be zero */
#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
-#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
-#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
@@ -700,33 +718,31 @@ The other bits are reserved and should be zero */
#define MDIO_REG_GPHY_MII_STATUS 0x1
#define MDIO_REG_GPHY_PHYID_LSB 0x3
#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
-#define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000
-#define MDIO_REG_GPHY_CL45_REG_READ 0xc000
+ #define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000
+ #define MDIO_REG_GPHY_CL45_REG_READ 0xc000
#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
-#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+ #define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
#define MDIO_REG_GPHY_EXP_ACCESS 0x17
-#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
-#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
+ #define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
+ #define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
-#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+ #define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
#define MDIO_REG_GPHY_SHADOW 0x1c
-#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
-#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
-#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
-#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
-#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
-
-typedef elink_status_t(*read_sfp_module_eeprom_func_p) (struct elink_phy * phy,
- struct elink_params *
- params,
- uint8_t dev_addr,
- uint16_t addr,
- uint8_t byte_cnt,
- uint8_t * o_buf,
- uint8_t);
+ #define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
+ #define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+ #define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+ #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+ #define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
+
+typedef elink_status_t (*read_sfp_module_eeprom_func_p)(struct elink_phy *phy,
+ struct elink_params *params,
+ uint8_t dev_addr, uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t *o_buf, uint8_t);
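
The typedef above is the per-PHY callback the SFP code dispatches through when it needs module EEPROM bytes. A stub showing only the shape of such a callback and a call through the pointer; the struct bodies, the ELINK_STATUS_OK value and the trailing parameter name is_init are reduced stand-ins for illustration, not the driver's real definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reduced stand-ins so the sketch compiles on its own. */
typedef int elink_status_t;
#define ELINK_STATUS_OK 0
struct elink_phy { int id; };
struct elink_params { int port; };

typedef elink_status_t (*read_sfp_module_eeprom_func_p)(struct elink_phy *phy,
					struct elink_params *params,
					uint8_t dev_addr, uint16_t addr,
					uint8_t byte_cnt,
					uint8_t *o_buf, uint8_t is_init);

/* Fake reader: fills the buffer instead of touching two-wire hardware. */
static elink_status_t fake_read_eeprom(struct elink_phy *phy,
				       struct elink_params *params,
				       uint8_t dev_addr, uint16_t addr,
				       uint8_t byte_cnt, uint8_t *o_buf,
				       uint8_t is_init)
{
	(void)phy; (void)params; (void)dev_addr; (void)is_init;
	memset(o_buf, 0, byte_cnt);
	o_buf[0] = (uint8_t)addr;	/* dummy payload */
	return ELINK_STATUS_OK;
}

int main(void)
{
	struct elink_phy phy = { 0 };
	struct elink_params params = { 0 };
	uint8_t buf[2];
	read_sfp_module_eeprom_func_p read_func = fake_read_eeprom;

	if (read_func(&phy, &params, 0xa0, 0x2, sizeof(buf), buf, 0) ==
	    ELINK_STATUS_OK)
		printf("byte 0 = 0x%02x\n", buf[0]);
	return 0;
}
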
/********************************************************/
#define ELINK_ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -850,21 +866,29 @@ typedef elink_status_t(*read_sfp_module_eeprom_func_p) (struct elink_phy * phy,
LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
#define ELINK_SFP_EEPROM_CON_TYPE_ADDR 0x2
-#define ELINK_SFP_EEPROM_CON_TYPE_VAL_LC 0x7
-#define ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
-#define ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
+ #define ELINK_SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
+ #define ELINK_SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
+
-#define ELINK_SFP_EEPROM_COMP_CODE_ADDR 0x3
-#define ELINK_SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
-#define ELINK_SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
-#define ELINK_SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
+#define ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR 0x3
+ #define ELINK_SFP_EEPROM_10G_COMP_CODE_SR_MASK (1 << 4)
+ #define ELINK_SFP_EEPROM_10G_COMP_CODE_LR_MASK (1 << 5)
+ #define ELINK_SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1 << 6)
+
+#define ELINK_SFP_EEPROM_1G_COMP_CODE_ADDR 0x6
+ #define ELINK_SFP_EEPROM_1G_COMP_CODE_SX (1 << 0)
+ #define ELINK_SFP_EEPROM_1G_COMP_CODE_LX (1 << 1)
+ #define ELINK_SFP_EEPROM_1G_COMP_CODE_CX (1 << 2)
+ #define ELINK_SFP_EEPROM_1G_COMP_CODE_BASE_T (1 << 3)
#define ELINK_SFP_EEPROM_FC_TX_TECH_ADDR 0x8
-#define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
-#define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+ #define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
+ #define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
#define ELINK_SFP_EEPROM_OPTIONS_ADDR 0x40
-#define ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
+ #define ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
#define ELINK_SFP_EEPROM_OPTIONS_SIZE 2
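
The EEPROM offsets above follow the standard SFP ID map; a short sketch of testing the 10G compliance byte once it has been read into a buffer (the buffer content is hard-coded here, and only the SR/LR bits from the defines above are checked):

#include <stdint.h>
#include <stdio.h>

#define ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR	0x3
#define ELINK_SFP_EEPROM_10G_COMP_CODE_SR_MASK	(1 << 4)
#define ELINK_SFP_EEPROM_10G_COMP_CODE_LR_MASK	(1 << 5)

int main(void)
{
	/* Pretend byte 3 of the SFP EEPROM was read back as 0x20 (10G-LR). */
	uint8_t eeprom[8] = { 0 };

	eeprom[ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR] = 0x20;

	if (eeprom[ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR] &
	    ELINK_SFP_EEPROM_10G_COMP_CODE_LR_MASK)
		printf("optical 10G-LR module\n");
	else if (eeprom[ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR] &
		 ELINK_SFP_EEPROM_10G_COMP_CODE_SR_MASK)
		printf("optical 10G-SR module\n");
	else
		printf("not a 10G optical module\n");
	return 0;
}
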
#define ELINK_EDC_MODE_LINEAR 0x0022
@@ -883,6 +907,10 @@ typedef elink_status_t(*read_sfp_module_eeprom_func_p) (struct elink_phy * phy,
#define ELINK_MAX_PACKET_SIZE (9700)
#define MAX_KR_LINK_RETRY 4
+#define DEFAULT_TX_DRV_BRDCT 2
+#define DEFAULT_TX_DRV_IFIR 0
+#define DEFAULT_TX_DRV_POST2 3
+#define DEFAULT_TX_DRV_IPRE_DRIVER 6
/**********************************************************/
/* INTERFACE */
@@ -900,6 +928,11 @@ typedef elink_status_t(*read_sfp_module_eeprom_func_p) (struct elink_phy * phy,
(_bank + (_addr & 0xf)), \
_val)
+static elink_status_t elink_check_half_open_conn(struct elink_params *params,
+ struct elink_vars *vars, uint8_t notify);
+static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
+ struct elink_params *params);
+
static uint32_t elink_bits_en(struct bnx2x_softc *sc, uint32_t reg, uint32_t bits)
{
uint32_t val = REG_RD(sc, reg);
@@ -935,16 +968,16 @@ static int elink_check_lfa(struct elink_params *params)
struct bnx2x_softc *sc = params->sc;
additional_config =
- REG_RD(sc, params->lfa_base +
- offsetof(struct shmem_lfa, additional_config));
+ REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
/* NOTE: must be first condition checked -
- * to verify DCC bit is cleared in any case!
- */
+ * to verify DCC bit is cleared in any case!
+ */
if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
- PMD_DRV_LOG(DEBUG, "No LFA due to DCC flap after clp exit");
+ ELINK_DEBUG_P0(sc, "No LFA due to DCC flap after clp exit");
REG_WR(sc, params->lfa_base +
- offsetof(struct shmem_lfa, additional_config),
+ offsetof(struct shmem_lfa, additional_config),
additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
return LFA_DCC_LFA_DISABLED;
}
@@ -983,8 +1016,8 @@ static int elink_check_lfa(struct elink_params *params)
offsetof(struct shmem_lfa, req_duplex));
req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
- PMD_DRV_LOG(INFO, "Duplex mismatch %x vs. %x",
- (saved_val & lfa_mask), (req_val & lfa_mask));
+ ELINK_DEBUG_P2(sc, "Duplex mismatch %x vs. %x",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_DUPLEX_MISMATCH;
}
/* Compare Flow Control */
@@ -992,8 +1025,8 @@ static int elink_check_lfa(struct elink_params *params)
offsetof(struct shmem_lfa, req_flow_ctrl));
req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
- PMD_DRV_LOG(DEBUG, "Flow control mismatch %x vs. %x",
- (saved_val & lfa_mask), (req_val & lfa_mask));
+ ELINK_DEBUG_P2(sc, "Flow control mismatch %x vs. %x",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_FLOW_CTRL_MISMATCH;
}
/* Compare Link Speed */
@@ -1001,8 +1034,8 @@ static int elink_check_lfa(struct elink_params *params)
offsetof(struct shmem_lfa, req_line_speed));
req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
- PMD_DRV_LOG(DEBUG, "Link speed mismatch %x vs. %x",
- (saved_val & lfa_mask), (req_val & lfa_mask));
+ ELINK_DEBUG_P2(sc, "Link speed mismatch %x vs. %x",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_LINK_SPEED_MISMATCH;
}
@@ -1012,21 +1045,21 @@ static int elink_check_lfa(struct elink_params *params)
speed_cap_mask[cfg_idx]));
if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
- PMD_DRV_LOG(DEBUG, "Speed Cap mismatch %x vs. %x",
- cur_speed_cap_mask,
- params->speed_cap_mask[cfg_idx]);
+ ELINK_DEBUG_P2(sc, "Speed Cap mismatch %x vs. %x",
+ cur_speed_cap_mask,
+ params->speed_cap_mask[cfg_idx]);
return LFA_SPEED_CAP_MISMATCH;
}
}
cur_req_fc_auto_adv =
- REG_RD(sc, params->lfa_base +
- offsetof(struct shmem_lfa, additional_config)) &
- REQ_FC_AUTO_ADV_MASK;
+ REG_RD(sc, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config)) &
+ REQ_FC_AUTO_ADV_MASK;
- if ((uint16_t) cur_req_fc_auto_adv != params->req_fc_auto_adv) {
- PMD_DRV_LOG(DEBUG, "Flow Ctrl AN mismatch %x vs. %x",
- cur_req_fc_auto_adv, params->req_fc_auto_adv);
+ if ((uint16_t)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+ ELINK_DEBUG_P2(sc, "Flow Ctrl AN mismatch %x vs. %x",
+ cur_req_fc_auto_adv, params->req_fc_auto_adv);
return LFA_FLOW_CTRL_MISMATCH;
}
@@ -1038,26 +1071,25 @@ static int elink_check_lfa(struct elink_params *params)
(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)) ||
((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
(params->eee_mode & ELINK_EEE_MODE_ADV_LPI))) {
- PMD_DRV_LOG(DEBUG, "EEE mismatch %x vs. %x", params->eee_mode,
- eee_status);
+ ELINK_DEBUG_P2(sc, "EEE mismatch %x vs. %x", params->eee_mode,
+ eee_status);
return LFA_EEE_MISMATCH;
}
/* LFA conditions are met */
return 0;
}
-
/******************************************************************/
/* EPIO/GPIO section */
/******************************************************************/
static void elink_get_epio(struct bnx2x_softc *sc, uint32_t epio_pin,
- uint32_t * en)
+ uint32_t *en)
{
uint32_t epio_mask, gp_oenable;
*en = 0;
/* Sanity check */
if (epio_pin > 31) {
- PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to get", epio_pin);
+ ELINK_DEBUG_P1(sc, "Invalid EPIO pin %d to get", epio_pin);
return;
}
@@ -1068,17 +1100,16 @@ static void elink_get_epio(struct bnx2x_softc *sc, uint32_t epio_pin,
*en = (REG_RD(sc, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
}
-
static void elink_set_epio(struct bnx2x_softc *sc, uint32_t epio_pin, uint32_t en)
{
uint32_t epio_mask, gp_output, gp_oenable;
/* Sanity check */
if (epio_pin > 31) {
- PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to set", epio_pin);
+ ELINK_DEBUG_P1(sc, "Invalid EPIO pin %d to set", epio_pin);
return;
}
- PMD_DRV_LOG(DEBUG, "Setting EPIO pin %d to %d", epio_pin, en);
+ ELINK_DEBUG_P2(sc, "Setting EPIO pin %d to %d", epio_pin, en);
epio_mask = 1 << epio_pin;
/* Set this EPIO to output */
gp_output = REG_RD(sc, MCP_REG_MCPR_GP_OUTPUTS);
@@ -1104,12 +1135,12 @@ static void elink_set_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg,
} else {
uint8_t gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
uint8_t gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
- elink_cb_gpio_write(sc, gpio_num, (uint8_t) val, gpio_port);
+ elink_cb_gpio_write(sc, gpio_num, (uint8_t)val, gpio_port);
}
}
static uint32_t elink_get_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg,
- uint32_t * val)
+ uint32_t *val)
{
if (pin_cfg == PIN_CFG_NA)
return ELINK_STATUS_ERROR;
@@ -1121,14 +1152,939 @@ static uint32_t elink_get_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg,
*val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
}
return ELINK_STATUS_OK;
+}
+
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+static void elink_ets_e2e3a0_disabled(struct elink_params *params)
+{
+ /* ETS disabled configuration*/
+ struct bnx2x_softc *sc = params->sc;
+
+ ELINK_DEBUG_P0(sc, "ETS E2E3 disabled configuration");
+
+ /* mapping between entry priority to client number (0, 1, 2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
+ */
+
+ REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+ /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+ /* For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+ REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+ /* ETS mode disable */
+ REG_WR(sc, PBF_REG_ETS_ENABLED, 0);
+ /* If ETS mode is enabled (there is no strict priority) defines a WFQ
+ * weight for COS0/COS1.
+ */
+ REG_WR(sc, PBF_REG_COS0_WEIGHT, 0x2710);
+ REG_WR(sc, PBF_REG_COS1_WEIGHT, 0x2710);
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(sc, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+ REG_WR(sc, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+/******************************************************************************
+ * Description:
+ * Getting min_w_val will be set according to line speed .
+ *.
+ ******************************************************************************/
+static uint32_t elink_ets_get_min_w_val_nig(const struct elink_vars *vars)
+{
+ uint32_t min_w_val = 0;
+ /* Calculate min_w_val.*/
+ if (vars->link_up) {
+ if (vars->line_speed == ELINK_SPEED_20000)
+ min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ else
+ min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+ } else {
+ min_w_val = ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ }
+ /* If the link isn't up (static configuration, for example), the
+ * link is treated as 20GBPS.
+ */
+ return min_w_val;
+}
+/******************************************************************************
+ * Description:
+ * Get the credit upper bound from min_w_val.
+ *
+ ******************************************************************************/
+static uint32_t elink_ets_get_credit_upper_bound(const uint32_t min_w_val)
+{
+ const uint32_t credit_upper_bound = (uint32_t)
+ ELINK_MAXVAL((150 * min_w_val),
+ ELINK_MAX_PACKET_SIZE);
+ return credit_upper_bound;
+}
+/******************************************************************************
+ * Description:
+ * Set credit upper bound for NIG.
+ *.
+ ******************************************************************************/
+static void elink_ets_e3b0_set_credit_upper_bound_nig(
+ const struct elink_params *params,
+ const uint32_t min_w_val)
+{
+ struct bnx2x_softc *sc = params->sc;
+ const uint8_t port = params->port;
+ const uint32_t credit_upper_bound =
+ elink_ets_get_credit_upper_bound(min_w_val);
+
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+ if (!port) {
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+ credit_upper_bound);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+ credit_upper_bound);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+ credit_upper_bound);
+ }
+}
+/******************************************************************************
+ * Description:
+ * Return the NIG ETS registers to their init values, except
+ * credit_upper_bound, which isn't used in this configuration
+ * (no WFQ is enabled) and is configured according to the spec.
+ *
+ ******************************************************************************/
+static void elink_ets_e3b0_nig_disabled(const struct elink_params *params,
+ const struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ const uint8_t port = params->port;
+ const uint32_t min_w_val = elink_ets_get_min_w_val_nig(vars);
+ /* Mapping between entry priority to client number (0, 1, 2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
+ * COS5)(HIGHEST) 4-bit client num. TODO_ETS - Should be done by
+ * reset value or init tool
+ */
+ if (port) {
+ REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+ REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+ } else {
+ REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+ }
+ /* For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+ NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* Mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ if (port) {
+ /*Port 1 has 6 COS*/
+ REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+ REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+ } else {
+ /*Port 0 has 9 COS*/
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+ 0x43210876);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+ }
+
+ /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ if (port)
+ REG_WR(sc, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+ else
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+ /* Please notice the register addresses are not contiguous, so a
+ * for loop here is not appropriate. In 2 port mode port0 only COS0-5
+ * can be used. DEBUG0, DEBUG1, MGMT are never used for WFQ. In 4
+ * port mode port1 only COS0-2 can be used. DEBUG0, DEBUG1, MGMT
+ * are never used for WFQ
+ */
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+ if (!port) {
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+ }
+
+ elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+ * Description:
+ * Set credit upper bound for PBF.
+ *.
+ ******************************************************************************/
+static void elink_ets_e3b0_set_credit_upper_bound_pbf(
+ const struct elink_params *params,
+ const uint32_t min_w_val)
+{
+ struct bnx2x_softc *sc = params->sc;
+ const uint32_t credit_upper_bound =
+ elink_ets_get_credit_upper_bound(min_w_val);
+ const uint8_t port = params->port;
+ uint32_t base_upper_bound = 0;
+ uint8_t max_cos = 0;
+ uint8_t i = 0;
+ /* In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+ * port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (!port) {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+ max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+ max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(sc, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+ * Description:
+ * Return the PBF ETS registers to their init values, except
+ * credit_upper_bound, which isn't used in this configuration
+ * (no WFQ is enabled) and is configured according to the spec.
+ *
+ ******************************************************************************/
+static void elink_ets_e3b0_pbf_disabled(const struct elink_params *params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ const uint8_t port = params->port;
+ const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL;
+ uint8_t i = 0;
+ uint32_t base_weight = 0;
+ uint8_t max_cos = 0;
+
+ /* Mapping between entry priority to client number (0 - COS0
+ * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4-bit client num.
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000) */
+ REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
+ else
+ /* (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
+
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000)*/
+ REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+ else
+ /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(sc, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+ REG_WR(sc, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+ PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
+
+
+ REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
+
+ REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
+ /* In 2 port mode port0 has COS0-5 that can be used for WFQ.
+ * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (!port) {
+ base_weight = PBF_REG_COS0_WEIGHT_P0;
+ max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_weight = PBF_REG_COS0_WEIGHT_P1;
+ max_cos = ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(sc, base_weight + (0x4 * i), 0);
+
+ elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+ * Description:
+ * E3B0 disable basically returns the registers to their init values.
+ *
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
+ const struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ if (!CHIP_IS_E3B0(sc)) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_e3b0_disabled the chip isn't E3B0");
+ return ELINK_STATUS_ERROR;
+ }
+
+ elink_ets_e3b0_nig_disabled(params, vars);
+
+ elink_ets_e3b0_pbf_disabled(params);
+
+ return ELINK_STATUS_OK;
+}
+
+/******************************************************************************
+ * Description:
+ * Disable basically returns the registers to their init values.
+ *
+ ******************************************************************************/
+elink_status_t elink_ets_disabled(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ elink_status_t elink_status = ELINK_STATUS_OK;
+
+ if ((CHIP_IS_E2(sc)) || (CHIP_IS_E3A0(sc))) {
+ elink_ets_e2e3a0_disabled(params);
+ } else if (CHIP_IS_E3B0(sc)) {
+ elink_status = elink_ets_e3b0_disabled(params, vars);
+ } else {
+ ELINK_DEBUG_P0(sc, "elink_ets_disabled - chip not supported");
+ return ELINK_STATUS_ERROR;
+ }
+
+ return elink_status;
+}
+
+/******************************************************************************
+ * Description
+ * Set the COS mapping to SP and BW; until this point none of the COS
+ * are set as SP or BW.
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,
+ __rte_unused const struct elink_ets_params *ets_params,
+ const uint8_t cos_sp_bitmap,
+ const uint8_t cos_bw_bitmap)
+{
+ struct bnx2x_softc *sc = params->sc;
+ const uint8_t port = params->port;
+ const uint8_t nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+ const uint8_t pbf_cli_sp_bitmap = cos_sp_bitmap;
+ const uint8_t nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+ const uint8_t pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+ REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+ REG_WR(sc, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ nig_cli_subject2wfq_bitmap);
+
+ REG_WR(sc, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+ pbf_cli_subject2wfq_bitmap);
+
+ return ELINK_STATUS_OK;
+}
+
+/******************************************************************************
+ * Description:
+ * This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+ * are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_set_cos_bw(struct bnx2x_softc *sc,
+ const uint8_t cos_entry,
+ const uint32_t min_w_val_nig,
+ const uint32_t min_w_val_pbf,
+ const uint16_t total_bw,
+ const uint8_t bw,
+ const uint8_t port)
+{
+ uint32_t nig_reg_address_crd_weight = 0;
+ uint32_t pbf_reg_address_crd_weight = 0;
+ /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ const uint32_t cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ const uint32_t cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_address_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
+ case 1:
+ nig_reg_address_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
+ case 2:
+ nig_reg_address_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
+ case 3:
+ if (port)
+ return ELINK_STATUS_ERROR;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_address_crd_weight =
+ PBF_REG_COS3_WEIGHT_P0;
+ break;
+ case 4:
+ if (port)
+ return ELINK_STATUS_ERROR;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_address_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
+ case 5:
+ if (port)
+ return ELINK_STATUS_ERROR;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_address_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
+ }
+
+ REG_WR(sc, nig_reg_address_crd_weight, cos_bw_nig);
+
+ REG_WR(sc, pbf_reg_address_crd_weight, cos_bw_pbf);
+
+ return ELINK_STATUS_OK;
+}
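
Concretely, with a total_bw of 100 the credit weight written for a COS is just its percentage scaled by min_w_val; a tiny host-side check of the formula above, where the min_w_val number is invented for the example and is not the driver's ELINK_ETS_E3B0_* constant:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t min_w_val_nig = 1500;	/* assumed, not the driver constant */
	const uint16_t total_bw = 100;		/* sum of all BW COS entries */
	const uint8_t bw = 40;			/* this COS gets 40% */
	const uint32_t cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;

	printf("credit weight written to the NIG register: %u\n",
	       (unsigned int)cos_bw_nig);
	return 0;
}
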
+/******************************************************************************
+ * Description:
+ * Calculate the total BW. A value of 0 isn't legal.
+ *
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_get_total_bw(
+ const struct elink_params *params,
+ struct elink_ets_params *ets_params,
+ uint16_t *total_bw)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t cos_idx = 0;
+ uint8_t is_bw_cos_exist = 0;
+
+ *total_bw = 0;
+ /* Calculate total BW requested */
+ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+ if (ets_params->cos[cos_idx].state == elink_cos_state_bw) {
+ is_bw_cos_exist = 1;
+ if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+ ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config BW"
+ " was set to 0");
+ /* This is to prevent a state when ramrods
+ * can't be sent
+ */
+ ets_params->cos[cos_idx].params.bw_params.bw
+ = 1;
+ }
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
+ }
+ }
+
+ /* Check total BW is valid */
+ if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+ if (*total_bw == 0) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_E3B0_config total BW shouldn't be 0");
+ return ELINK_STATUS_ERROR;
+ }
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_E3B0_config total BW should be 100");
+ /* We can handle a case where the BW isn't 100; this can happen
+ * if the TCs are joined.
+ */
+ }
+ return ELINK_STATUS_OK;
+}
+
+/******************************************************************************
+ * Description:
+ * Invalidate all the sp_pri_to_cos.
+ *
+ ******************************************************************************/
+static void elink_ets_e3b0_sp_pri_to_cos_init(uint8_t *sp_pri_to_cos)
+{
+ uint8_t pri = 0;
+ for (pri = 0; pri < ELINK_DCBX_MAX_NUM_COS; pri++)
+ sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+ * Description:
+ * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+ * according to sp_pri_to_cos.
+ *
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_sp_pri_to_cos_set(
+ const struct elink_params *params,
+ uint8_t *sp_pri_to_cos,
+ const uint8_t pri,
+ const uint8_t cos_entry)
+{
+ struct bnx2x_softc *sc = params->sc;
+ const uint8_t port = params->port;
+ const uint8_t max_num_of_cos = (port) ?
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ if (pri >= max_num_of_cos) {
+ ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter Illegal strict priority");
+ return ELINK_STATUS_ERROR;
+ }
+
+ if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
+ ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter There can't be two COS's with "
+ "the same strict pri");
+ return ELINK_STATUS_ERROR;
+ }
+
+ sp_pri_to_cos[pri] = cos_entry;
+ return ELINK_STATUS_OK;
+}
+
+/******************************************************************************
+ * Description:
+ * Returns the correct value according to COS and priority in
+ * the sp_pri_cli register.
+ *
+ ******************************************************************************/
+static uint64_t elink_e3b0_sp_get_pri_cli_reg(const uint8_t cos,
+ const uint8_t cos_offset,
+ const uint8_t pri_set,
+ const uint8_t pri_offset,
+ const uint8_t entry_size)
+{
+ uint64_t pri_cli_nig = 0;
+ pri_cli_nig = ((uint64_t)(cos + cos_offset)) << (entry_size *
+ (pri_set + pri_offset));
+
+ return pri_cli_nig;
+}
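
For the NIG variant defined next, cos_offset and pri_offset are both 3 and entries are 4 bits wide, so COS0 placed in the first strict-priority slot lands in the nibble just above the fixed MCP/debug entries. A quick standalone check of that packing (the 0x210 literal mirrors the fixed MCP/dbg portion used later in elink_ets_e3b0_sp_set_pri_cli_reg):

#include <stdint.h>
#include <stdio.h>

/* Mirrors elink_e3b0_sp_get_pri_cli_reg() so the packing can be checked
 * on the host. */
static uint64_t sp_pri_cli_field(uint8_t cos, uint8_t cos_offset,
				 uint8_t pri_set, uint8_t pri_offset,
				 uint8_t entry_size)
{
	return ((uint64_t)(cos + cos_offset)) <<
	       (entry_size * (pri_set + pri_offset));
}

int main(void)
{
	/* COS0 placed in the first strict-priority slot after MCP/dbg0/dbg1. */
	uint64_t field = sp_pri_cli_field(0, 3, 0, 3, 4);

	printf("ARB_PRIORITY_CLIENT2 becomes 0x%llx\n",
	       (unsigned long long)(0x210 | field));
	return 0;
}
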
+/******************************************************************************
+ * Description:
+ * Returns the correct value according to COS and priority in the
+ * sp_pri_cli register for NIG.
+ *
+ ******************************************************************************/
+static uint64_t elink_e3b0_sp_get_pri_cli_reg_nig(const uint8_t cos,
+ const uint8_t pri_set)
+{
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ const uint8_t nig_cos_offset = 3;
+ const uint8_t nig_pri_offset = 3;
+
+ return elink_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+ nig_pri_offset, 4);
+}
+/******************************************************************************
+ * Description:
+ * Returns the correct value according to COS and priority in the
+ * sp_pri_cli register for PBF.
+ *
+ ******************************************************************************/
+static uint64_t elink_e3b0_sp_get_pri_cli_reg_pbf(const uint8_t cos,
+ const uint8_t pri_set)
+{
+ const uint8_t pbf_cos_offset = 0;
+ const uint8_t pbf_pri_offset = 0;
+
+ return elink_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+ pbf_pri_offset, 3);
+}
+
+/******************************************************************************
+ * Description:
+ * Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+ * according to sp_pri_to_cos (which COS has higher priority).
+ *
+ ******************************************************************************/
+static elink_status_t elink_ets_e3b0_sp_set_pri_cli_reg(
+ const struct elink_params *params,
+ uint8_t *sp_pri_to_cos)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint8_t i = 0;
+ const uint8_t port = params->port;
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ uint64_t pri_cli_nig = 0x210;
+ uint32_t pri_cli_pbf = 0x0;
+ uint8_t pri_set = 0;
+ uint8_t pri_bitmask = 0;
+ const uint8_t max_num_of_cos = (port) ?
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
+
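+ /* Bitmap of COS entries that still need an arbitration slot */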
+ uint8_t cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+ /* Set all the strict priority first */
+ for (i = 0; i < max_num_of_cos; i++) {
+ if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+ if (sp_pri_to_cos[i] >= ELINK_DCBX_MAX_NUM_COS) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid cos entry");
+ return ELINK_STATUS_ERROR;
+ }
+
+ pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig(
+ sp_pri_to_cos[i], pri_set);
+
+ pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf(
+ sp_pri_to_cos[i], pri_set);
+ pri_bitmask = 1 << sp_pri_to_cos[i];
+ /* COS is used, remove it from the bitmap. */
+ if (!(pri_bitmask & cos_bit_to_set)) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid There can't be two COS's with"
+ " the same strict pri");
+ return ELINK_STATUS_ERROR;
+ }
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ /* Set all the non-strict priority entries; i = COS */
+ for (i = 0; i < max_num_of_cos; i++) {
+ pri_bitmask = 1 << i;
+ /* Check if COS was already used for SP */
+ if (pri_bitmask & cos_bit_to_set) {
+ /* COS wasn't used for SP */
+ pri_cli_nig |= elink_e3b0_sp_get_pri_cli_reg_nig(
+ i, pri_set);
+
+ pri_cli_pbf |= elink_e3b0_sp_get_pri_cli_reg_pbf(
+ i, pri_set);
+ /* COS is used, remove it from the bitmap. */
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ if (pri_set != max_num_of_cos) {
+ ELINK_DEBUG_P0(sc, "elink_ets_e3b0_sp_set_pri_cli_reg not all "
+ "entries were set");
+ return ELINK_STATUS_ERROR;
+ }
+
+ if (port) {
+ /* Only 6 usable clients*/
+ REG_WR(sc, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+ (uint32_t)pri_cli_nig);
+
+ REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
+ } else {
+ /* Only 9 usable clients*/
+ const uint32_t pri_cli_nig_lsb = (uint32_t)(pri_cli_nig);
+ const uint32_t pri_cli_nig_msb = (uint32_t)
+ ((pri_cli_nig >> 32) & 0xF);
+
+ REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+ pri_cli_nig_lsb);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+ pri_cli_nig_msb);
+
+ REG_WR(sc, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
+ }
+ return ELINK_STATUS_OK;
+}
+
+/******************************************************************************
+ * Description:
+ * Configure the COS to ETS according to BW and SP settings.
+ ******************************************************************************/
+elink_status_t elink_ets_e3b0_config(const struct elink_params *params,
+ const struct elink_vars *vars,
+ struct elink_ets_params *ets_params)
+{
+ struct bnx2x_softc *sc = params->sc;
+ elink_status_t elink_status = ELINK_STATUS_OK;
+ const uint8_t port = params->port;
+ uint16_t total_bw = 0;
+ const uint32_t min_w_val_nig = elink_ets_get_min_w_val_nig(vars);
+ const uint32_t min_w_val_pbf = ELINK_ETS_E3B0_PBF_MIN_W_VAL;
+ uint8_t cos_bw_bitmap = 0;
+ uint8_t cos_sp_bitmap = 0;
+ uint8_t sp_pri_to_cos[ELINK_DCBX_MAX_NUM_COS] = {0};
+ const uint8_t max_num_of_cos = (port) ?
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0;
+ uint8_t cos_entry = 0;
+
+ if (!CHIP_IS_E3B0(sc)) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_e3b0_disabled the chip isn't E3B0");
+ return ELINK_STATUS_ERROR;
+ }
+
+ if (ets_params->num_of_cos > max_num_of_cos) {
+ ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config the number of COS "
+ "isn't supported");
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* Prepare sp strict priority parameters*/
+ elink_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+ /* Prepare BW parameters*/
+ elink_status = elink_ets_e3b0_get_total_bw(params, ets_params,
+ &total_bw);
+ if (elink_status != ELINK_STATUS_OK) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_E3B0_config get_total_bw failed");
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
+ */
+ elink_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+ elink_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+
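+ /* Program BW entries now; record strict entries for the SP registers below */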
+ for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+ if (elink_cos_state_bw == ets_params->cos[cos_entry].state) {
+ cos_bw_bitmap |= (1 << cos_entry);
+ /* The function also sets the BW in HW (not the mapping
+ * yet)
+ */
+ elink_status = elink_ets_e3b0_set_cos_bw(
+ sc, cos_entry, min_w_val_nig, min_w_val_pbf,
+ total_bw,
+ ets_params->cos[cos_entry].params.bw_params.bw,
+ port);
+ } else if (elink_cos_state_strict ==
+ ets_params->cos[cos_entry].state){
+ cos_sp_bitmap |= (1 << cos_entry);
+
+ elink_status = elink_ets_e3b0_sp_pri_to_cos_set(
+ params,
+ sp_pri_to_cos,
+ ets_params->cos[cos_entry].params.sp_params.pri,
+ cos_entry);
+
+ } else {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_e3b0_config cos state not valid");
+ return ELINK_STATUS_ERROR;
+ }
+ if (elink_status != ELINK_STATUS_OK) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_e3b0_config set cos bw failed");
+ return elink_status;
+ }
+ }
+
+ /* Set SP register (which COS has higher priority) */
+ elink_status = elink_ets_e3b0_sp_set_pri_cli_reg(params,
+ sp_pri_to_cos);
+
+ if (elink_status != ELINK_STATUS_OK) {
+ ELINK_DEBUG_P0(sc,
+ "elink_ets_E3B0_config set_pri_cli_reg failed");
+ return elink_status;
+ }
+
+ /* Set client mapping of BW and strict */
+ elink_status = elink_ets_e3b0_cli_map(params, ets_params,
+ cos_sp_bitmap,
+ cos_bw_bitmap);
+
+ if (elink_status != ELINK_STATUS_OK) {
+ ELINK_DEBUG_P0(sc, "elink_ets_E3B0_config SP failed");
+ return elink_status;
+ }
+ return ELINK_STATUS_OK;
+}
+static void elink_ets_bw_limit_common(const struct elink_params *params)
+{
+ /* ETS disabled configuration */
+ struct bnx2x_softc *sc = params->sc;
+ ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration");
+ /* Defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+ /* Mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+ ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+ ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+ /* ETS mode enabled*/
+ REG_WR(sc, PBF_REG_ETS_ENABLED, 1);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+ /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
+ REG_WR(sc, PBF_REG_COS0_UPPER_BOUND,
+ ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(sc, PBF_REG_COS1_UPPER_BOUND,
+ ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void elink_ets_bw_limit(const struct elink_params *params,
+ const uint32_t cos0_bw,
+ const uint32_t cos1_bw)
+{
+ /* ETS disabled configuration*/
+ struct bnx2x_softc *sc = params->sc;
+ const uint32_t total_bw = cos0_bw + cos1_bw;
+ uint32_t cos0_credit_weight = 0;
+ uint32_t cos1_credit_weight = 0;
+
+ ELINK_DEBUG_P0(sc, "ETS enabled BW limit configuration");
+
+ if ((!total_bw) ||
+ (!cos0_bw) ||
+ (!cos1_bw)) {
+ ELINK_DEBUG_P0(sc, "Total BW can't be zero");
+ return;
+ }
+
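+ /* Scale each COS's share of the total BW into the credit-weight range */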
+ cos0_credit_weight = (cos0_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) /
+ total_bw;
+ cos1_credit_weight = (cos1_bw * ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT) /
+ total_bw;
+
+ elink_ets_bw_limit_common(params);
+
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+ REG_WR(sc, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+ REG_WR(sc, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
+
+elink_status_t elink_ets_strict(const struct elink_params *params,
+ const uint8_t strict_cos)
+{
+ /* ETS disabled configuration*/
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t val = 0;
+
+ ELINK_DEBUG_P0(sc, "ETS enabled strict configuration");
+ /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries,
+ * 3 - COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+ /* For strict priority entries defines the number of consecutive slots
+ * for the highest priority.
+ */
+ REG_WR(sc, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* ETS mode disable */
+ REG_WR(sc, PBF_REG_ETS_ENABLED, 0);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(sc, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(sc, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+ /* Mapping between entry priority to client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
+ * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
+ */
+ val = (!strict_cos) ? 0x2318 : 0x22E0;
+ REG_WR(sc, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+ return ELINK_STATUS_OK;
}
/******************************************************************/
/* PFC section */
/******************************************************************/
static void elink_update_pfc_xmac(struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_vars *vars,
+ __rte_unused uint8_t is_lb)
{
struct bnx2x_softc *sc = params->sc;
uint32_t xmac_base;
@@ -1143,7 +2099,8 @@ static void elink_update_pfc_xmac(struct elink_params *params,
pfc1_val = 0x2;
/* No PFC support */
- if (!(params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
/* RX flow control - Process pause frame in receive direction
*/
@@ -1153,12 +2110,12 @@ static void elink_update_pfc_xmac(struct elink_params *params,
/* TX flow control - Send pause packet when buffer is full */
if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
- } else { /* PFC support */
+ } else {/* PFC support */
pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
- XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
- XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
- XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
- XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
+ XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+ XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_TX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON;
/* Write pause and PFC registers */
REG_WR(sc, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
@@ -1172,21 +2129,76 @@ static void elink_update_pfc_xmac(struct elink_params *params,
REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
/* Set MAC address for source TX Pause/PFC frames */
REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_LO,
((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
- (params->mac_addr[4] << 8) | (params->mac_addr[5])));
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_HI,
- ((params->mac_addr[0] << 8) | (params->mac_addr[1])));
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
DELAY(30);
}
+static void elink_emac_get_pfc_stat(struct elink_params *params,
+ uint32_t pfc_frames_sent[2],
+ uint32_t pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ uint32_t val_xon = 0;
+ uint32_t val_xoff = 0;
+
+ ELINK_DEBUG_P0(sc, "pfc statistic read from EMAC");
+
+ /* PFC received frames */
+ val_xoff = REG_RD(sc, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
+ val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
+
+ pfc_frames_received[0] = val_xon + val_xoff;
+
+ /* PFC sent frames */
+ val_xoff = REG_RD(sc, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_SENT);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
+ val_xon = REG_RD(sc, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
+
+ pfc_frames_sent[0] = val_xon + val_xoff;
+}
+
+/* Read pfc statistic*/
+void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars,
+ uint32_t pfc_frames_sent[2],
+ uint32_t pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x_softc *sc = params->sc;
+
+ ELINK_DEBUG_P0(sc, "pfc statistic");
+
+ if (!vars->link_up)
+ return;
+
+ if (vars->mac_type == ELINK_MAC_TYPE_EMAC) {
+ ELINK_DEBUG_P0(sc, "About to read PFC stats from EMAC");
+ elink_emac_get_pfc_stat(params, pfc_frames_sent,
+ pfc_frames_received);
+ }
+}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
-static void elink_set_mdio_clk(struct bnx2x_softc *sc, uint32_t emac_base)
+static void elink_set_mdio_clk(struct bnx2x_softc *sc,
+ __rte_unused uint32_t chip_id,
+ uint32_t emac_base)
{
uint32_t new_mode, cur_mode;
uint32_t clc_cnt;
@@ -1205,26 +2217,16 @@ static void elink_set_mdio_clk(struct bnx2x_softc *sc, uint32_t emac_base)
return;
new_mode = cur_mode &
- ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+ ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
new_mode |= clc_cnt;
new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
- PMD_DRV_LOG(DEBUG, "Changing emac_mode from 0x%x to 0x%x",
- cur_mode, new_mode);
+ ELINK_DEBUG_P2(sc, "Changing emac_mode from 0x%x to 0x%x",
+ cur_mode, new_mode);
REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
DELAY(40);
}
-static void elink_set_mdio_emac_per_phy(struct bnx2x_softc *sc,
- struct elink_params *params)
-{
- uint8_t phy_index;
- /* Set mdio clock per phy */
- for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys;
- phy_index++)
- elink_set_mdio_clk(sc, params->phy[phy_index].mdio_ctrl);
-}
-
static uint8_t elink_is_4_port_mode(struct bnx2x_softc *sc)
{
uint32_t port4mode_ovwr_val;
@@ -1232,13 +2234,26 @@ static uint8_t elink_is_4_port_mode(struct bnx2x_softc *sc)
port4mode_ovwr_val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
if (port4mode_ovwr_val & (1 << 0)) {
/* Return 4-port mode override value */
- return (port4mode_ovwr_val & (1 << 1)) == (1 << 1);
+ return ((port4mode_ovwr_val & (1 << 1)) == (1 << 1));
}
/* Return 4-port mode from input pin */
- return (uint8_t) REG_RD(sc, MISC_REG_PORT4MODE_EN);
+ return (uint8_t)REG_RD(sc, MISC_REG_PORT4MODE_EN);
+}
+
+static void elink_set_mdio_emac_per_phy(struct bnx2x_softc *sc,
+ struct elink_params *params)
+{
+ uint8_t phy_index;
+
+ /* Set mdio clock per phy */
+ for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys;
+ phy_index++)
+ elink_set_mdio_clk(sc, params->chip_id,
+ params->phy[phy_index].mdio_ctrl);
}
-static void elink_emac_init(struct elink_params *params)
+static void elink_emac_init(struct elink_params *params,
+ __rte_unused struct elink_vars *vars)
{
/* reset and unreset the emac core */
struct bnx2x_softc *sc = params->sc;
@@ -1262,9 +2277,9 @@ static void elink_emac_init(struct elink_params *params)
timeout = 200;
do {
val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE);
- PMD_DRV_LOG(DEBUG, "EMAC reset reg is %u", val);
+ ELINK_DEBUG_P1(sc, "EMAC reset reg is %u", val);
if (!timeout) {
- PMD_DRV_LOG(DEBUG, "EMAC timeout!");
+ ELINK_DEBUG_P0(sc, "EMAC timeout!");
return;
}
timeout--;
@@ -1272,17 +2287,20 @@ static void elink_emac_init(struct elink_params *params)
elink_set_mdio_emac_per_phy(sc, params);
/* Set mac address */
- val = ((params->mac_addr[0] << 8) | params->mac_addr[1]);
+ val = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH, val);
val = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
- (params->mac_addr[4] << 8) | params->mac_addr[5]);
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH + 4, val);
}
static void elink_set_xumac_nig(struct elink_params *params,
- uint16_t tx_pause_en, uint8_t enable)
+ uint16_t tx_pause_en,
+ uint8_t enable)
{
struct bnx2x_softc *sc = params->sc;
@@ -1300,7 +2318,7 @@ static void elink_set_umac_rxtx(struct elink_params *params, uint8_t en)
uint32_t val;
struct bnx2x_softc *sc = params->sc;
if (!(REG_RD(sc, MISC_REG_RESET_REG_2) &
- (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
return;
val = REG_RD(sc, umac_base + UMAC_REG_COMMAND_CONFIG);
if (en)
@@ -1314,7 +2332,7 @@ static void elink_set_umac_rxtx(struct elink_params *params, uint8_t en)
}
static void elink_umac_enable(struct elink_params *params,
- struct elink_vars *vars, uint8_t lb)
+ struct elink_vars *vars, uint8_t lb)
{
uint32_t val;
uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
@@ -1327,15 +2345,15 @@ static void elink_umac_enable(struct elink_params *params,
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
- PMD_DRV_LOG(DEBUG, "enabling UMAC");
+ ELINK_DEBUG_P0(sc, "enabling UMAC");
/* This register opens the gate for the UMAC despite its name */
REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1);
val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
- UMAC_COMMAND_CONFIG_REG_PAD_EN |
- UMAC_COMMAND_CONFIG_REG_SW_RESET |
- UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
+ UMAC_COMMAND_CONFIG_REG_PAD_EN |
+ UMAC_COMMAND_CONFIG_REG_SW_RESET |
+ UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
switch (vars->line_speed) {
case ELINK_SPEED_10:
val |= (0 << 2);
@@ -1350,8 +2368,8 @@ static void elink_umac_enable(struct elink_params *params,
val |= (3 << 2);
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid speed for UMAC %d",
- vars->line_speed);
+ ELINK_DEBUG_P1(sc, "Invalid speed for UMAC %d",
+ vars->line_speed);
break;
}
if (!(vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
@@ -1368,7 +2386,7 @@ static void elink_umac_enable(struct elink_params *params,
/* Configure UMAC for EEE */
if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
- PMD_DRV_LOG(DEBUG, "configured UMAC for EEE");
+ ELINK_DEBUG_P0(sc, "configured UMAC for EEE");
REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL,
UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
REG_WR(sc, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
@@ -1380,13 +2398,16 @@ static void elink_umac_enable(struct elink_params *params,
REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR0,
((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
- (params->mac_addr[4] << 8) | (params->mac_addr[5])));
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR1,
- ((params->mac_addr[0] << 8) | (params->mac_addr[1])));
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
/* Enable RX and TX */
val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
- val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | UMAC_COMMAND_CONFIG_REG_RX_ENA;
+ val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA;
REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val);
DELAY(50);
@@ -1426,7 +2447,8 @@ static void elink_xmac_init(struct elink_params *params, uint32_t max_speed)
is_port4mode &&
(REG_RD(sc, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
- PMD_DRV_LOG(DEBUG, "XMAC already out of reset in 4-port mode");
+ ELINK_DEBUG_P0(sc,
+ "XMAC already out of reset in 4-port mode");
return;
}
@@ -1438,7 +2460,7 @@ static void elink_xmac_init(struct elink_params *params, uint32_t max_speed)
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
if (is_port4mode) {
- PMD_DRV_LOG(DEBUG, "Init XMAC to 2 ports x 10G per path");
+ ELINK_DEBUG_P0(sc, "Init XMAC to 2 ports x 10G per path");
/* Set the number of ports on the system side to up to 2 */
REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 1);
@@ -1449,13 +2471,13 @@ static void elink_xmac_init(struct elink_params *params, uint32_t max_speed)
/* Set the number of ports on the system side to 1 */
REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 0);
if (max_speed == ELINK_SPEED_10000) {
- PMD_DRV_LOG(DEBUG,
- "Init XMAC to 10G x 1 port per path");
+ ELINK_DEBUG_P0(sc,
+ "Init XMAC to 10G x 1 port per path");
/* Set the number of ports on the Warp Core to 10G */
REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3);
} else {
- PMD_DRV_LOG(DEBUG,
- "Init XMAC to 20G x 2 ports per path");
+ ELINK_DEBUG_P0(sc,
+ "Init XMAC to 20G x 2 ports per path");
/* Set the number of ports on the Warp Core to 20G */
REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 1);
}
@@ -1477,7 +2499,8 @@ static void elink_set_xmac_rxtx(struct elink_params *params, uint8_t en)
uint32_t pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
uint32_t val;
- if (REG_RD(sc, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) {
+ if (REG_RD(sc, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC) {
/* Send an indication to change the state in the NIG back to XON
* Clearing this bit enables the next set of this bit to get
* rising edge
@@ -1487,7 +2510,7 @@ static void elink_set_xmac_rxtx(struct elink_params *params, uint8_t en)
(pfc_ctrl & ~(1 << 1)));
REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI,
(pfc_ctrl | (1 << 1)));
- PMD_DRV_LOG(DEBUG, "Disable XMAC on port %x", port);
+ ELINK_DEBUG_P1(sc, "Disable XMAC on port %x", port);
val = REG_RD(sc, xmac_base + XMAC_REG_CTRL);
if (en)
val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
@@ -1498,11 +2521,11 @@ static void elink_set_xmac_rxtx(struct elink_params *params, uint8_t en)
}
static elink_status_t elink_xmac_enable(struct elink_params *params,
- struct elink_vars *vars, uint8_t lb)
+ struct elink_vars *vars, uint8_t lb)
{
uint32_t val, xmac_base;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "enabling XMAC");
+ ELINK_DEBUG_P0(sc, "enabling XMAC");
xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
@@ -1536,10 +2559,10 @@ static elink_status_t elink_xmac_enable(struct elink_params *params,
REG_WR(sc, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
/* update PFC */
- elink_update_pfc_xmac(params, vars);
+ elink_update_pfc_xmac(params, vars, 0);
if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
- PMD_DRV_LOG(DEBUG, "Setting XMAC for EEE");
+ ELINK_DEBUG_P0(sc, "Setting XMAC for EEE");
REG_WR(sc, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
} else {
@@ -1568,14 +2591,14 @@ static elink_status_t elink_xmac_enable(struct elink_params *params,
}
static elink_status_t elink_emac_enable(struct elink_params *params,
- struct elink_vars *vars, uint8_t lb)
+ struct elink_vars *vars, uint8_t lb)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "enabling EMAC");
+ ELINK_DEBUG_P0(sc, "enabling EMAC");
/* Disable BMAC */
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
@@ -1584,19 +2607,39 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
/* enable emac and not bmac */
REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1);
+#ifdef ELINK_INCLUDE_EMUL
+ /* for palladium */
+ if (CHIP_REV_IS_EMUL(sc)) {
+ /* Use lane 1 (of lanes 0-3) */
+ REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);
+ REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
+ }
+ /* for fpga */
+ else
+#endif
+#ifdef ELINK_INCLUDE_FPGA
+ if (CHIP_REV_IS_FPGA(sc)) {
+ /* Use lane 1 (of lanes 0-3) */
+ ELINK_DEBUG_P0(sc, "elink_emac_enable: Setting FPGA");
+
+ REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);
+ REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0);
+ } else
+#endif
+ /* ASIC */
if (vars->phy_flags & PHY_XGXS_FLAG) {
uint32_t ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- PMD_DRV_LOG(DEBUG, "XGXS");
+ ELINK_DEBUG_P0(sc, "XGXS");
/* select the master lanes (out of 0-3) */
REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, ser_lane);
/* select XGXS */
REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
- } else { /* SerDes */
- PMD_DRV_LOG(DEBUG, "SerDes");
+ } else { /* SerDes */
+ ELINK_DEBUG_P0(sc, "SerDes");
/* select SerDes */
REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0);
}
@@ -1606,28 +2649,39 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- /* pause enable/disable */
- elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
-
- elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- if (!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
- if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
- elink_bits_en(sc, emac_base +
- EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
+#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
+ if (CHIP_REV_IS_SLOW(sc)) {
+ /* config GMII mode */
+ val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE);
+ elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE,
+ (val | EMAC_MODE_PORT_GMII));
+ } else { /* ASIC */
+#endif
+ /* pause enable/disable */
+ elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
- if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
- elink_bits_en(sc, emac_base +
- EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- } else
- elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_FLOW_EN);
+ elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
+ elink_bits_en(sc, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
+ elink_bits_en(sc, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
+#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
+ }
+#endif
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(sc, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1642,18 +2696,18 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
*/
elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, 0);
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) {
- PMD_DRV_LOG(DEBUG, "PFC is enabled");
+ ELINK_DEBUG_P0(sc, "PFC is enabled");
/* Enable PFC again */
elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE,
- EMAC_REG_RX_PFC_MODE_RX_EN |
- EMAC_REG_RX_PFC_MODE_TX_EN |
- EMAC_REG_RX_PFC_MODE_PRIORITIES);
+ EMAC_REG_RX_PFC_MODE_RX_EN |
+ EMAC_REG_RX_PFC_MODE_TX_EN |
+ EMAC_REG_RX_PFC_MODE_PRIORITIES);
elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_PARAM,
- ((0x0101 <<
- EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
- (0x00ff <<
- EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+ ((0x0101 <<
+ EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+ (0x00ff <<
+ EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
}
elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MODE, val);
@@ -1671,9 +2725,8 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
/* Enable emac for jumbo packets */
elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MTU_SIZE,
- (EMAC_RX_MTU_SIZE_JUMBO_ENA |
- (ELINK_ETH_MAX_JUMBO_PACKET_SIZE +
- ELINK_ETH_OVREHEAD)));
+ (EMAC_RX_MTU_SIZE_JUMBO_ENA |
+ (ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD)));
/* Strip CRC */
REG_WR(sc, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port * 4, 0x1);
@@ -1687,13 +2740,23 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0x1);
val = 0;
if ((params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
val = 1;
REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, val);
REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x1);
+#ifdef ELINK_INCLUDE_EMUL
+ if (CHIP_REV_IS_EMUL(sc)) {
+ /* Take the BigMac out of reset */
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* Enable access for bmac registers */
+ REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1);
+ } else
+#endif
REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x0);
vars->mac_type = ELINK_MAC_TYPE_EMAC;
@@ -1705,13 +2768,13 @@ static void elink_update_pfc_bmac1(struct elink_params *params,
{
uint32_t wb_data[2];
struct bnx2x_softc *sc = params->sc;
- uint32_t bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ uint32_t bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
uint32_t val = 0x14;
if ((!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED)) &&
- (vars->flow_ctrl & ELINK_FLOW_CTRL_RX))
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_RX))
/* Enable BigMAC to react on received Pause packets */
val |= (1 << 5);
wb_data[0] = val;
@@ -1722,7 +2785,7 @@ static void elink_update_pfc_bmac1(struct elink_params *params,
val = 0xc0;
if (!(params->feature_config_flags &
ELINK_FEATURE_CONFIG_PFC_ENABLED) &&
- (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
@@ -1730,7 +2793,8 @@ static void elink_update_pfc_bmac1(struct elink_params *params,
}
static void elink_update_pfc_bmac2(struct elink_params *params,
- struct elink_vars *vars, uint8_t is_lb)
+ struct elink_vars *vars,
+ uint8_t is_lb)
{
/* Set rx control: Strip CRC and enable BigMAC to relay
* control packets to the system as well
@@ -1738,12 +2802,12 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
uint32_t wb_data[2];
struct bnx2x_softc *sc = params->sc;
uint32_t bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
uint32_t val = 0x14;
if ((!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED)) &&
- (vars->flow_ctrl & ELINK_FLOW_CTRL_RX))
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & ELINK_FLOW_CTRL_RX))
/* Enable BigMAC to react on received Pause packets */
val |= (1 << 5);
wb_data[0] = val;
@@ -1754,7 +2818,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
/* Tx control */
val = 0xc0;
if (!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED) &&
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) &&
(vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
@@ -1762,21 +2826,21 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) {
- PMD_DRV_LOG(DEBUG, "PFC is enabled");
+ ELINK_DEBUG_P0(sc, "PFC is enabled");
/* Enable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x0;
- wb_data[0] |= (1 << 0); /* RX */
- wb_data[0] |= (1 << 1); /* TX */
- wb_data[0] |= (1 << 2); /* Force initial Xon */
- wb_data[0] |= (1 << 3); /* 8 cos */
- wb_data[0] |= (1 << 5); /* STATS */
+ wb_data[0] |= (1 << 0); /* RX */
+ wb_data[0] |= (1 << 1); /* TX */
+ wb_data[0] |= (1 << 2); /* Force initial Xon */
+ wb_data[0] |= (1 << 3); /* 8 cos */
+ wb_data[0] |= (1 << 5); /* STATS */
wb_data[1] = 0;
REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
wb_data, 2);
/* Clear the force Xon */
wb_data[0] &= ~(1 << 2);
} else {
- PMD_DRV_LOG(DEBUG, "PFC is disabled");
+ ELINK_DEBUG_P0(sc, "PFC is disabled");
/* Disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
@@ -1791,7 +2855,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
*/
val = 0x8000;
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
- val |= (1 << 16); /* enable automatic re-send */
+ val |= (1 << 16); /* enable automatic re-send */
wb_data[0] = val;
wb_data[1] = 0;
@@ -1799,10 +2863,10 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
wb_data, 2);
/* mac control */
- val = 0x3; /* Enable RX and TX */
+ val = 0x3; /* Enable RX and TX */
if (is_lb) {
- val |= 0x4; /* Local loopback */
- PMD_DRV_LOG(DEBUG, "enable bmac loopback");
+ val |= 0x4; /* Local loopback */
+ ELINK_DEBUG_P0(sc, "enable bmac loopback");
}
/* When PFC enabled, Pass pause frames towards the NIG. */
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
@@ -1814,47 +2878,46 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
}
/******************************************************************************
-* Description:
-* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
-* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
-******************************************************************************/
+ * Description:
+ * This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+ * are not contiguous and ARB_CREDIT_WEIGHT_0 + offset is suitable.
+ ******************************************************************************/
static elink_status_t elink_pfc_nig_rx_priority_mask(struct bnx2x_softc *sc,
- uint8_t cos_entry,
- uint32_t priority_mask,
- uint8_t port)
+ uint8_t cos_entry,
+ uint32_t priority_mask, uint8_t port)
{
uint32_t nig_reg_rx_priority_mask_add = 0;
switch (cos_entry) {
case 0:
- nig_reg_rx_priority_mask_add = (port) ?
- NIG_REG_P1_RX_COS0_PRIORITY_MASK :
- NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK;
break;
case 1:
- nig_reg_rx_priority_mask_add = (port) ?
- NIG_REG_P1_RX_COS1_PRIORITY_MASK :
- NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK;
break;
case 2:
- nig_reg_rx_priority_mask_add = (port) ?
- NIG_REG_P1_RX_COS2_PRIORITY_MASK :
- NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS2_PRIORITY_MASK;
break;
case 3:
if (port)
- return ELINK_STATUS_ERROR;
- nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+ return ELINK_STATUS_ERROR;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
break;
case 4:
if (port)
- return ELINK_STATUS_ERROR;
- nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+ return ELINK_STATUS_ERROR;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
break;
case 5:
if (port)
- return ELINK_STATUS_ERROR;
- nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+ return ELINK_STATUS_ERROR;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
break;
}
@@ -1862,7 +2925,6 @@ static elink_status_t elink_pfc_nig_rx_priority_mask(struct bnx2x_softc *sc,
return ELINK_STATUS_OK;
}
-
static void elink_update_mng(struct elink_params *params, uint32_t link_status)
{
struct bnx2x_softc *sc = params->sc;
@@ -1872,31 +2934,20 @@ static void elink_update_mng(struct elink_params *params, uint32_t link_status)
port_mb[params->port].link_status), link_status);
}
-static void elink_update_link_attr(struct elink_params *params,
- uint32_t link_attr)
-{
- struct bnx2x_softc *sc = params->sc;
-
- if (SHMEM2_HAS(sc, link_attr_sync))
- REG_WR(sc, params->shmem2_base +
- offsetof(struct shmem2_region,
- link_attr_sync[params->port]), link_attr);
-}
-
static void elink_update_pfc_nig(struct elink_params *params,
- struct elink_nig_brb_pfc_port_params
- *nig_params)
+ __rte_unused struct elink_vars *vars,
+ struct elink_nig_brb_pfc_port_params *nig_params)
{
- uint32_t xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en =
- 0;
+ uint32_t xcm_mask = 0, ppp_enable = 0, pause_enable = 0;
+ uint32_t llfc_out_en = 0;
uint32_t llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
uint32_t pkt_priority_to_cos = 0;
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
int set_pfc = params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED;
- PMD_DRV_LOG(DEBUG, "updating pfc nig parameters");
+ ELINK_FEATURE_CONFIG_PFC_ENABLED;
+ ELINK_DEBUG_P0(sc, "updating pfc nig parameters");
/* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
* MAC control frames (that are not pause packets)
@@ -1916,19 +2967,19 @@ static void elink_update_pfc_nig(struct elink_params *params,
else
ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
- NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
xcm_out_en = 0;
hwpfc_enable = 1;
- } else {
+ } else {
if (nig_params) {
llfc_out_en = nig_params->llfc_out_en;
llfc_enable = nig_params->llfc_enable;
pause_enable = nig_params->pause_enable;
- } else /* Default non PFC mode - PAUSE */
+ } else /* Default non PFC mode - PAUSE */
pause_enable = 1;
xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
- NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
xcm_out_en = 1;
}
@@ -1965,9 +3016,7 @@ static void elink_update_pfc_nig(struct elink_params *params,
for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
elink_pfc_nig_rx_priority_mask(sc, i,
- nig_params->
- rx_cos_priority_mask[i],
- port);
+ nig_params->rx_cos_priority_mask[i], port);
REG_WR(sc, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
@@ -1978,13 +3027,13 @@ static void elink_update_pfc_nig(struct elink_params *params,
nig_params->llfc_low_priority_classes);
}
REG_WR(sc, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
- NIG_REG_P0_PKT_PRIORITY_TO_COS, pkt_priority_to_cos);
+ NIG_REG_P0_PKT_PRIORITY_TO_COS,
+ pkt_priority_to_cos);
}
elink_status_t elink_update_pfc(struct elink_params *params,
- struct elink_vars *vars,
- struct elink_nig_brb_pfc_port_params
- *pfc_params)
+ struct elink_vars *vars,
+ struct elink_nig_brb_pfc_port_params *pfc_params)
{
/* The PFC and pause are orthogonal to one another, meaning when
* PFC is enabled, the pause are disabled, and when PFC is
@@ -1992,7 +3041,6 @@ elink_status_t elink_update_pfc(struct elink_params *params,
*/
uint32_t val;
struct bnx2x_softc *sc = params->sc;
- elink_status_t elink_status = ELINK_STATUS_OK;
uint8_t bmac_loopback = (params->loopback_mode == ELINK_LOOPBACK_BMAC);
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
@@ -2003,24 +3051,24 @@ elink_status_t elink_update_pfc(struct elink_params *params,
elink_update_mng(params, vars->link_status);
/* Update NIG params */
- elink_update_pfc_nig(params, pfc_params);
+ elink_update_pfc_nig(params, vars, pfc_params);
if (!vars->link_up)
- return elink_status;
+ return ELINK_STATUS_OK;
- PMD_DRV_LOG(DEBUG, "About to update PFC in BMAC");
+ ELINK_DEBUG_P0(sc, "About to update PFC in BMAC");
if (CHIP_IS_E3(sc)) {
if (vars->mac_type == ELINK_MAC_TYPE_XMAC)
- elink_update_pfc_xmac(params, vars);
+ elink_update_pfc_xmac(params, vars, 0);
} else {
val = REG_RD(sc, MISC_REG_RESET_REG_2);
if ((val &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
== 0) {
- PMD_DRV_LOG(DEBUG, "About to update PFC in EMAC");
+ ELINK_DEBUG_P0(sc, "About to update PFC in EMAC");
elink_emac_enable(params, vars, 0);
- return elink_status;
+ return ELINK_STATUS_OK;
}
if (CHIP_IS_E2(sc))
elink_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2034,20 +3082,21 @@ elink_status_t elink_update_pfc(struct elink_params *params,
val = 1;
REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port * 4, val);
}
- return elink_status;
+ return ELINK_STATUS_OK;
}
static elink_status_t elink_bmac1_enable(struct elink_params *params,
- struct elink_vars *vars, uint8_t is_lb)
+ struct elink_vars *vars,
+ uint8_t is_lb)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
uint32_t bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
uint32_t wb_data[2];
uint32_t val;
- PMD_DRV_LOG(DEBUG, "Enabling BigMAC1");
+ ELINK_DEBUG_P0(sc, "Enabling BigMAC1");
/* XGXS control */
wb_data[0] = 0x3c;
@@ -2057,16 +3106,18 @@ static elink_status_t elink_bmac1_enable(struct elink_params *params,
/* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
- (params->mac_addr[3] << 16) |
- (params->mac_addr[4] << 8) | params->mac_addr[5]);
- wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]);
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
/* MAC control */
val = 0x3;
if (is_lb) {
val |= 0x4;
- PMD_DRV_LOG(DEBUG, "enable bmac loopback");
+ ELINK_DEBUG_P0(sc, "enable bmac loopback");
}
wb_data[0] = val;
wb_data[1] = 0;
@@ -2094,20 +3145,30 @@ static elink_status_t elink_bmac1_enable(struct elink_params *params,
wb_data[1] = 0;
REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
+#ifdef ELINK_INCLUDE_EMUL
+ /* Fix for emulation */
+ if (CHIP_REV_IS_EMUL(sc)) {
+ wb_data[0] = 0xf000;
+ wb_data[1] = 0;
+ REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
+ wb_data, 2);
+ }
+#endif
return ELINK_STATUS_OK;
}
static elink_status_t elink_bmac2_enable(struct elink_params *params,
- struct elink_vars *vars, uint8_t is_lb)
+ struct elink_vars *vars,
+ uint8_t is_lb)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
uint32_t bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
uint32_t wb_data[2];
- PMD_DRV_LOG(DEBUG, "Enabling BigMAC2");
+ ELINK_DEBUG_P0(sc, "Enabling BigMAC2");
wb_data[0] = 0;
wb_data[1] = 0;
@@ -2124,9 +3185,11 @@ static elink_status_t elink_bmac2_enable(struct elink_params *params,
/* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
- (params->mac_addr[3] << 16) |
- (params->mac_addr[4] << 8) | params->mac_addr[5]);
- wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]);
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
wb_data, 2);
@@ -2161,8 +3224,8 @@ static elink_status_t elink_bmac2_enable(struct elink_params *params,
}
static elink_status_t elink_bmac_enable(struct elink_params *params,
- struct elink_vars *vars,
- uint8_t is_lb, uint8_t reset_bmac)
+ struct elink_vars *vars,
+ uint8_t is_lb, uint8_t reset_bmac)
{
elink_status_t rc = ELINK_STATUS_OK;
uint8_t port = params->port;
@@ -2181,7 +3244,7 @@ static elink_status_t elink_bmac_enable(struct elink_params *params,
/* Enable access for bmac registers */
REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1);
- /* Enable BMAC according to BMAC type */
+ /* Enable BMAC according to BMAC type*/
if (CHIP_IS_E2(sc))
rc = elink_bmac2_enable(params, vars, is_lb);
else
@@ -2191,7 +3254,7 @@ static elink_status_t elink_bmac_enable(struct elink_params *params,
REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 0x0);
val = 0;
if ((params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
+ ELINK_FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & ELINK_FLOW_CTRL_TX))
val = 1;
REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + port * 4, val);
@@ -2205,13 +3268,15 @@ static elink_status_t elink_bmac_enable(struct elink_params *params,
return rc;
}
-static void elink_set_bmac_rx(struct bnx2x_softc *sc, uint8_t port, uint8_t en)
+static void elink_set_bmac_rx(struct bnx2x_softc *sc,
+ __rte_unused uint32_t chip_id,
+ uint8_t port, uint8_t en)
{
uint32_t bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
uint32_t wb_data[2];
- uint32_t nig_bmac_enable =
- REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
+ uint32_t nig_bmac_enable = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN +
+ port * 4);
if (CHIP_IS_E2(sc))
bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
@@ -2219,7 +3284,8 @@ static void elink_set_bmac_rx(struct bnx2x_softc *sc, uint8_t port, uint8_t en)
bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
/* Only if the bmac is out of reset */
if (REG_RD(sc, MISC_REG_RESET_REG_2) &
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && nig_bmac_enable) {
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
+ nig_bmac_enable) {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(sc, bmac_addr, wb_data, 2);
if (en)
@@ -2232,7 +3298,8 @@ static void elink_set_bmac_rx(struct bnx2x_softc *sc, uint8_t port, uint8_t en)
}
static elink_status_t elink_pbf_update(struct elink_params *params,
- uint32_t flow_ctrl, uint32_t line_speed)
+ uint32_t flow_ctrl,
+ uint32_t line_speed)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
@@ -2245,7 +3312,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
/* Wait for init credit */
init_crd = REG_RD(sc, PBF_REG_P0_INIT_CRD + port * 4);
crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8);
- PMD_DRV_LOG(DEBUG, "init_crd 0x%x crd 0x%x", init_crd, crd);
+ ELINK_DEBUG_P2(sc, "init_crd 0x%x crd 0x%x", init_crd, crd);
while ((init_crd != crd) && count) {
DELAY(1000 * 5);
@@ -2254,24 +3321,25 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
}
crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8);
if (init_crd != crd) {
- PMD_DRV_LOG(DEBUG, "BUG! init_crd 0x%x != crd 0x%x",
- init_crd, crd);
+ ELINK_DEBUG_P2(sc, "BUG! init_crd 0x%x != crd 0x%x",
+ init_crd, crd);
return ELINK_STATUS_ERROR;
}
if (flow_ctrl & ELINK_FLOW_CTRL_RX ||
line_speed == ELINK_SPEED_10 ||
line_speed == ELINK_SPEED_100 ||
- line_speed == ELINK_SPEED_1000 || line_speed == ELINK_SPEED_2500) {
+ line_speed == ELINK_SPEED_1000 ||
+ line_speed == ELINK_SPEED_2500) {
REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 1);
/* Update threshold */
REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, 0);
/* Update init credit */
- init_crd = 778; /* (800-18-4) */
+ init_crd = 778; /* (800-18-4) */
} else {
uint32_t thresh = (ELINK_ETH_MAX_JUMBO_PACKET_SIZE +
- ELINK_ETH_OVREHEAD) / 16;
+ ELINK_ETH_OVREHEAD) / 16;
REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0);
/* Update threshold */
REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, thresh);
@@ -2281,14 +3349,14 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
init_crd = thresh + 553 - 22;
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x",
- line_speed);
+ ELINK_DEBUG_P1(sc, "Invalid line_speed 0x%x",
+ line_speed);
return ELINK_STATUS_ERROR;
}
}
REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, init_crd);
- PMD_DRV_LOG(DEBUG, "PBF updated to speed %d credit %d",
- line_speed, init_crd);
+ ELINK_DEBUG_P2(sc, "PBF updated to speed %d credit %d",
+ line_speed, init_crd);
/* Probe the credit changes */
REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0x1);
@@ -2316,7 +3384,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
* the emac_base for the CL45 read/writes operations
*/
static uint32_t elink_get_emac_base(struct bnx2x_softc *sc,
- uint32_t mdc_mdio_access, uint8_t port)
+ uint32_t mdc_mdio_access, uint8_t port)
{
uint32_t emac_base = 0;
switch (mdc_mdio_access) {
@@ -2364,7 +3432,8 @@ static elink_status_t elink_cl22_write(struct bnx2x_softc *sc,
/* Address */
tmp = ((phy->addr << 21) | (reg << 16) | val |
- EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY);
+ EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+ EMAC_MDIO_COMM_START_BUSY);
REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
@@ -2377,7 +3446,7 @@ static elink_status_t elink_cl22_write(struct bnx2x_softc *sc,
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "write phy register failed");
+ ELINK_DEBUG_P0(sc, "write phy register failed");
rc = ELINK_STATUS_TIMEOUT;
}
REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
@@ -2386,7 +3455,7 @@ static elink_status_t elink_cl22_write(struct bnx2x_softc *sc,
static elink_status_t elink_cl22_read(struct bnx2x_softc *sc,
struct elink_phy *phy,
- uint16_t reg, uint16_t * ret_val)
+ uint16_t reg, uint16_t *ret_val)
{
uint32_t val, mode;
uint16_t i;
@@ -2399,7 +3468,8 @@ static elink_status_t elink_cl22_read(struct bnx2x_softc *sc,
/* Address */
val = ((phy->addr << 21) | (reg << 16) |
- EMAC_MDIO_COMM_COMMAND_READ_22 | EMAC_MDIO_COMM_START_BUSY);
+ EMAC_MDIO_COMM_COMMAND_READ_22 |
+ EMAC_MDIO_COMM_START_BUSY);
REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
@@ -2407,13 +3477,13 @@ static elink_status_t elink_cl22_read(struct bnx2x_softc *sc,
val = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
- *ret_val = (uint16_t) (val & EMAC_MDIO_COMM_DATA);
+ *ret_val = (uint16_t)(val & EMAC_MDIO_COMM_DATA);
DELAY(5);
break;
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "read phy register failed");
+ ELINK_DEBUG_P0(sc, "read phy register failed");
*ret_val = 0;
rc = ELINK_STATUS_TIMEOUT;
@@ -2426,14 +3496,17 @@ static elink_status_t elink_cl22_read(struct bnx2x_softc *sc,
/* CL45 access functions */
/******************************************************************/
static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
- struct elink_phy *phy, uint8_t devad,
- uint16_t reg, uint16_t * ret_val)
+ struct elink_phy *phy,
+ uint8_t devad, uint16_t reg, uint16_t *ret_val)
{
uint32_t val;
uint16_t i;
elink_status_t rc = ELINK_STATUS_OK;
+ uint32_t chip_id;
if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_G) {
- elink_set_mdio_clk(sc, phy->mdio_ctrl);
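+ /* Build chip_id from the chip number and revision registers */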
+ chip_id = (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12);
+ elink_set_mdio_clk(sc, chip_id, phy->mdio_ctrl);
}
if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0)
@@ -2441,7 +3514,8 @@ static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
EMAC_MDIO_STATUS_10MB);
/* Address */
val = ((phy->addr << 21) | (devad << 16) | reg |
- EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY);
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
@@ -2454,8 +3528,9 @@ static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "read phy register failed");
- elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+ ELINK_DEBUG_P0(sc, "read phy register failed");
+ elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT);
+ /* "MDC/MDIO access timeout" */
*ret_val = 0;
rc = ELINK_STATUS_TIMEOUT;
@@ -2472,14 +3547,16 @@ static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
val = REG_RD(sc, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
- *ret_val =
- (uint16_t) (val & EMAC_MDIO_COMM_DATA);
+ *ret_val = (uint16_t)
+ (val & EMAC_MDIO_COMM_DATA);
break;
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "read phy register failed");
- elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+ ELINK_DEBUG_P0(sc, "read phy register failed");
+ elink_cb_event_log(sc,
+ ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT);
+ /* "MDC/MDIO access timeout" */
*ret_val = 0;
rc = ELINK_STATUS_TIMEOUT;
@@ -2501,14 +3578,17 @@ static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
}
static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
- struct elink_phy *phy, uint8_t devad,
- uint16_t reg, uint16_t val)
+ struct elink_phy *phy,
+ uint8_t devad, uint16_t reg, uint16_t val)
{
uint32_t tmp;
uint8_t i;
elink_status_t rc = ELINK_STATUS_OK;
+ uint32_t chip_id;
if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_G) {
- elink_set_mdio_clk(sc, phy->mdio_ctrl);
+ chip_id = (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12);
+ elink_set_mdio_clk(sc, chip_id, phy->mdio_ctrl);
}
if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0)
@@ -2517,7 +3597,8 @@ static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
/* Address */
tmp = ((phy->addr << 21) | (devad << 16) | reg |
- EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY);
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
@@ -2530,8 +3611,9 @@ static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "write phy register failed");
- elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+ ELINK_DEBUG_P0(sc, "write phy register failed");
+ elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT);
+ /* "MDC/MDIO access timeout" */
rc = ELINK_STATUS_TIMEOUT;
} else {
@@ -2552,8 +3634,10 @@ static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "write phy register failed");
- elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
+ ELINK_DEBUG_P0(sc, "write phy register failed");
+ elink_cb_event_log(sc,
+ ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT);
+ /* "MDC/MDIO access timeout" */
rc = ELINK_STATUS_TIMEOUT;
}
@@ -2580,14 +3664,14 @@ static uint8_t elink_eee_has_cap(struct elink_params *params)
struct bnx2x_softc *sc = params->sc;
if (REG_RD(sc, params->shmem2_base) <=
- offsetof(struct shmem2_region, eee_status[params->port]))
- return 0;
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
return 1;
}
static elink_status_t elink_eee_nvram_to_time(uint32_t nvram_mode,
- uint32_t * idle_timer)
+ uint32_t *idle_timer)
{
switch (nvram_mode) {
case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
@@ -2608,7 +3692,7 @@ static elink_status_t elink_eee_nvram_to_time(uint32_t nvram_mode,
}
static elink_status_t elink_eee_time_to_nvram(uint32_t idle_timer,
- uint32_t * nvram_mode)
+ uint32_t *nvram_mode)
{
switch (idle_timer) {
case ELINK_EEE_MODE_NVRAM_BALANCED_TIME:
@@ -2635,7 +3719,7 @@ static uint32_t elink_eee_calc_timer(struct elink_params *params)
if (params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) {
if (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME) {
- /* time value in eee_mode --> used directly */
+ /* time value in eee_mode --> used directly*/
eee_idle = params->eee_mode & ELINK_EEE_MODE_TIMER_MASK;
} else {
/* hsi value in eee_mode --> time */
@@ -2645,12 +3729,11 @@ static uint32_t elink_eee_calc_timer(struct elink_params *params)
return 0;
}
} else {
- /* hsi values in nvram --> time */
+ /* hsi values in nvram --> time*/
eee_mode = ((REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_feature_config
- [params->
- port].eee_power_mode)) &
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
@@ -2662,7 +3745,7 @@ static uint32_t elink_eee_calc_timer(struct elink_params *params)
}
static elink_status_t elink_eee_set_timers(struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_vars *vars)
{
uint32_t eee_idle = 0, eee_mode;
struct bnx2x_softc *sc = params->sc;
@@ -2675,7 +3758,7 @@ static elink_status_t elink_eee_set_timers(struct elink_params *params,
} else if ((params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI) &&
(params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) &&
(params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME)) {
- PMD_DRV_LOG(DEBUG, "Error: Tx LPI is enabled with timer 0");
+ ELINK_DEBUG_P0(sc, "Error: Tx LPI is enabled with timer 0");
return ELINK_STATUS_ERROR;
}
@@ -2684,7 +3767,7 @@ static elink_status_t elink_eee_set_timers(struct elink_params *params,
/* eee_idle in 1u --> eee_status in 16u */
eee_idle >>= 4;
vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
- SHMEM_EEE_TIME_OUTPUT_BIT;
+ SHMEM_EEE_TIME_OUTPUT_BIT;
} else {
if (elink_eee_time_to_nvram(eee_idle, &eee_mode))
return ELINK_STATUS_ERROR;
@@ -2695,8 +3778,7 @@ static elink_status_t elink_eee_set_timers(struct elink_params *params,
}
static elink_status_t elink_eee_initial_config(struct elink_params *params,
- struct elink_vars *vars,
- uint8_t mode)
+ struct elink_vars *vars, uint8_t mode)
{
vars->eee_status |= ((uint32_t) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
@@ -2715,8 +3797,8 @@ static elink_status_t elink_eee_initial_config(struct elink_params *params,
}
static elink_status_t elink_eee_disable(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
@@ -2731,9 +3813,8 @@ static elink_status_t elink_eee_disable(struct elink_phy *phy,
}
static elink_status_t elink_eee_advertise(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars,
- uint8_t modes)
+ struct elink_params *params,
+ struct elink_vars *vars, uint8_t modes)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val = 0;
@@ -2742,11 +3823,11 @@ static elink_status_t elink_eee_advertise(struct elink_phy *phy,
REG_WR(sc, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
if (modes & SHMEM_EEE_10G_ADV) {
- PMD_DRV_LOG(DEBUG, "Advertise 10GBase-T EEE");
+ ELINK_DEBUG_P0(sc, "Advertise 10GBase-T EEE");
val |= 0x8;
}
if (modes & SHMEM_EEE_1G_ADV) {
- PMD_DRV_LOG(DEBUG, "Advertise 1GBase-T EEE");
+ ELINK_DEBUG_P0(sc, "Advertise 1GBase-T EEE");
val |= 0x4;
}
@@ -2770,8 +3851,8 @@ static void elink_update_mng_eee(struct elink_params *params,
}
static void elink_eee_an_resolve(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t adv = 0, lp = 0;
@@ -2786,7 +3867,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
if (adv & 0x2) {
if (vars->line_speed == ELINK_SPEED_100)
neg = 1;
- PMD_DRV_LOG(DEBUG, "EEE negotiated - 100M");
+ ELINK_DEBUG_P0(sc, "EEE negotiated - 100M");
}
}
if (lp & 0x14) {
@@ -2794,7 +3875,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
if (adv & 0x14) {
if (vars->line_speed == ELINK_SPEED_1000)
neg = 1;
- PMD_DRV_LOG(DEBUG, "EEE negotiated - 1G");
+ ELINK_DEBUG_P0(sc, "EEE negotiated - 1G");
}
}
if (lp & 0x68) {
@@ -2802,7 +3883,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
if (adv & 0x68) {
if (vars->line_speed == ELINK_SPEED_10000)
neg = 1;
- PMD_DRV_LOG(DEBUG, "EEE negotiated - 10G");
+ ELINK_DEBUG_P0(sc, "EEE negotiated - 10G");
}
}
@@ -2810,7 +3891,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
if (neg) {
- PMD_DRV_LOG(DEBUG, "EEE is active");
+ ELINK_DEBUG_P0(sc, "EEE is active");
vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
}
}
@@ -2831,37 +3912,34 @@ static void elink_bsc_module_sel(struct elink_params *params)
dev_info.shared_hw_config.board));
i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
- SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+ SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
/* Read I2C output value */
sfp_ctrl = REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- e3_cmn_pin_cfg));
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg));
i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
- PMD_DRV_LOG(DEBUG, "Setting BSC switch");
+ ELINK_DEBUG_P0(sc, "Setting BSC switch");
for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
elink_set_cfg_pin(sc, i2c_pins[idx], i2c_val[idx]);
}
-static elink_status_t elink_bsc_read(struct elink_params *params,
- struct bnx2x_softc *sc,
- uint8_t sl_devid,
- uint16_t sl_addr,
- uint8_t lc_addr,
- uint8_t xfer_cnt, uint32_t * data_array)
+static elink_status_t elink_bsc_read(struct bnx2x_softc *sc,
+ uint8_t sl_devid,
+ uint16_t sl_addr,
+ uint8_t lc_addr,
+ uint8_t xfer_cnt,
+ uint32_t *data_array)
{
uint32_t val, i;
elink_status_t rc = ELINK_STATUS_OK;
if (xfer_cnt > 16) {
- PMD_DRV_LOG(DEBUG, "invalid xfer_cnt %d. Max is 16 bytes",
- xfer_cnt);
+ ELINK_DEBUG_P1(sc, "invalid xfer_cnt %d. Max is 16 bytes",
+ xfer_cnt);
return ELINK_STATUS_ERROR;
}
- if (params)
- elink_bsc_module_sel(params);
xfer_cnt = 16 - lc_addr;
@@ -2874,11 +3952,11 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
val = (sl_devid << 16) | sl_addr;
REG_WR(sc, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
- /* Start xfer with 0 byte to update the address pointer ??? */
+ /* Start xfer with 0 byte to update the address pointer ???*/
val = (MCPR_IMC_COMMAND_ENABLE) |
- (MCPR_IMC_COMMAND_WRITE_OP <<
- MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
- (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+ (MCPR_IMC_COMMAND_WRITE_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val);
/* Poll for completion */
@@ -2888,8 +3966,8 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
DELAY(10);
val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
if (i++ > 1000) {
- PMD_DRV_LOG(DEBUG, "wr 0 byte timed out after %d try",
- i);
+ ELINK_DEBUG_P1(sc, "wr 0 byte timed out after %d try",
+ i);
rc = ELINK_STATUS_TIMEOUT;
break;
}
@@ -2899,10 +3977,10 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
/* Start xfer with read op */
val = (MCPR_IMC_COMMAND_ENABLE) |
- (MCPR_IMC_COMMAND_READ_OP <<
- MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
- (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
- (xfer_cnt);
+ (MCPR_IMC_COMMAND_READ_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+ (xfer_cnt);
REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val);
/* Poll for completion */
@@ -2912,7 +3990,7 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
DELAY(10);
val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
if (i++ > 1000) {
- PMD_DRV_LOG(DEBUG, "rd op timed out after %d try", i);
+ ELINK_DEBUG_P1(sc, "rd op timed out after %d try", i);
rc = ELINK_STATUS_TIMEOUT;
break;
}
@@ -2924,17 +4002,18 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
data_array[i] = REG_RD(sc, (MCP_REG_MCPR_IMC_DATAREG0 + i * 4));
#ifdef __BIG_ENDIAN
data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
- ((data_array[i] & 0x0000ff00) << 8) |
- ((data_array[i] & 0x00ff0000) >> 8) |
- ((data_array[i] & 0xff000000) >> 24);
+ ((data_array[i] & 0x0000ff00) << 8) |
+ ((data_array[i] & 0x00ff0000) >> 8) |
+ ((data_array[i] & 0xff000000) >> 24);
#endif
}
return rc;
}
static void elink_cl45_read_or_write(struct bnx2x_softc *sc,
- struct elink_phy *phy, uint8_t devad,
- uint16_t reg, uint16_t or_val)
+ struct elink_phy *phy,
+ uint8_t devad, uint16_t reg,
+ uint16_t or_val)
{
uint16_t val;
elink_cl45_read(sc, phy, devad, reg, &val);
@@ -2951,7 +4030,42 @@ static void elink_cl45_read_and_write(struct bnx2x_softc *sc,
elink_cl45_write(sc, phy, devad, reg, val & and_val);
}
-static uint8_t elink_get_warpcore_lane(struct elink_params *params)
+elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr,
+ uint8_t devad, uint16_t reg, uint16_t *ret_val)
+{
+ uint8_t phy_index;
+ /* Probe for the phy according to the given phy_addr, and execute
+ * the read request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return elink_cl45_read(params->sc,
+ &params->phy[phy_index], devad,
+ reg, ret_val);
+ }
+ }
+ return ELINK_STATUS_ERROR;
+}
+
+elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr,
+ uint8_t devad, uint16_t reg, uint16_t val)
+{
+ uint8_t phy_index;
+ /* Probe for the phy according to the given phy_addr, and execute
+ * the write request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return elink_cl45_write(params->sc,
+ &params->phy[phy_index], devad,
+ reg, val);
+ }
+ }
+ return ELINK_STATUS_ERROR;
+}
+
+static uint8_t elink_get_warpcore_lane(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
{
uint8_t lane = 0;
struct bnx2x_softc *sc = params->sc;
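A minimal usage sketch for the new elink_phy_read()/elink_phy_write() helpers added in the hunk above (illustrative only, not part of the patch; the register choice and the example_* function name are assumptions):

	/* Hypothetical caller: read the Clause 45 PHY identifier register
	 * (PMA devad, register 2) of the first configured PHY through the
	 * new elink_phy_read() wrapper, which resolves phy_addr to the
	 * matching params->phy[] entry before issuing the CL45 read.
	 */
	static void example_dump_phy_id(struct elink_params *params)
	{
		uint16_t id1 = 0;

		if (params->num_phys == 0)
			return;
		if (elink_phy_read(params, params->phy[0].addr,
				   MDIO_PMA_DEVAD, 0x2, &id1) == ELINK_STATUS_OK)
			ELINK_DEBUG_P1(params->sc, "PHY ID1 = 0x%x", id1);
	}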
@@ -2985,14 +4099,16 @@ static uint8_t elink_get_warpcore_lane(struct elink_params *params)
port = port ^ 1;
lane = (port << 1) + path;
- } else { /* Two port mode - no port swap */
+ } else { /* Two port mode - no port swap */
/* Figure out path swap value */
- path_swap_ovr = REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+ path_swap_ovr =
+ REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
if (path_swap_ovr & 0x1) {
path_swap = (path_swap_ovr & 0x2);
} else {
- path_swap = REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP);
+ path_swap =
+ REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP);
}
if (path_swap)
path = path ^ 1;
@@ -3002,6 +4118,7 @@ static uint8_t elink_get_warpcore_lane(struct elink_params *params)
return lane;
}
+
static void elink_set_aer_mmd(struct elink_params *params,
struct elink_phy *phy)
{
@@ -3010,13 +4127,13 @@ static void elink_set_aer_mmd(struct elink_params *params,
struct bnx2x_softc *sc = params->sc;
ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
- (phy->addr + ser_lane) : 0;
+ (phy->addr + ser_lane) : 0;
if (USES_WARPCORE(sc)) {
- aer_val = elink_get_warpcore_lane(params);
+ aer_val = elink_get_warpcore_lane(phy, params);
/* In Dual-lane mode, two lanes are joined together,
* so in order to configure them, the AER broadcast method is
* used here.
@@ -3049,7 +4166,7 @@ static void elink_set_serdes_access(struct bnx2x_softc *sc, uint8_t port)
DELAY(500);
REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
DELAY(500);
- /* Set Clause 45 */
+ /* Set Clause 45 */
REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_ST + port * 0x10, 0);
}
@@ -3057,7 +4174,7 @@ static void elink_serdes_deassert(struct bnx2x_softc *sc, uint8_t port)
{
uint32_t val;
- PMD_DRV_LOG(DEBUG, "elink_serdes_deassert");
+ ELINK_DEBUG_P0(sc, "elink_serdes_deassert");
val = ELINK_SERDES_RESET_BITS << (port * 16);
@@ -3092,7 +4209,7 @@ static void elink_xgxs_deassert(struct elink_params *params)
struct bnx2x_softc *sc = params->sc;
uint8_t port;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "elink_xgxs_deassert");
+ ELINK_DEBUG_P0(sc, "elink_xgxs_deassert");
port = params->port;
val = ELINK_XGXS_RESET_BITS << (port * 16);
@@ -3107,8 +4224,9 @@ static void elink_xgxs_deassert(struct elink_params *params)
static void elink_calc_ieee_aneg_adv(struct elink_phy *phy,
struct elink_params *params,
- uint16_t * ieee_fc)
+ uint16_t *ieee_fc)
{
+ struct bnx2x_softc *sc = params->sc;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
/* Resolve pause mode and advertisement Please refer to Table
* 28B-3 of the 802.3ab-1999 spec
@@ -3118,12 +4236,12 @@ static void elink_calc_ieee_aneg_adv(struct elink_phy *phy,
case ELINK_FLOW_CTRL_AUTO:
switch (params->req_fc_auto_adv) {
case ELINK_FLOW_CTRL_BOTH:
+ case ELINK_FLOW_CTRL_RX:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
break;
- case ELINK_FLOW_CTRL_RX:
case ELINK_FLOW_CTRL_TX:
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
default:
break;
@@ -3143,16 +4261,18 @@ static void elink_calc_ieee_aneg_adv(struct elink_phy *phy,
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
break;
}
- PMD_DRV_LOG(DEBUG, "ieee_fc = 0x%x", *ieee_fc);
+ ELINK_DEBUG_P1(sc, "ieee_fc = 0x%x", *ieee_fc);
}
-static void set_phy_vars(struct elink_params *params, struct elink_vars *vars)
+static void set_phy_vars(struct elink_params *params,
+ struct elink_vars *vars)
{
+ struct bnx2x_softc *sc = params->sc;
uint8_t actual_phy_idx, phy_index, link_cfg_idx;
uint8_t phy_config_swapped = params->multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys;
- phy_index++) {
+ phy_index++) {
link_cfg_idx = ELINK_LINK_CONFIG_IDX(phy_index);
actual_phy_idx = phy_index;
if (phy_config_swapped) {
@@ -3162,26 +4282,26 @@ static void set_phy_vars(struct elink_params *params, struct elink_vars *vars)
actual_phy_idx = ELINK_EXT_PHY1;
}
params->phy[actual_phy_idx].req_flow_ctrl =
- params->req_flow_ctrl[link_cfg_idx];
+ params->req_flow_ctrl[link_cfg_idx];
params->phy[actual_phy_idx].req_line_speed =
- params->req_line_speed[link_cfg_idx];
+ params->req_line_speed[link_cfg_idx];
params->phy[actual_phy_idx].speed_cap_mask =
- params->speed_cap_mask[link_cfg_idx];
+ params->speed_cap_mask[link_cfg_idx];
params->phy[actual_phy_idx].req_duplex =
- params->req_duplex[link_cfg_idx];
+ params->req_duplex[link_cfg_idx];
if (params->req_line_speed[link_cfg_idx] ==
ELINK_SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
- PMD_DRV_LOG(DEBUG, "req_flow_ctrl %x, req_line_speed %x,"
- " speed_cap_mask %x",
- params->phy[actual_phy_idx].req_flow_ctrl,
- params->phy[actual_phy_idx].req_line_speed,
- params->phy[actual_phy_idx].speed_cap_mask);
+ ELINK_DEBUG_P3(sc, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
}
}
@@ -3199,38 +4319,57 @@ static void elink_ext_phy_set_pause(struct elink_params *params,
/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
}
- PMD_DRV_LOG(DEBUG, "Ext phy AN advertize 0x%x", val);
+ ELINK_DEBUG_P1(sc, "Ext phy AN advertize 0x%x", val);
elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
}
-static void elink_pause_resolve(struct elink_vars *vars, uint32_t pause_result)
-{ /* LD LP */
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
+static void elink_pause_resolve(__rte_unused struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars,
+ uint32_t pause_result)
+{
+ struct bnx2x_softc *sc = params->sc;
+ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
+ ELINK_DEBUG_P0(sc, "Flow Control: TX only");
vars->flow_ctrl = ELINK_FLOW_CTRL_TX;
break;
- case 0xe: /* 1 1 1 0 */
+ case 0xe: /* 1 1 1 0 */
+ ELINK_DEBUG_P0(sc, "Flow Control: RX only");
vars->flow_ctrl = ELINK_FLOW_CTRL_RX;
break;
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
+ /* If the user selected to advertise RX ONLY,
+ * although we advertised both, need to enable
+ * RX only.
+ */
+
+ if (params->req_fc_auto_adv == ELINK_FLOW_CTRL_BOTH) {
+ ELINK_DEBUG_P0(sc, "Flow Control: RX & TX");
vars->flow_ctrl = ELINK_FLOW_CTRL_BOTH;
+ } else {
+ ELINK_DEBUG_P0(sc, "Flow Control: RX only");
+ vars->flow_ctrl = ELINK_FLOW_CTRL_RX;
+ }
break;
-
default:
+ ELINK_DEBUG_P0(sc, "Flow Control: None");
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
break;
}
if (pause_result & (1 << 0))
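A worked decode of one pause_result value handled by elink_pause_resolve() above, following the LD/LP ASYM/PAUSE bit layout given in the table comment (the underlying advertisement mask and shift constants are not visible in this hunk and are assumed):

	/* Illustration only, not part of the patch.
	 * pause_result bit layout per the comment above:
	 *   bit 3 = LD ASYM_PAUSE, bit 2 = LD PAUSE,
	 *   bit 1 = LP ASYM_PAUSE, bit 0 = LP PAUSE.
	 * Example: pause_result = 0xb (binary 1011)
	 *   -> local device advertises ASYM only, link partner advertises
	 *      PAUSE + ASYM, so the resolution is ELINK_FLOW_CTRL_TX,
	 *      matching "case 0xb" in the switch above.
	 */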
@@ -3244,22 +4383,23 @@ static void elink_ext_phy_update_adv_fc(struct elink_phy *phy,
struct elink_params *params,
struct elink_vars *vars)
{
- uint16_t ld_pause; /* local */
- uint16_t lp_pause; /* link partner */
+ uint16_t ld_pause; /* local */
+ uint16_t lp_pause; /* link partner */
uint16_t pause_result;
struct bnx2x_softc *sc = params->sc;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE) {
elink_cl22_read(sc, phy, 0x4, &ld_pause);
elink_cl22_read(sc, phy, 0x5, &lp_pause);
- } else if (CHIP_IS_E3(sc) && ELINK_SINGLE_MEDIA_DIRECT(params)) {
- uint8_t lane = elink_get_warpcore_lane(params);
+ } else if (CHIP_IS_E3(sc) &&
+ ELINK_SINGLE_MEDIA_DIRECT(params)) {
+ uint8_t lane = elink_get_warpcore_lane(phy, params);
uint16_t gp_status, gp_mask;
elink_cl45_read(sc, phy,
MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4,
&gp_status);
gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL |
MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) <<
- lane;
+ lane;
if ((gp_status & gp_mask) == gp_mask) {
elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_ADV_PAUSE, &ld_pause);
@@ -3285,16 +4425,18 @@ static void elink_ext_phy_update_adv_fc(struct elink_phy *phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
}
- pause_result = (ld_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
- pause_result |= (lp_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- PMD_DRV_LOG(DEBUG, "Ext PHY pause result 0x%x", pause_result);
- elink_pause_resolve(vars, pause_result);
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ ELINK_DEBUG_P1(sc, "Ext PHY pause result 0x%x", pause_result);
+ elink_pause_resolve(phy, params, vars, pause_result);
}
static uint8_t elink_ext_phy_resolve_fc(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint8_t ret = 0;
vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
@@ -3312,7 +4454,6 @@ static uint8_t elink_ext_phy_resolve_fc(struct elink_phy *phy,
}
return ret;
}
-
/******************************************************************/
/* Warpcore section */
/******************************************************************/
@@ -3321,19 +4462,31 @@ static uint8_t elink_ext_phy_resolve_fc(struct elink_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
-#define WC_TX_DRIVER(post2, idriver, ipre) \
+#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \
((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
(idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
- (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \
+ (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET))
#define WC_TX_FIR(post, main, pre) \
((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
(main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
(pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
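A small compatibility sketch for the extended WC_TX_DRIVER() macro above (illustrative only; it assumes the MDIO_WC_REG_TX0_TX_DRIVER_*_OFFSET constants from the elink headers are in scope). With ifir passed as 0 the new term contributes no bits, which is why the call sites converted later in this patch simply append a trailing 0:

	/* Illustration only, not part of the patch. */
	static void example_tx_driver_encoding(void)
	{
		/* Same packed value as the old three-argument macro. */
		uint16_t legacy = WC_TX_DRIVER(0x02, 0x06, 0x09, 0);
		/* Hypothetical non-zero IFIR setting. */
		uint16_t with_ifir = WC_TX_DRIVER(0x02, 0x06, 0x09, 0x3);

		(void)legacy;
		(void)with_ifir;
	}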
+static void elink_update_link_attr(struct elink_params *params,
+ uint32_t link_attr)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ if (SHMEM2_HAS(sc, link_attr_sync))
+ REG_WR(sc, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port]), link_attr);
+}
+
static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
struct elink_params *params,
- struct elink_vars *vars)
+ __rte_unused struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t i;
@@ -3356,7 +4509,7 @@ static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
};
- PMD_DRV_LOG(DEBUG, "Enabling 20G-KR2");
+ ELINK_DEBUG_P0(sc, "Enabling 20G-KR2");
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3 << 6));
@@ -3366,15 +4519,16 @@ static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
reg_set[i].val);
/* Start KR2 work-around timer which handles BNX2X8073 link-parner */
- vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
- elink_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ elink_update_link_attr(params, params->link_attr_sync);
}
static void elink_disable_kr2(struct elink_params *params,
- struct elink_vars *vars, struct elink_phy *phy)
+ struct elink_vars *vars,
+ struct elink_phy *phy)
{
struct bnx2x_softc *sc = params->sc;
- uint32_t i;
+ int i;
static struct elink_reg_set reg_set[] = {
/* Step 1 - Program the TX/RX alignment markers */
{MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
@@ -3393,13 +4547,13 @@ static void elink_disable_kr2(struct elink_params *params,
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
};
- PMD_DRV_LOG(DEBUG, "Disabling 20G-KR2");
+ ELINK_DEBUG_P0(sc, "Disabling 20G-KR2");
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ for (i = 0; i < (int)ARRAY_SIZE(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- elink_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ elink_update_link_attr(params, params->link_attr_sync);
vars->check_kr2_recovery_cnt = ELINK_CHECK_KR2_RECOVERY_CNT;
}
@@ -3409,7 +4563,7 @@ static void elink_warpcore_set_lpi_passthrough(struct elink_phy *phy,
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Configure WC for LPI pass through");
+ ELINK_DEBUG_P0(sc, "Configure WC for LPI pass through");
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
@@ -3421,7 +4575,7 @@ static void elink_warpcore_restart_AN_KR(struct elink_phy *phy,
{
/* Restart autoneg on the leading lane only */
struct bnx2x_softc *sc = params->sc;
- uint16_t lane = elink_get_warpcore_lane(params);
+ uint16_t lane = elink_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
@@ -3433,9 +4587,9 @@ static void elink_warpcore_restart_AN_KR(struct elink_phy *phy,
static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
struct elink_params *params,
- struct elink_vars *vars)
-{
- uint16_t lane, i, cl72_ctrl, an_adv = 0;
+ struct elink_vars *vars) {
+ uint16_t lane, i, cl72_ctrl, an_adv = 0, val;
+ uint32_t wc_lane_config;
struct bnx2x_softc *sc = params->sc;
static struct elink_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3447,7 +4601,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
};
- PMD_DRV_LOG(DEBUG, "Enable Auto Negotiation for KR");
+ ELINK_DEBUG_P0(sc, "Enable Auto Negotiation for KR");
/* Set to default registers that may be overridden by 10G force */
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
@@ -3469,11 +4623,11 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
/* Enable CL37 1G Parallel Detect */
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, addr, 0x1);
- PMD_DRV_LOG(DEBUG, "Advertize 1G");
+ ELINK_DEBUG_P0(sc, "Advertize 1G");
}
if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
- (vars->line_speed == ELINK_SPEED_10000)) {
+ (vars->line_speed == ELINK_SPEED_10000)) {
/* Check adding advertisement for 10G KR */
an_adv |= (1 << 7);
/* Enable 10G Parallel Detect */
@@ -3483,23 +4637,25 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
elink_set_aer_mmd(params, phy);
- PMD_DRV_LOG(DEBUG, "Advertize 10G");
+ ELINK_DEBUG_P0(sc, "Advertize 10G");
}
/* Set Transmit PMD settings */
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane,
- WC_TX_DRIVER(0x02, 0x06, 0x09));
+ WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
/* Configure the next lane if dual mode */
if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * (lane + 1),
- WC_TX_DRIVER(0x02, 0x06, 0x09));
+ WC_TX_DRIVER(0x02, 0x06, 0x09, 0));
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 0x03f0);
+ MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+ 0x03f0);
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, 0x03f0);
+ MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+ 0x03f0);
/* Advertised speeds */
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
@@ -3513,14 +4669,13 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
/* Enable CL37 BAM */
if (REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- default_cfg)) &
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
1);
- PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR");
+ ELINK_DEBUG_P0(sc, "Enable CL37 BAM on KR");
}
/* Advertise pause */
@@ -3531,7 +4686,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
/* Over 1G - AN local device user page 1 */
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
@@ -3542,7 +4697,8 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX1_PCI_CTRL +
- (0x10 * lane), (1 << 11));
+ (0x10 * lane),
+ (1 << 11));
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
@@ -3550,6 +4706,31 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
elink_warpcore_enable_AN_KR2(phy, params, vars);
} else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+ wc_lane_config = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ shared_hw_config.wc_lane_config));
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ val |= 1 << 11;
+
+ /* Restore Polarity settings in case it was run over by
+ * previous link owner
+ */
+ if (wc_lane_config &
+ (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+ val |= 3 << 2;
+ else
+ val &= ~(3 << 2);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+ val);
+
elink_disable_kr2(params, vars, phy);
}
@@ -3558,7 +4739,8 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
}
static void elink_warpcore_set_10G_KR(struct elink_phy *phy,
- struct elink_params *params)
+ struct elink_params *params,
+ __rte_unused struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val16, i, lane;
@@ -3566,7 +4748,7 @@ static void elink_warpcore_set_10G_KR(struct elink_phy *phy,
/* Disable Autoneg */
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- 0x3f00},
+ 0x3f00},
{MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
@@ -3579,7 +4761,7 @@ static void elink_warpcore_set_10G_KR(struct elink_phy *phy,
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
/* Global registers */
CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
@@ -3609,7 +4791,8 @@ static void elink_warpcore_set_10G_KR(struct elink_phy *phy,
MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
/* Turn TX scramble payload only the 64/66 scrambler */
- elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX66_CONTROL, 0x9);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX66_CONTROL, 0x9);
/* Turn RX scramble payload only the 64/66 scrambler */
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
@@ -3630,7 +4813,7 @@ static void elink_warpcore_set_10G_XFI(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t misc1_val, tap_val, tx_driver_val, lane, val;
uint32_t cfg_tap_val, tx_drv_brdct, tx_equal;
-
+ uint32_t ifir_val, ipost2_val, ipre_driver_val;
/* Hold rxSeqStart */
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3675,38 +4858,59 @@ static void elink_warpcore_set_10G_XFI(struct elink_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
- tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0);
} else {
cfg_tap_val = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->
- port].sfi_tap_values));
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
- tx_drv_brdct = (cfg_tap_val &
- PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
- PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
-
misc1_val |= 0x9;
/* TAP values are controlled by nvram, if value there isn't 0 */
if (tx_equal)
- tap_val = (uint16_t) tx_equal;
+ tap_val = (uint16_t)tx_equal;
else
tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
- if (tx_drv_brdct)
- tx_driver_val =
- WC_TX_DRIVER(0x03, (uint16_t) tx_drv_brdct, 0x06);
- else
- tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
+ ifir_val = DEFAULT_TX_DRV_IFIR;
+ ipost2_val = DEFAULT_TX_DRV_POST2;
+ ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER;
+ tx_drv_brdct = DEFAULT_TX_DRV_BRDCT;
+
+ /* If any of the IFIR/IPRE_DRIVER/POST@ is set, apply all
+ * configuration.
+ */
+ if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK |
+ PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK |
+ PORT_HW_CFG_TX_DRV_POST2_MASK)) {
+ ifir_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_IFIR_MASK) >>
+ PORT_HW_CFG_TX_DRV_IFIR_SHIFT;
+ ipre_driver_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK)
+ >> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT;
+ ipost2_val = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_POST2_MASK) >>
+ PORT_HW_CFG_TX_DRV_POST2_SHIFT;
+ }
+
+ if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) {
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+ }
+
+ tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct,
+ ipre_driver_val, ifir_val);
}
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
/* Set Transmit PMD settings */
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
@@ -3754,7 +4958,8 @@ static void elink_warpcore_set_20G_force_KR2(struct elink_phy *phy,
elink_cl45_read_and_write(sc, phy, MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_KR_CONTROL, ~(1 << 1));
- elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+ elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CTRL, 0);
/* Turn off CL73 */
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL73_USERB0_CTRL, &val);
@@ -3790,7 +4995,8 @@ static void elink_warpcore_set_20G_force_KR2(struct elink_phy *phy,
}
static void elink_warpcore_set_20G_DXGXS(struct bnx2x_softc *sc,
- struct elink_phy *phy, uint16_t lane)
+ struct elink_phy *phy,
+ uint16_t lane)
{
/* Rx0 anaRxControl1G */
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
@@ -3800,13 +5006,17 @@ static void elink_warpcore_set_20G_DXGXS(struct bnx2x_softc *sc,
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
- elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW0, 0xE070);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0, 0xE070);
- elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1, 0xC0D0);
- elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2, 0xA0B0);
- elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW3, 0x8090);
+ elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3, 0x8090);
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
@@ -3835,7 +5045,7 @@ static void elink_warpcore_set_20G_DXGXS(struct bnx2x_softc *sc,
MDIO_WC_REG_TX_FIR_TAP_ENABLE));
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane,
- WC_TX_DRIVER(0x02, 0x02, 0x02));
+ WC_TX_DRIVER(0x02, 0x02, 0x02, 0));
}
static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
@@ -3857,7 +5067,7 @@ static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
0x1000);
- PMD_DRV_LOG(DEBUG, "set SGMII AUTONEG");
+ ELINK_DEBUG_P0(sc, "set SGMII AUTONEG");
} else {
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3872,9 +5082,8 @@ static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
val16 |= 0x0040;
break;
default:
- PMD_DRV_LOG(DEBUG,
- "Speed not supported: 0x%x",
- phy->req_line_speed);
+ ELINK_DEBUG_P1(sc,
+ "Speed not supported: 0x%x", phy->req_line_speed);
return;
}
@@ -3882,13 +5091,13 @@ static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
val16 |= 0x0100;
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
- PMD_DRV_LOG(DEBUG, "set SGMII force speed %d",
- phy->req_line_speed);
+ ELINK_DEBUG_P1(sc, "set SGMII force speed %d",
+ phy->req_line_speed);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- PMD_DRV_LOG(DEBUG, " (readback) %x", val16);
+ ELINK_DEBUG_P1(sc, " (readback) %x", val16);
}
/* SGMII Slave mode and disable signal detect */
@@ -3900,28 +5109,31 @@ static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
digctrl_kx1 &= 0xff4a;
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, digctrl_kx1);
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ digctrl_kx1);
/* Turn off parallel detect */
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- (digctrl_kx2 & ~(1 << 2)));
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 & ~(1 << 2)));
/* Re-enable parallel detect */
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- (digctrl_kx2 | (1 << 2)));
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 | (1 << 2)));
/* Enable autodet */
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
- (digctrl_kx1 | 0x10));
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (digctrl_kx1 | 0x10));
}
+
static void elink_warpcore_reset_lane(struct bnx2x_softc *sc,
- struct elink_phy *phy, uint8_t reset)
+ struct elink_phy *phy,
+ uint8_t reset)
{
uint16_t val;
/* Take lane out of reset after configuration is finished */
@@ -3934,7 +5146,7 @@ static void elink_warpcore_reset_lane(struct bnx2x_softc *sc,
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, val);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC6, &val);
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
}
/* Clear SFI/XFI link settings registers */
@@ -3950,11 +5162,11 @@ static void elink_warpcore_clear_regs(struct elink_phy *phy,
{MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
- 0x0195},
+ 0x0195},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- 0x0007},
+ 0x0007},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
- 0x0002},
+ 0x0002},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
{MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
{MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
@@ -3968,40 +5180,41 @@ static void elink_warpcore_clear_regs(struct elink_phy *phy,
elink_cl45_write(sc, phy, wc_regs[i].devad, wc_regs[i].reg,
wc_regs[i].val);
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, 0x0990);
}
static elink_status_t elink_get_mod_abs_int_cfg(struct bnx2x_softc *sc,
+ __rte_unused uint32_t chip_id,
uint32_t shmem_base,
uint8_t port,
- uint8_t * gpio_num,
- uint8_t * gpio_port)
+ uint8_t *gpio_num,
+ uint8_t *gpio_port)
{
uint32_t cfg_pin;
*gpio_num = 0;
*gpio_port = 0;
if (CHIP_IS_E3(sc)) {
cfg_pin = (REG_RD(sc, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- e3_sfp_ctrl)) &
- PORT_HW_CFG_E3_MOD_ABS_MASK) >>
- PORT_HW_CFG_E3_MOD_ABS_SHIFT;
-
- /* Should not happen. This function called upon interrupt
- * triggered by GPIO ( since EPIO can only generate interrupts
- * to MCP).
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+ PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+ /*
+ * This should not happen since this function is called
+ * from interrupt triggered by GPIO (since EPIO can only
+ * generate interrupts to MCP).
* So if this function was called and none of the GPIOs was set,
- * it means the shit hit the fan.
+ * it means something disastrous has already happened.
*/
if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
(cfg_pin > PIN_CFG_GPIO3_P1)) {
- PMD_DRV_LOG(DEBUG,
- "No cfg pin %x for module detect indication",
- cfg_pin);
+ ELINK_DEBUG_P1(sc,
+ "No cfg pin %x for module detect indication",
+ cfg_pin);
return ELINK_STATUS_ERROR;
}
@@ -4015,12 +5228,13 @@ static elink_status_t elink_get_mod_abs_int_cfg(struct bnx2x_softc *sc,
return ELINK_STATUS_OK;
}
-static int elink_is_sfp_module_plugged(struct elink_params *params)
+static int elink_is_sfp_module_plugged(__rte_unused struct elink_phy *phy,
+ struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
uint8_t gpio_num, gpio_port;
uint32_t gpio_val;
- if (elink_get_mod_abs_int_cfg(sc,
+ if (elink_get_mod_abs_int_cfg(sc, params->chip_id,
params->shmem_base, params->port,
&gpio_num, &gpio_port) != ELINK_STATUS_OK)
return 0;
@@ -4032,17 +5246,16 @@ static int elink_is_sfp_module_plugged(struct elink_params *params)
else
return 0;
}
-
static int elink_warpcore_get_sigdet(struct elink_phy *phy,
struct elink_params *params)
{
uint16_t gp2_status_reg0, lane;
struct bnx2x_softc *sc = params->sc;
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
- &gp2_status_reg0);
+ &gp2_status_reg0);
return (gp2_status_reg0 >> (8 + lane)) & 0x1;
}
@@ -4061,38 +5274,35 @@ static void elink_warpcore_config_runtime(struct elink_phy *phy,
return;
if (vars->rx_tx_asic_rst) {
- uint16_t lane = elink_get_warpcore_lane(params);
+ uint16_t lane = elink_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config
- [params->port].
- default_cfg)) &
- PORT_HW_CFG_NET_SERDES_IF_MASK);
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
switch (serdes_net_if) {
case PORT_HW_CFG_NET_SERDES_IF_KR:
/* Do we get link yet? */
elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 0x81d1,
&gp_status1);
- lnkup = (gp_status1 >> (8 + lane)) & 0x1; /* 1G */
- /*10G KR */
+ lnkup = (gp_status1 >> (8 + lane)) & 0x1;/* 1G */
+ /*10G KR*/
lnkup_kr = (gp_status1 >> (12 + lane)) & 0x1;
if (lnkup_kr || lnkup) {
vars->rx_tx_asic_rst = 0;
} else {
- /* Reset the lane to see if link comes up. */
+ /* Reset the lane to see if link comes up.*/
elink_warpcore_reset_lane(sc, phy, 1);
elink_warpcore_reset_lane(sc, phy, 0);
/* Restart Autoneg */
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL,
- 0x1200);
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
vars->rx_tx_asic_rst--;
- PMD_DRV_LOG(DEBUG, "0x%x retry left",
- vars->rx_tx_asic_rst);
+ ELINK_DEBUG_P1(sc, "0x%x retry left",
+ vars->rx_tx_asic_rst);
}
break;
@@ -4100,29 +5310,29 @@ static void elink_warpcore_config_runtime(struct elink_phy *phy,
break;
}
- }
- /*params->rx_tx_asic_rst */
+ } /*params->rx_tx_asic_rst*/
}
static void elink_warpcore_config_sfi(struct elink_phy *phy,
struct elink_params *params)
{
- uint16_t lane = elink_get_warpcore_lane(params);
-
+ uint16_t lane = elink_get_warpcore_lane(phy, params);
+ struct bnx2x_softc *sc = params->sc;
elink_warpcore_clear_regs(phy, params, lane);
if ((params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)] ==
ELINK_SPEED_10000) &&
(phy->media_type != ELINK_ETH_PHY_SFP_1G_FIBER)) {
- PMD_DRV_LOG(DEBUG, "Setting 10G SFI");
+ ELINK_DEBUG_P0(sc, "Setting 10G SFI");
elink_warpcore_set_10G_XFI(phy, params, 0);
} else {
- PMD_DRV_LOG(DEBUG, "Setting 1G Fiber");
+ ELINK_DEBUG_P0(sc, "Setting 1G Fiber");
elink_warpcore_set_sgmii_speed(phy, params, 1, 0);
}
}
static void elink_sfp_e3_set_transmitter(struct elink_params *params,
- struct elink_phy *phy, uint8_t tx_en)
+ struct elink_phy *phy,
+ uint8_t tx_en)
{
struct bnx2x_softc *sc = params->sc;
uint32_t cfg_pin;
@@ -4131,9 +5341,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params,
cfg_pin = REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[port].e3_sfp_ctrl)) &
- PORT_HW_CFG_E3_TX_LASER_MASK;
+ PORT_HW_CFG_E3_TX_LASER_MASK;
/* Set the !tx_en since this pin is DISABLE_TX_LASER */
- PMD_DRV_LOG(DEBUG, "Setting WC TX to %d", tx_en);
+ ELINK_DEBUG_P1(sc, "Setting WC TX to %d", tx_en);
/* For 20G, the expected pin to be used is 3 pins after the current */
elink_set_cfg_pin(sc, cfg_pin, tx_en ^ 1);
@@ -4142,21 +5352,20 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params,
}
static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint32_t serdes_net_if;
uint8_t fiber_mode;
- uint16_t lane = elink_get_warpcore_lane(params);
+ uint16_t lane = elink_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- default_cfg)) &
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_NET_SERDES_IF_MASK);
- PMD_DRV_LOG(DEBUG,
- "Begin Warpcore init, link_speed %d, "
- "serdes_net_if = 0x%x", vars->line_speed, serdes_net_if);
+ ELINK_DEBUG_P2(sc, "Begin Warpcore init, link_speed %d, "
+ "serdes_net_if = 0x%x",
+ vars->line_speed, serdes_net_if);
elink_set_aer_mmd(params, phy);
elink_warpcore_reset_lane(sc, phy, 1);
vars->phy_flags |= PHY_XGXS_FLAG;
@@ -4165,7 +5374,7 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
((phy->req_line_speed == ELINK_SPEED_100) ||
(phy->req_line_speed == ELINK_SPEED_10)))) {
vars->phy_flags |= PHY_SGMII_FLAG;
- PMD_DRV_LOG(DEBUG, "Setting SGMII mode");
+ ELINK_DEBUG_P0(sc, "Setting SGMII mode");
elink_warpcore_clear_regs(phy, params, lane);
elink_warpcore_set_sgmii_speed(phy, params, 0, 1);
} else {
@@ -4175,27 +5384,28 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
if (params->loopback_mode != ELINK_LOOPBACK_EXT)
elink_warpcore_enable_AN_KR(phy, params, vars);
else {
- PMD_DRV_LOG(DEBUG, "Setting KR 10G-Force");
- elink_warpcore_set_10G_KR(phy, params);
+ ELINK_DEBUG_P0(sc, "Setting KR 10G-Force");
+ elink_warpcore_set_10G_KR(phy, params, vars);
}
break;
case PORT_HW_CFG_NET_SERDES_IF_XFI:
elink_warpcore_clear_regs(phy, params, lane);
if (vars->line_speed == ELINK_SPEED_10000) {
- PMD_DRV_LOG(DEBUG, "Setting 10G XFI");
+ ELINK_DEBUG_P0(sc, "Setting 10G XFI");
elink_warpcore_set_10G_XFI(phy, params, 1);
} else {
if (ELINK_SINGLE_MEDIA_DIRECT(params)) {
- PMD_DRV_LOG(DEBUG, "1G Fiber");
+ ELINK_DEBUG_P0(sc, "1G Fiber");
fiber_mode = 1;
} else {
- PMD_DRV_LOG(DEBUG, "10/100/1G SGMII");
+ ELINK_DEBUG_P0(sc, "10/100/1G SGMII");
fiber_mode = 0;
}
elink_warpcore_set_sgmii_speed(phy,
- params,
- fiber_mode, 0);
+ params,
+ fiber_mode,
+ 0);
}
break;
@@ -4207,7 +5417,7 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
*/
if ((params->loopback_mode == ELINK_LOOPBACK_NONE) ||
(params->loopback_mode == ELINK_LOOPBACK_EXT)) {
- if (elink_is_sfp_module_plugged(params))
+ if (elink_is_sfp_module_plugged(phy, params))
elink_sfp_module_detection(phy, params);
else
elink_sfp_e3_set_transmitter(params,
@@ -4219,10 +5429,10 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
if (vars->line_speed != ELINK_SPEED_20000) {
- PMD_DRV_LOG(DEBUG, "Speed not supported yet");
+ ELINK_DEBUG_P0(sc, "Speed not supported yet");
return 0;
}
- PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS");
+ ELINK_DEBUG_P0(sc, "Setting 20G DXGXS");
elink_warpcore_set_20G_DXGXS(sc, phy, lane);
/* Issue Module detection */
@@ -4232,21 +5442,21 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
if (!params->loopback_mode) {
elink_warpcore_enable_AN_KR(phy, params, vars);
} else {
- PMD_DRV_LOG(DEBUG, "Setting KR 20G-Force");
+ ELINK_DEBUG_P0(sc, "Setting KR 20G-Force");
elink_warpcore_set_20G_force_KR2(phy, params);
}
break;
default:
- PMD_DRV_LOG(DEBUG,
- "Unsupported Serdes Net Interface 0x%x",
- serdes_net_if);
+ ELINK_DEBUG_P1(sc,
+ "Unsupported Serdes Net Interface 0x%x",
+ serdes_net_if);
return 0;
}
}
/* Take lane out of reset after configuration is finished */
elink_warpcore_reset_lane(sc, phy, 0);
- PMD_DRV_LOG(DEBUG, "Exit config init");
+ ELINK_DEBUG_P0(sc, "Exit config init");
return 0;
}
@@ -4275,11 +5485,12 @@ static void elink_warpcore_link_reset(struct elink_phy *phy,
MDIO_AER_BLOCK_AER_REG, 0);
/* Enable 1G MDIO (1-copy) */
elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~0x10);
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ ~0x10);
elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
/* Disable CL36 PCS Tx */
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
@@ -4311,8 +5522,8 @@ static void elink_set_warpcore_loopback(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t val16;
uint32_t lane;
- PMD_DRV_LOG(DEBUG, "Setting Warpcore loopback type %x, speed %d",
- params->loopback_mode, phy->req_line_speed);
+ ELINK_DEBUG_P2(sc, "Setting Warpcore loopback type %x, speed %d",
+ params->loopback_mode, phy->req_line_speed);
if (phy->req_line_speed < ELINK_SPEED_10000 ||
phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) {
@@ -4326,14 +5537,15 @@ static void elink_set_warpcore_loopback(struct elink_phy *phy,
MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
0x10);
/* Set 1G loopback based on lane (1-copy) */
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
val16 |= (1 << lane);
if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)
val16 |= (2 << lane);
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL2, val16);
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16);
/* Switch back to 4-copy registers */
elink_set_aer_mmd(params, phy);
@@ -4347,8 +5559,10 @@ static void elink_set_warpcore_loopback(struct elink_phy *phy,
}
}
+
+
static void elink_sync_link(struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t link_10g_plus;
@@ -4356,21 +5570,23 @@ static void elink_sync_link(struct elink_params *params,
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
if (vars->link_up) {
- PMD_DRV_LOG(DEBUG, "phy link up");
+ ELINK_DEBUG_P0(sc, "phy link up");
+ ELINK_DEBUG_P1(sc, "link status = %x", vars->link_status);
vars->phy_link_up = 1;
vars->duplex = DUPLEX_FULL;
- switch (vars->link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ switch (vars->link_status &
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case ELINK_LINK_10THD:
vars->duplex = DUPLEX_HALF;
- /* Fall through */
+ /* Fall thru */
case ELINK_LINK_10TFD:
vars->line_speed = ELINK_SPEED_10;
break;
case ELINK_LINK_100TXHD:
vars->duplex = DUPLEX_HALF;
- /* Fall through */
+ /* Fall thru */
case ELINK_LINK_100T4:
case ELINK_LINK_100TXFD:
vars->line_speed = ELINK_SPEED_100;
@@ -4378,14 +5594,14 @@ static void elink_sync_link(struct elink_params *params,
case ELINK_LINK_1000THD:
vars->duplex = DUPLEX_HALF;
- /* Fall through */
+ /* Fall thru */
case ELINK_LINK_1000TFD:
vars->line_speed = ELINK_SPEED_1000;
break;
case ELINK_LINK_2500THD:
vars->duplex = DUPLEX_HALF;
- /* Fall through */
+ /* Fall thru */
case ELINK_LINK_2500TFD:
vars->line_speed = ELINK_SPEED_2500;
break;
@@ -4417,7 +5633,8 @@ static void elink_sync_link(struct elink_params *params,
vars->phy_flags &= ~PHY_SGMII_FLAG;
}
if (vars->line_speed &&
- USES_WARPCORE(sc) && (vars->line_speed == ELINK_SPEED_1000))
+ USES_WARPCORE(sc) &&
+ (vars->line_speed == ELINK_SPEED_1000))
vars->phy_flags |= PHY_SGMII_FLAG;
/* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= ELINK_SPEED_10000);
@@ -4433,8 +5650,8 @@ static void elink_sync_link(struct elink_params *params,
else
vars->mac_type = ELINK_MAC_TYPE_EMAC;
}
- } else { /* Link down */
- PMD_DRV_LOG(DEBUG, "phy link down");
+ } else { /* Link down */
+ ELINK_DEBUG_P0(sc, "phy link down");
vars->phy_link_up = 0;
@@ -4478,44 +5695,44 @@ void elink_link_status_update(struct elink_params *params,
elink_sync_link(params, vars);
/* Sync media type */
sync_offset = params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].media_type);
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].media_type);
media_types = REG_RD(sc, sync_offset);
params->phy[ELINK_INT_PHY].media_type =
- (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
- PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
params->phy[ELINK_EXT_PHY1].media_type =
- (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
- PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
params->phy[ELINK_EXT_PHY2].media_type =
- (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
- PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
- PMD_DRV_LOG(DEBUG, "media_types = 0x%x", media_types);
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+ ELINK_DEBUG_P1(sc, "media_types = 0x%x", media_types);
/* Sync AEU offset */
sync_offset = params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].aeu_int_mask);
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
vars->aeu_int_mask = REG_RD(sc, sync_offset);
/* Sync PFC status */
if (vars->link_status & LINK_STATUS_PFC_ENABLED)
params->feature_config_flags |=
- ELINK_FEATURE_CONFIG_PFC_ENABLED;
+ ELINK_FEATURE_CONFIG_PFC_ENABLED;
else
params->feature_config_flags &=
- ~ELINK_FEATURE_CONFIG_PFC_ENABLED;
+ ~ELINK_FEATURE_CONFIG_PFC_ENABLED;
if (SHMEM2_HAS(sc, link_attr_sync))
- vars->link_attr_sync = SHMEM2_RD(sc,
+ params->link_attr_sync = SHMEM2_RD(sc,
link_attr_sync[params->port]);
- PMD_DRV_LOG(DEBUG, "link_status 0x%x phy_link_up %x int_mask 0x%x",
- vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
- PMD_DRV_LOG(DEBUG, "line_speed %x duplex %x flow_ctrl 0x%x",
- vars->line_speed, vars->duplex, vars->flow_ctrl);
+ ELINK_DEBUG_P3(sc, "link_status 0x%x phy_link_up %x int_mask 0x%x",
+ vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
+ ELINK_DEBUG_P3(sc, "line_speed %x duplex %x flow_ctrl 0x%x",
+ vars->line_speed, vars->duplex, vars->flow_ctrl);
}
static void elink_set_master_ln(struct elink_params *params,
@@ -4530,7 +5747,8 @@ static void elink_set_master_ln(struct elink_params *params,
/* Set the master_ln for AN */
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE, &new_master_ln);
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
@@ -4539,8 +5757,8 @@ static void elink_set_master_ln(struct elink_params *params,
}
static elink_status_t elink_reset_unicore(struct elink_params *params,
- struct elink_phy *phy,
- uint8_t set_serdes)
+ struct elink_phy *phy,
+ uint8_t set_serdes)
{
struct bnx2x_softc *sc = params->sc;
uint16_t mii_control;
@@ -4553,7 +5771,8 @@ static elink_status_t elink_reset_unicore(struct elink_params *params,
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
if (set_serdes)
elink_set_serdes_access(sc, params->port);
@@ -4564,7 +5783,8 @@ static elink_status_t elink_reset_unicore(struct elink_params *params,
/* The reset erased the previous bank value */
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
DELAY(5);
@@ -4572,10 +5792,12 @@ static elink_status_t elink_reset_unicore(struct elink_params *params,
}
}
- elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized,"
- // " Port %d",
+ elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port);
+ /* "Warning: PHY was not initialized,"
+ * " Port %d",
+ */
- PMD_DRV_LOG(DEBUG, "BUG! XGXS is still in reset!");
+ ELINK_DEBUG_P0(sc, "BUG! XGXS is still in reset!");
return ELINK_STATUS_ERROR;
}
@@ -4629,31 +5851,35 @@ static void elink_set_parallel_detection(struct elink_phy *phy,
uint16_t control2;
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, &control2);
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
- PMD_DRV_LOG(DEBUG, "phy->speed_cap_mask = 0x%x, control2 = 0x%x",
- phy->speed_cap_mask, control2);
+ ELINK_DEBUG_P2(sc, "phy->speed_cap_mask = 0x%x, control2 = 0x%x",
+ phy->speed_cap_mask, control2);
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, control2);
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
- PMD_DRV_LOG(DEBUG, "XGXS");
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ ELINK_DEBUG_P0(sc, "XGXS");
CL22_WR_OVER_CL45(sc, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
&control2);
+
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
@@ -4673,7 +5899,8 @@ static void elink_set_parallel_detection(struct elink_phy *phy,
static void elink_set_autoneg(struct elink_phy *phy,
struct elink_params *params,
- struct elink_vars *vars, uint8_t enable_cl73)
+ struct elink_vars *vars,
+ uint8_t enable_cl73)
{
struct bnx2x_softc *sc = params->sc;
uint16_t reg_val;
@@ -4686,7 +5913,7 @@ static void elink_set_autoneg(struct elink_phy *phy,
/* CL37 Autoneg Enabled */
if (vars->line_speed == ELINK_SPEED_AUTO_NEG)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
- else /* CL37 Autoneg Disabled */
+ else /* CL37 Autoneg Disabled */
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
@@ -4700,7 +5927,7 @@ static void elink_set_autoneg(struct elink_phy *phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
if (vars->line_speed == ELINK_SPEED_AUTO_NEG)
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
@@ -4714,7 +5941,8 @@ static void elink_set_autoneg(struct elink_phy *phy,
/* Enable TetonII and BAM autoneg */
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, &reg_val);
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ &reg_val);
if (vars->line_speed == ELINK_SPEED_AUTO_NEG) {
/* Enable BAM aneg Mode and TetonII aneg Mode */
reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
@@ -4726,40 +5954,45 @@ static void elink_set_autoneg(struct elink_phy *phy,
}
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, reg_val);
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
if (enable_cl73) {
/* Enable Cl73 FSM status bits */
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_UCTRL, 0xe);
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
- /* Enable BAM Station Manager */
+ /* Enable BAM Station Manager*/
CL22_WR_OVER_CL45(sc, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_BAM_CTRL1,
- MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
- MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN
- |
- MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
/* Advertise CL73 link speeds */
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
- if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
- if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
/* CL73 Autoneg Enabled */
reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
- } else /* CL73 Autoneg Disabled */
+ } else /* CL73 Autoneg Disabled */
reg_val = 0;
CL22_WR_OVER_CL45(sc, phy,
@@ -4775,7 +6008,7 @@ static void elink_program_serdes(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t reg_val;
- /* Program duplex, disable autoneg and sgmii */
+ /* Program duplex, disable autoneg and sgmii*/
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4795,7 +6028,7 @@ static void elink_program_serdes(struct elink_phy *phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* Clearing the speed value before setting the right speed */
- PMD_DRV_LOG(DEBUG, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x", reg_val);
+ ELINK_DEBUG_P1(sc, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
@@ -4808,7 +6041,7 @@ static void elink_program_serdes(struct elink_phy *phy,
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
if (vars->line_speed == ELINK_SPEED_10000)
reg_val |=
- MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
}
CL22_WR_OVER_CL45(sc, phy,
@@ -4829,10 +6062,12 @@ static void elink_set_brcm_cl37_advertisement(struct elink_phy *phy,
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
CL22_WR_OVER_CL45(sc, phy,
- MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_UP1, val);
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
CL22_WR_OVER_CL45(sc, phy,
- MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_UP3, 0x400);
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
}
static void elink_set_ieee_aneg_advertisement(struct elink_phy *phy,
@@ -4863,7 +6098,7 @@ static void elink_restart_autoneg(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t mii_control;
- PMD_DRV_LOG(DEBUG, "elink_restart_autoneg");
+ ELINK_DEBUG_P0(sc, "elink_restart_autoneg");
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
@@ -4876,16 +6111,17 @@ static void elink_restart_autoneg(struct elink_phy *phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
(mii_control |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- PMD_DRV_LOG(DEBUG,
- "elink_restart_autoneg mii_control before = 0x%x",
- mii_control);
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+ ELINK_DEBUG_P1(sc,
+ "elink_restart_autoneg mii_control before = 0x%x",
+ mii_control);
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4906,7 +6142,8 @@ static void elink_initialize_sgmii_process(struct elink_phy *phy,
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &control1);
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
/* Set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
@@ -4914,7 +6151,8 @@ static void elink_initialize_sgmii_process(struct elink_phy *phy,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, control1);
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
/* If forced speed */
if (!(vars->line_speed == ELINK_SPEED_AUTO_NEG)) {
@@ -4923,7 +6161,8 @@ static void elink_initialize_sgmii_process(struct elink_phy *phy,
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -4931,30 +6170,32 @@ static void elink_initialize_sgmii_process(struct elink_phy *phy,
switch (vars->line_speed) {
case ELINK_SPEED_100:
mii_control |=
- MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
break;
case ELINK_SPEED_1000:
mii_control |=
- MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
break;
case ELINK_SPEED_10:
/* There is nothing to set for 10M */
break;
default:
/* Invalid speed for SGMII */
- PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x",
- vars->line_speed);
+ ELINK_DEBUG_P1(sc, "Invalid line_speed 0x%x",
+ vars->line_speed);
break;
}
/* Setting the full duplex */
if (phy->req_duplex == DUPLEX_FULL)
- mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, mii_control);
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
- } else { /* AN mode */
+ } else { /* AN mode */
/* Enable and restart AN */
elink_restart_autoneg(phy, params, 0);
}
@@ -4963,8 +6204,7 @@ static void elink_initialize_sgmii_process(struct elink_phy *phy,
/* Link management
*/
static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy,
- struct elink_params
- *params)
+ struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
uint16_t pd_10g, status2_1000x;
@@ -4972,34 +6212,38 @@ static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy,
return ELINK_STATUS_OK;
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2, &status2_1000x);
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2, &status2_1000x);
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
- PMD_DRV_LOG(DEBUG, "1G parallel detect link on port %d",
- params->port);
- return ELINK_STATUS_ERROR;
+ ELINK_DEBUG_P1(sc, "1G parallel detect link on port %d",
+ params->port);
+ return 1;
}
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, &pd_10g);
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
- PMD_DRV_LOG(DEBUG, "10G parallel detect link on port %d",
- params->port);
- return ELINK_STATUS_ERROR;
+ ELINK_DEBUG_P1(sc, "10G parallel detect link on port %d",
+ params->port);
+ return 1;
}
return ELINK_STATUS_OK;
}
static void elink_update_adv_fc(struct elink_phy *phy,
struct elink_params *params,
- struct elink_vars *vars, uint32_t gp_status)
+ struct elink_vars *vars,
+ uint32_t gp_status)
{
- uint16_t ld_pause; /* local driver */
- uint16_t lp_pause; /* link partner */
+ uint16_t ld_pause; /* local driver */
+ uint16_t lp_pause; /* link partner */
uint16_t pause_result;
struct bnx2x_softc *sc = params->sc;
if ((gp_status &
@@ -5010,37 +6254,42 @@ static void elink_update_adv_fc(struct elink_phy *phy,
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, &ld_pause);
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_LP_ADV1, &lp_pause);
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
- PMD_DRV_LOG(DEBUG, "pause_result CL73 0x%x", pause_result);
+ ELINK_DEBUG_P1(sc, "pause_result CL73 0x%x", pause_result);
} else {
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV, &ld_pause);
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
CL22_RD_OVER_CL45(sc, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
- &lp_pause);
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 5;
pause_result |= (lp_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 7;
- PMD_DRV_LOG(DEBUG, "pause_result CL37 0x%x", pause_result);
+ ELINK_DEBUG_P1(sc, "pause_result CL37 0x%x", pause_result);
}
- elink_pause_resolve(vars, pause_result);
+ elink_pause_resolve(phy, params, vars, pause_result);
}
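/* Illustrative note (not from this patch): in both the CL73 and CL37
 * branches above, the two advertisement registers are folded into a single
 * 4-bit pause_result, with the local driver's PAUSE/ASM_DIR bits landing in
 * bits [3:2] and the link partner's in bits [1:0], exactly as the shifts
 * above imply; elink_pause_resolve() then maps that nibble to the final
 * flow-control mode.
 */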
static void elink_flow_ctrl_resolve(struct elink_phy *phy,
struct elink_params *params,
- struct elink_vars *vars, uint32_t gp_status)
+ struct elink_vars *vars,
+ uint32_t gp_status)
{
+ struct bnx2x_softc *sc = params->sc;
vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
/* Resolve from gp_status in case of AN complete and not sgmii */
@@ -5060,7 +6309,7 @@ static void elink_flow_ctrl_resolve(struct elink_phy *phy,
}
elink_update_adv_fc(phy, params, vars, gp_status);
}
- PMD_DRV_LOG(DEBUG, "flow_ctrl 0x%x", vars->flow_ctrl);
+ ELINK_DEBUG_P1(sc, "flow_ctrl 0x%x", vars->flow_ctrl);
}
static void elink_check_fallback_to_cl37(struct elink_phy *phy,
@@ -5068,14 +6317,16 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
{
struct bnx2x_softc *sc = params->sc;
uint16_t rx_status, ustat_val, cl37_fsm_received;
- PMD_DRV_LOG(DEBUG, "elink_check_fallback_to_cl37");
+ ELINK_DEBUG_P0(sc, "elink_check_fallback_to_cl37");
/* Step 1: Make sure signal is detected */
CL22_RD_OVER_CL45(sc, phy,
- MDIO_REG_BANK_RX0, MDIO_RX0_RX_STATUS, &rx_status);
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
- PMD_DRV_LOG(DEBUG, "Signal is not detected. Restoring CL73."
- "rx_status(0x80b0) = 0x%x", rx_status);
+ ELINK_DEBUG_P1(sc, "Signal is not detected. Restoring CL73."
+ "rx_status(0x80b0) = 0x%x", rx_status);
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
@@ -5085,14 +6336,15 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
/* Step 2: Check CL73 state machine */
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_USTAT1, &ustat_val);
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
if ((ustat_val &
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
- MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
- PMD_DRV_LOG(DEBUG, "CL73 state-machine is not stable. "
- "ustat_val(0x8371) = 0x%x", ustat_val);
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
+ ELINK_DEBUG_P1(sc, "CL73 state-machine is not stable. "
+ "ustat_val(0x8371) = 0x%x", ustat_val);
return;
}
/* Step 3: Check CL37 Message Pages received to indicate LP
@@ -5100,14 +6352,16 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
*/
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_REMOTE_PHY,
- MDIO_REMOTE_PHY_MISC_RX_STATUS, &cl37_fsm_received);
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_received);
if ((cl37_fsm_received &
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
- MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
- MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
- PMD_DRV_LOG(DEBUG, "No CL37 FSM were received. "
- "misc_rx_status(0x8330) = 0x%x", cl37_fsm_received);
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
+ ELINK_DEBUG_P1(sc, "No CL37 FSM were received. "
+ "misc_rx_status(0x8330) = 0x%x",
+ cl37_fsm_received);
return;
}
/* The combined cl37/cl73 fsm state information indicating that
@@ -5119,34 +6373,38 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
/* Disable CL73 */
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 0);
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
/* Restart CL37 autoneg */
elink_restart_autoneg(phy, params, 0);
- PMD_DRV_LOG(DEBUG, "Disabling CL73, and restarting CL37 autoneg");
+ ELINK_DEBUG_P0(sc, "Disabling CL73, and restarting CL37 autoneg");
}
static void elink_xgxs_an_resolve(struct elink_phy *phy,
struct elink_params *params,
- struct elink_vars *vars, uint32_t gp_status)
+ struct elink_vars *vars,
+ uint32_t gp_status)
{
if (gp_status & ELINK_MDIO_AN_CL73_OR_37_COMPLETE)
- vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
if (elink_direct_parallel_detect_used(phy, params))
- vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
}
-
static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
- struct elink_params *params __rte_unused,
- struct elink_vars *vars,
- uint16_t is_link_up,
- uint16_t speed_mask,
- uint16_t is_duplex)
+ struct elink_params *params,
+ struct elink_vars *vars,
+ uint16_t is_link_up,
+ uint16_t speed_mask,
+ uint16_t is_duplex)
{
+ struct bnx2x_softc *sc = params->sc;
if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
if (is_link_up) {
- PMD_DRV_LOG(DEBUG, "phy link up");
+ ELINK_DEBUG_P0(sc, "phy link up");
vars->phy_link_up = 1;
vars->link_status |= LINK_STATUS_LINK_UP;
@@ -5187,9 +6445,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
case ELINK_GP_STATUS_5G:
case ELINK_GP_STATUS_6G:
- PMD_DRV_LOG(DEBUG,
- "link speed unsupported gp_status 0x%x",
- speed_mask);
+ ELINK_DEBUG_P1(sc,
+ "link speed unsupported gp_status 0x%x",
+ speed_mask);
return ELINK_STATUS_ERROR;
case ELINK_GP_STATUS_10G_KX4:
@@ -5207,13 +6465,13 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
vars->link_status |= ELINK_LINK_20GTFD;
break;
default:
- PMD_DRV_LOG(DEBUG,
- "link speed unsupported gp_status 0x%x",
- speed_mask);
+ ELINK_DEBUG_P1(sc,
+ "link speed unsupported gp_status 0x%x",
+ speed_mask);
return ELINK_STATUS_ERROR;
}
- } else { /* link_down */
- PMD_DRV_LOG(DEBUG, "phy link down");
+ } else { /* link_down */
+ ELINK_DEBUG_P0(sc, "phy link down");
vars->phy_link_up = 0;
@@ -5221,14 +6479,16 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
vars->mac_type = ELINK_MAC_TYPE_NONE;
}
- PMD_DRV_LOG(DEBUG, " phy_link_up %x line_speed %d",
+ ELINK_DEBUG_P2(sc, " in elink_get_link_speed_duplex vars->link_status = %x, vars->duplex = %x",
+ vars->link_status, vars->duplex);
+ ELINK_DEBUG_P2(sc, " phy_link_up %x line_speed %d",
vars->phy_link_up, vars->line_speed);
return ELINK_STATUS_OK;
}
static uint8_t elink_link_settings_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
@@ -5238,14 +6498,23 @@ static uint8_t elink_link_settings_status(struct elink_phy *phy,
/* Read gp_status */
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status);
- if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) {
duplex = DUPLEX_FULL;
+ ELINK_DEBUG_P1(sc, "duplex status read from phy is = %x",
+ duplex);
+ } else {
+ ELINK_DEBUG_P1(sc, "phy status does not allow interface to be FULL_DUPLEX : %x",
+ gp_status);
+ }
+
+
if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
link_up = 1;
speed_mask = gp_status & ELINK_GP_STATUS_SPEED_MASK;
- PMD_DRV_LOG(DEBUG, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x",
- gp_status, link_up, speed_mask);
+ ELINK_DEBUG_P3(sc, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x",
+ gp_status, link_up, speed_mask);
rc = elink_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
duplex);
if (rc == ELINK_STATUS_ERROR)
@@ -5259,7 +6528,7 @@ static uint8_t elink_link_settings_status(struct elink_phy *phy,
elink_xgxs_an_resolve(phy, params, vars,
gp_status);
}
- } else { /* Link_down */
+ } else { /* Link_down */
if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
ELINK_SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
@@ -5267,7 +6536,7 @@ static uint8_t elink_link_settings_status(struct elink_phy *phy,
}
}
- /* Read LP advertised speeds */
+ /* Read LP advertised speeds*/
if (ELINK_SINGLE_MEDIA_DIRECT(params) &&
(vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) {
uint16_t val;
@@ -5277,61 +6546,69 @@ static uint8_t elink_link_settings_status(struct elink_phy *phy,
if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
CL22_RD_OVER_CL45(sc, phy, MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP1, &val);
if (val & MDIO_OVER_1G_UP1_2_5G)
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
- PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x",
- vars->duplex, vars->flow_ctrl, vars->link_status);
+ ELINK_DEBUG_P3(sc, "duplex %x flow_ctrl 0x%x link_status 0x%x",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t lane;
uint16_t gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
elink_status_t rc = ELINK_STATUS_OK;
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
/* Read gp_status */
- if ((params->loopback_mode) && (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)) {
+ if ((params->loopback_mode) &&
+ (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)) {
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
link_up &= 0x1;
+ ELINK_DEBUG_P1(sc, "params->loopback_mode link_up read = %x",
+ link_up);
} else if ((phy->req_line_speed > ELINK_SPEED_10000) &&
- (phy->supported & ELINK_SUPPORTED_20000baseMLD2_Full)) {
+ (phy->supported & ELINK_SUPPORTED_20000baseMLD2_Full)) {
uint16_t temp_link_up;
- elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &temp_link_up);
- elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &link_up);
- PMD_DRV_LOG(DEBUG, "PCS RX link status = 0x%x-->0x%x",
- temp_link_up, link_up);
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ 1, &temp_link_up);
+ elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ ELINK_DEBUG_P2(sc, "PCS RX link status = 0x%x-->0x%x",
+ temp_link_up, link_up);
link_up &= (1 << 2);
if (link_up)
elink_ext_phy_resolve_fc(phy, params, vars);
} else {
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
- PMD_DRV_LOG(DEBUG, "0x81d1 = 0x%x", gp_status1);
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status1);
+ ELINK_DEBUG_P1(sc, "0x81d1 = 0x%x", gp_status1);
/* Check for either KR, 1G, or AN up. */
link_up = ((gp_status1 >> 8) |
- (gp_status1 >> 12) | (gp_status1)) & (1 << lane);
+ (gp_status1 >> 12) |
+ (gp_status1)) &
+ (1 << lane);
if (phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) {
uint16_t an_link;
elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
@@ -5339,6 +6616,8 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_STATUS, &an_link);
link_up |= (an_link & (1 << 2));
+ ELINK_DEBUG_P2(sc, "an_link = %x, link_up = %x",
+ an_link, link_up);
}
if (link_up && ELINK_SINGLE_MEDIA_DIRECT(params)) {
uint16_t pd, gp_status4;
@@ -5349,7 +6628,7 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
&gp_status4);
if (gp_status4 & ((1 << 12) << lane))
vars->link_status |=
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
/* Check parallel detect used */
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
@@ -5357,13 +6636,19 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
&pd);
if (pd & (1 << 15))
vars->link_status |=
- LINK_STATUS_PARALLEL_DETECTION_USED;
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ ELINK_DEBUG_P2(sc, "pd = %x, link_status = %x",
+ pd, vars->link_status);
}
elink_ext_phy_resolve_fc(phy, params, vars);
vars->duplex = duplex;
+ ELINK_DEBUG_P3(sc, " ELINK_SINGLE_MEDIA_DIRECT duplex %x flow_ctrl 0x%x link_status 0x%x",
+ vars->duplex, vars->flow_ctrl,
+ vars->link_status);
}
}
-
+ ELINK_DEBUG_P3(sc, "duplex %x flow_ctrl 0x%x link_status 0x%x",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
ELINK_SINGLE_MEDIA_DIRECT(params)) {
uint16_t val;
@@ -5373,24 +6658,28 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
-
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ ELINK_DEBUG_P2(sc, "val = %x, link_status = %x",
+ val, vars->link_status);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
if (val & MDIO_OVER_1G_UP1_2_5G)
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ ELINK_DEBUG_P2(sc, "val = %x, link_status = %x",
+ val, vars->link_status);
}
+
if (lane < 2) {
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
@@ -5398,12 +6687,12 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
}
- PMD_DRV_LOG(DEBUG, "lane %d gp_speed 0x%x", lane, gp_speed);
+ ELINK_DEBUG_P2(sc, "lane %d gp_speed 0x%x", lane, gp_speed);
if ((lane & 1) == 0)
gp_speed <<= 8;
gp_speed &= 0x3f00;
- link_up = ! !link_up;
+ link_up = !!link_up;
/* Reset the TX FIFO to fix SGMII issue */
rc = elink_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
@@ -5414,11 +6703,10 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
(!(phy->flags & ELINK_FLAGS_WC_DUAL_MODE)))
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
- PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x",
- vars->duplex, vars->flow_ctrl, vars->link_status);
+ ELINK_DEBUG_P3(sc, "duplex %x flow_ctrl 0x%x link_status 0x%x",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
-
static void elink_set_gmii_tx_driver(struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
@@ -5429,7 +6717,8 @@ static void elink_set_gmii_tx_driver(struct elink_params *params)
/* Read precomp */
CL22_RD_OVER_CL45(sc, phy,
- MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_LP_UP2, &lp_up2);
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
/* Bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -5440,32 +6729,36 @@ static void elink_set_gmii_tx_driver(struct elink_params *params)
return;
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
- bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
CL22_RD_OVER_CL45(sc, phy,
- bank, MDIO_TX0_TX_DRIVER, &tx_driver);
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
/* Replace tx_driver bits [15:12] */
- if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
+ if (lp_up2 !=
+ (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
CL22_WR_OVER_CL45(sc, phy,
- bank, MDIO_TX0_TX_DRIVER, tx_driver);
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
}
}
}
static elink_status_t elink_emac_program(struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
uint16_t mode = 0;
- PMD_DRV_LOG(DEBUG, "setting link speed & duplex");
+ ELINK_DEBUG_P0(sc, "setting link speed & duplex");
elink_bits_dis(sc, GRCBASE_EMAC0 + port * 0x400 +
EMAC_REG_EMAC_MODE,
(EMAC_MODE_25G_MODE |
- EMAC_MODE_PORT_MII_10M | EMAC_MODE_HALF_DUPLEX));
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
switch (vars->line_speed) {
case ELINK_SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
@@ -5485,14 +6778,16 @@ static elink_status_t elink_emac_program(struct elink_params *params,
default:
/* 10G not valid for EMAC */
- PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x", vars->line_speed);
+ ELINK_DEBUG_P1(sc, "Invalid line_speed 0x%x",
+ vars->line_speed);
return ELINK_STATUS_ERROR;
}
if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
elink_bits_en(sc,
- GRCBASE_EMAC0 + port * 0x400 + EMAC_REG_EMAC_MODE, mode);
+ GRCBASE_EMAC0 + port * 0x400 + EMAC_REG_EMAC_MODE,
+ mode);
elink_set_led(params, vars, ELINK_LED_MODE_OPER, vars->line_speed);
return ELINK_STATUS_OK;
@@ -5509,24 +6804,26 @@ static void elink_set_preemphasis(struct elink_phy *phy,
bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0), i++) {
CL22_WR_OVER_CL45(sc, phy,
bank,
- MDIO_RX0_RX_EQ_BOOST, phy->rx_preemphasis[i]);
+ MDIO_RX0_RX_EQ_BOOST,
+ phy->rx_preemphasis[i]);
}
for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
CL22_WR_OVER_CL45(sc, phy,
bank,
- MDIO_TX0_TX_DRIVER, phy->tx_preemphasis[i]);
+ MDIO_TX0_TX_DRIVER,
+ phy->tx_preemphasis[i]);
}
}
static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
+ struct bnx2x_softc *sc = params->sc;
uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) ||
- (params->loopback_mode == ELINK_LOOPBACK_XGXS));
-
+ (params->loopback_mode == ELINK_LOOPBACK_XGXS));
if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
if (ELINK_SINGLE_MEDIA_DIRECT(params) &&
(params->feature_config_flags &
@@ -5537,7 +6834,7 @@ static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
if (vars->line_speed != ELINK_SPEED_AUTO_NEG ||
(ELINK_SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == ELINK_LOOPBACK_EXT)) {
- PMD_DRV_LOG(DEBUG, "not SGMII, no AN");
+ ELINK_DEBUG_P0(sc, "not SGMII, no AN");
/* Disable autoneg */
elink_set_autoneg(phy, params, vars, 0);
@@ -5545,8 +6842,8 @@ static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
/* Program speed and duplex */
elink_program_serdes(phy, params, vars);
- } else { /* AN_mode */
- PMD_DRV_LOG(DEBUG, "not SGMII, AN");
+ } else { /* AN_mode */
+ ELINK_DEBUG_P0(sc, "not SGMII, AN");
/* AN enabled */
elink_set_brcm_cl37_advertisement(phy, params);
@@ -5562,8 +6859,8 @@ static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
elink_restart_autoneg(phy, params, enable_cl73);
}
- } else { /* SGMII mode */
- PMD_DRV_LOG(DEBUG, "SGMII");
+ } else { /* SGMII mode */
+ ELINK_DEBUG_P0(sc, "SGMII");
elink_initialize_sgmii_process(phy, params, vars);
}
@@ -5572,8 +6869,8 @@ static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
}
static elink_status_t elink_prepare_xgxs(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
elink_status_t rc;
vars->phy_flags |= PHY_XGXS_FLAG;
@@ -5611,28 +6908,32 @@ static elink_status_t elink_prepare_xgxs(struct elink_phy *phy,
}
static uint16_t elink_wait_reset_complete(struct bnx2x_softc *sc,
- struct elink_phy *phy,
- struct elink_params *params)
+ struct elink_phy *phy,
+ struct elink_params *params)
{
uint16_t cnt, ctrl;
/* Wait for soft reset to get cleared up to 1 sec */
for (cnt = 0; cnt < 1000; cnt++) {
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE)
- elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &ctrl);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE)
+ elink_cl22_read(sc, phy,
+ MDIO_PMA_REG_CTRL, &ctrl);
else
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, &ctrl);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1 << 15)))
break;
DELAY(1000 * 1);
}
if (cnt == 1000)
- elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized,"
- // " Port %d",
+ elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED,
+ params->port);
+ /* "Warning: PHY was not initialized,"
+ * " Port %d",
+ */
- PMD_DRV_LOG(DEBUG, "control reg 0x%x (after %d ms)", ctrl, cnt);
+ ELINK_DEBUG_P2(sc, "control reg 0x%x (after %d ms)", ctrl, cnt);
return cnt;
}
@@ -5650,37 +6951,38 @@ static void elink_link_int_enable(struct elink_params *params)
} else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) {
mask = (ELINK_NIG_MASK_XGXS0_LINK10G |
ELINK_NIG_MASK_XGXS0_LINK_STATUS);
- PMD_DRV_LOG(DEBUG, "enabled XGXS interrupt");
+ ELINK_DEBUG_P0(sc, "enabled XGXS interrupt");
if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) &&
- params->phy[ELINK_INT_PHY].type !=
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+ params->phy[ELINK_INT_PHY].type !=
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
mask |= ELINK_NIG_MASK_MI_INT;
- PMD_DRV_LOG(DEBUG, "enabled external phy int");
+ ELINK_DEBUG_P0(sc, "enabled external phy int");
}
- } else { /* SerDes */
+ } else { /* SerDes */
mask = ELINK_NIG_MASK_SERDES0_LINK_STATUS;
- PMD_DRV_LOG(DEBUG, "enabled SerDes interrupt");
+ ELINK_DEBUG_P0(sc, "enabled SerDes interrupt");
if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) &&
- params->phy[ELINK_INT_PHY].type !=
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+ params->phy[ELINK_INT_PHY].type !=
+ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
mask |= ELINK_NIG_MASK_MI_INT;
- PMD_DRV_LOG(DEBUG, "enabled external phy int");
+ ELINK_DEBUG_P0(sc, "enabled external phy int");
}
}
- elink_bits_en(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, mask);
+ elink_bits_en(sc,
+ NIG_REG_MASK_INTERRUPT_PORT0 + port * 4,
+ mask);
- PMD_DRV_LOG(DEBUG, "port %x, is_xgxs %x, int_status 0x%x", port,
- (params->switch_cfg == ELINK_SWITCH_CFG_10G),
- REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
- PMD_DRV_LOG(DEBUG, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x",
- REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
- REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18),
- REG_RD(sc,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
- PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x",
- REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
- REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
+ ELINK_DEBUG_P3(sc, "port %x, is_xgxs %x, int_status 0x%x", port,
+ (params->switch_cfg == ELINK_SWITCH_CFG_10G),
+ REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
+ ELINK_DEBUG_P3(sc, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x",
+ REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
+ REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18),
+ REG_RD(sc, NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
+ ELINK_DEBUG_P2(sc, " 10G %x, XGXS_LINK %x",
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
}
static void elink_rearm_latch_signal(struct bnx2x_softc *sc, uint8_t port,
@@ -5693,17 +6995,20 @@ static void elink_rearm_latch_signal(struct bnx2x_softc *sc, uint8_t port,
* so in this case we need to write the status to clear the XOR
*/
/* Read Latched signals */
- latch_status = REG_RD(sc, NIG_REG_LATCH_STATUS_0 + port * 8);
- PMD_DRV_LOG(DEBUG, "latch_status = 0x%x", latch_status);
- /* Handle only those with latched-signal=up. */
+ latch_status = REG_RD(sc,
+ NIG_REG_LATCH_STATUS_0 + port * 8);
+ ELINK_DEBUG_P1(sc, "latch_status = 0x%x", latch_status);
+ /* Handle only those with latched-signal=up.*/
if (exp_mi_int)
elink_bits_en(sc,
NIG_REG_STATUS_INTERRUPT_PORT0
- + port * 4, ELINK_NIG_STATUS_EMAC0_MI_INT);
+ + port * 4,
+ ELINK_NIG_STATUS_EMAC0_MI_INT);
else
elink_bits_dis(sc,
NIG_REG_STATUS_INTERRUPT_PORT0
- + port * 4, ELINK_NIG_STATUS_EMAC0_MI_INT);
+ + port * 4,
+ ELINK_NIG_STATUS_EMAC0_MI_INT);
if (latch_status & 1) {
@@ -5738,23 +7043,24 @@ static void elink_link_int_ack(struct elink_params *params,
* the relevant lane in the status register
*/
uint32_t ser_lane =
- ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
mask = ((1 << ser_lane) <<
- ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+ ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
} else
mask = ELINK_NIG_STATUS_SERDES0_LINK_STATUS;
}
- PMD_DRV_LOG(DEBUG, "Ack link up interrupt with mask 0x%x",
- mask);
+ ELINK_DEBUG_P1(sc, "Ack link up interrupt with mask 0x%x",
+ mask);
elink_bits_en(sc,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4, mask);
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4,
+ mask);
}
}
-static uint8_t elink_format_ver(uint32_t num, uint8_t * str,
- uint16_t * len)
+static elink_status_t elink_format_ver(uint32_t num, uint8_t *str,
+ uint16_t *len)
{
uint8_t *str_ptr = str;
uint32_t mask = 0xf0000000;
@@ -5792,14 +7098,57 @@ static uint8_t elink_format_ver(uint32_t num, uint8_t * str,
return ELINK_STATUS_OK;
}
-static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
- uint8_t * str, uint16_t * len)
+
+static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
+ uint8_t *str,
+ uint16_t *len)
{
str[0] = '\0';
(*len)--;
return ELINK_STATUS_OK;
}
+elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params,
+ uint8_t *version,
+ uint16_t len)
+{
+ struct bnx2x_softc *sc;
+ uint32_t spirom_ver = 0;
+ elink_status_t status = ELINK_STATUS_OK;
+ uint8_t *ver_p = version;
+ uint16_t remain_len = len;
+ if (version == NULL || params == NULL)
+ return ELINK_STATUS_ERROR;
+ sc = params->sc;
+
+ /* Extract first external phy*/
+ version[0] = '\0';
+ spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY1].ver_addr);
+
+ if (params->phy[ELINK_EXT_PHY1].format_fw_ver) {
+ status |= params->phy[ELINK_EXT_PHY1].format_fw_ver(spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p += (len - remain_len);
+ }
+ if ((params->num_phys == ELINK_MAX_PHYS) &&
+ (params->phy[ELINK_EXT_PHY2].ver_addr != 0)) {
+ spirom_ver = REG_RD(sc, params->phy[ELINK_EXT_PHY2].ver_addr);
+ if (params->phy[ELINK_EXT_PHY2].format_fw_ver) {
+ *ver_p = '/';
+ ver_p++;
+ remain_len--;
+ status |= params->phy[ELINK_EXT_PHY2].format_fw_ver(
+ spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p = version + (len - remain_len);
+ }
+ }
+ *ver_p = '\0';
+ return status;
+}
+
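/* Illustrative sketch (not from this patch): one way a caller might use the
 * new elink_get_ext_phy_fw_version() helper added above. The buffer size
 * and the wrapper name are assumptions made only for this example.
 */
static void elink_log_ext_phy_fw(struct elink_params *params)
{
	uint8_t ver[64];

	if (elink_get_ext_phy_fw_version(params, ver, sizeof(ver)) ==
	    ELINK_STATUS_OK)
		ELINK_DEBUG_P1(params->sc, "ext PHY fw version: %s", ver);
}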
static void elink_set_xgxs_loopback(struct elink_phy *phy,
struct elink_params *params)
{
@@ -5809,7 +7158,7 @@ static void elink_set_xgxs_loopback(struct elink_phy *phy,
if (phy->req_line_speed != ELINK_SPEED_1000) {
uint32_t md_devad = 0;
- PMD_DRV_LOG(DEBUG, "XGXS 10G loopback enable");
+ ELINK_DEBUG_P0(sc, "XGXS 10G loopback enable");
if (!CHIP_IS_E3(sc)) {
/* Change the uni_phy_addr in the nig */
@@ -5823,7 +7172,8 @@ static void elink_set_xgxs_loopback(struct elink_phy *phy,
elink_cl45_write(sc, phy,
5,
(MDIO_REG_BANK_AER_BLOCK +
- (MDIO_AER_BLOCK_AER_REG & 0xf)), 0x2800);
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
elink_cl45_write(sc, phy,
5,
@@ -5841,22 +7191,21 @@ static void elink_set_xgxs_loopback(struct elink_phy *phy,
}
} else {
uint16_t mii_ctrl;
- PMD_DRV_LOG(DEBUG, "XGXS 1G loopback enable");
+ ELINK_DEBUG_P0(sc, "XGXS 1G loopback enable");
elink_cl45_read(sc, phy, 5,
(MDIO_REG_BANK_COMBO_IEEE0 +
- (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
&mii_ctrl);
elink_cl45_write(sc, phy, 5,
(MDIO_REG_BANK_COMBO_IEEE0 +
- (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
mii_ctrl |
MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
}
}
elink_status_t elink_set_led(struct elink_params *params,
- struct elink_vars *vars, uint8_t mode,
- uint32_t speed)
+ struct elink_vars *vars, uint8_t mode, uint32_t speed)
{
uint8_t port = params->port;
uint16_t hw_led_mode = params->hw_led_mode;
@@ -5865,15 +7214,21 @@ elink_status_t elink_set_led(struct elink_params *params,
uint32_t tmp;
uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "elink_set_led: port %x, mode %d", port, mode);
- PMD_DRV_LOG(DEBUG, "speed 0x%x, hw_led_mode 0x%x", speed, hw_led_mode);
+ ELINK_DEBUG_P2(sc, "elink_set_led: port %x, mode %d", port, mode);
+ ELINK_DEBUG_P2(sc, "speed 0x%x, hw_led_mode 0x%x",
+ speed, hw_led_mode);
/* In case */
for (phy_idx = ELINK_EXT_PHY1; phy_idx < ELINK_MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].set_link_led) {
- params->phy[phy_idx].set_link_led(&params->phy[phy_idx],
- params, mode);
+ params->phy[phy_idx].set_link_led(
+ &params->phy[phy_idx], params, mode);
}
}
+#ifdef ELINK_INCLUDE_EMUL
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC)
+ return rc;
+#endif
switch (mode) {
case ELINK_LED_MODE_FRONT_PANEL_OFF:
@@ -5884,10 +7239,10 @@ elink_status_t elink_set_led(struct elink_params *params,
tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED);
if (params->phy[ELINK_EXT_PHY1].type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE)
+ PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE)
tmp &= ~(EMAC_LED_1000MB_OVERRIDE |
- EMAC_LED_100MB_OVERRIDE |
- EMAC_LED_10MB_OVERRIDE);
+ EMAC_LED_100MB_OVERRIDE |
+ EMAC_LED_10MB_OVERRIDE);
else
tmp |= EMAC_LED_OVERRIDE;
@@ -5900,25 +7255,22 @@ elink_status_t elink_set_led(struct elink_params *params,
*/
if (!vars->link_up)
break;
- /* fall-through */
+ /* fallthrough */
case ELINK_LED_MODE_ON:
if (((params->phy[ELINK_EXT_PHY1].type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) ||
- (params->phy[ELINK_EXT_PHY1].type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722)) &&
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) ||
+ (params->phy[ELINK_EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722)) &&
CHIP_IS_E2(sc) && params->num_phys == 2) {
- /* This is a work-around for E2+8727 Configurations */
+ /* This is a work-around for E2 + 8727 Configurations */
if (mode == ELINK_LED_MODE_ON ||
- speed == ELINK_SPEED_10000) {
+			    speed == ELINK_SPEED_10000) {
REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0);
REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1);
- tmp =
- elink_cb_reg_read(sc,
- emac_base +
- EMAC_REG_EMAC_LED);
- elink_cb_reg_write(sc,
- emac_base +
+ tmp = elink_cb_reg_read(sc, emac_base +
+ EMAC_REG_EMAC_LED);
+ elink_cb_reg_write(sc, emac_base +
EMAC_REG_EMAC_LED,
(tmp | EMAC_LED_OVERRIDE));
/* Return here without enabling traffic
@@ -5934,22 +7286,23 @@ elink_status_t elink_set_led(struct elink_params *params,
* is up in CL73
*/
if ((!CHIP_IS_E3(sc)) ||
- (CHIP_IS_E3(sc) && mode == ELINK_LED_MODE_ON))
+ (CHIP_IS_E3(sc) &&
+ mode == ELINK_LED_MODE_ON))
REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1);
if (CHIP_IS_E1x(sc) ||
- CHIP_IS_E2(sc) || (mode == ELINK_LED_MODE_ON))
+ CHIP_IS_E2(sc) ||
+ (mode == ELINK_LED_MODE_ON))
REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0);
else
REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4,
hw_led_mode);
} else if ((params->phy[ELINK_EXT_PHY1].type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) &&
+ PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE) &&
(mode == ELINK_LED_MODE_ON)) {
REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0);
- tmp =
- elink_cb_reg_read(sc,
- emac_base + EMAC_REG_EMAC_LED);
+ tmp = elink_cb_reg_read(sc, emac_base +
+ EMAC_REG_EMAC_LED);
elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED,
tmp | EMAC_LED_OVERRIDE |
EMAC_LED_1000MB_OVERRIDE);
@@ -5959,11 +7312,10 @@ elink_status_t elink_set_led(struct elink_params *params,
break;
} else {
uint32_t nig_led_mode = ((params->hw_led_mode <<
- SHARED_HW_CFG_LED_MODE_SHIFT)
- ==
- SHARED_HW_CFG_LED_EXTPHY2)
- ? (SHARED_HW_CFG_LED_PHY1 >>
- SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
+ SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY2) ?
+ (SHARED_HW_CFG_LED_PHY1 >>
+ SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4,
nig_led_mode);
}
@@ -5977,26 +7329,133 @@ elink_status_t elink_set_led(struct elink_params *params,
else
REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port * 4,
LED_BLINK_RATE_VAL_E1X_E2);
- REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port * 4, 1);
+ REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+ port * 4, 1);
tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED);
elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED,
- (tmp & (~EMAC_LED_OVERRIDE)));
+ (tmp & (~EMAC_LED_OVERRIDE)));
+ if (CHIP_IS_E1(sc) &&
+ ((speed == ELINK_SPEED_2500) ||
+ (speed == ELINK_SPEED_1000) ||
+ (speed == ELINK_SPEED_100) ||
+ (speed == ELINK_SPEED_10))) {
+ /* For speeds less than 10G LED scheme is different */
+ REG_WR(sc, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port * 4, 1);
+ REG_WR(sc, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+ port * 4, 0);
+ REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+ port * 4, 1);
+ }
break;
default:
rc = ELINK_STATUS_ERROR;
- PMD_DRV_LOG(DEBUG, "elink_set_led: Invalid led mode %d", mode);
+ ELINK_DEBUG_P1(sc, "elink_set_led: Invalid led mode %d",
+ mode);
break;
}
return rc;
}
+/* This function comes to reflect the actual link state read DIRECTLY from the
+ * HW
+ */
+elink_status_t elink_test_link(struct elink_params *params,
+ __rte_unused struct elink_vars *vars,
+ uint8_t is_serdes)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint16_t gp_status = 0, phy_index = 0;
+ uint8_t ext_phy_link_up = 0, serdes_phy_type;
+ struct elink_vars temp_vars;
+ struct elink_phy *int_phy = &params->phy[ELINK_INT_PHY];
+#ifdef ELINK_INCLUDE_FPGA
+ if (CHIP_REV_IS_FPGA(sc))
+ return ELINK_STATUS_OK;
+#endif
+#ifdef ELINK_INCLUDE_EMUL
+ if (CHIP_REV_IS_EMUL(sc))
+ return ELINK_STATUS_OK;
+#endif
+
+ if (CHIP_IS_E3(sc)) {
+ uint16_t link_up;
+ if (params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)]
+ > ELINK_SPEED_10000) {
+ /* Check 20G link */
+ elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ link_up &= (1 << 2);
+ } else {
+ /* Check 10G link and below*/
+ uint8_t lane = elink_get_warpcore_lane(int_phy, params);
+ elink_cl45_read(sc, int_phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status);
+ gp_status = ((gp_status >> 8) & 0xf) |
+ ((gp_status >> 12) & 0xf);
+ link_up = gp_status & (1 << lane);
+ }
+ if (!link_up)
+ return ELINK_STATUS_NO_LINK;
+ } else {
+ CL22_RD_OVER_CL45(sc, int_phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ /* Link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return ELINK_STATUS_NO_LINK;
+ }
+ /* In XGXS loopback mode, do not check external PHY */
+ if (params->loopback_mode == ELINK_LOOPBACK_XGXS)
+ return ELINK_STATUS_OK;
+
+ switch (params->num_phys) {
+ case 1:
+ /* No external PHY */
+ return ELINK_STATUS_OK;
+ case 2:
+ ext_phy_link_up = params->phy[ELINK_EXT_PHY1].read_status(
+ &params->phy[ELINK_EXT_PHY1],
+ params, &temp_vars);
+ break;
+ case 3: /* Dual Media */
+ for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ serdes_phy_type = ((params->phy[phy_index].media_type ==
+ ELINK_ETH_PHY_SFPP_10G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ELINK_ETH_PHY_SFP_1G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ELINK_ETH_PHY_XFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ELINK_ETH_PHY_DA_TWINAX));
+
+ if (is_serdes != serdes_phy_type)
+ continue;
+ if (params->phy[phy_index].read_status) {
+ ext_phy_link_up |=
+ params->phy[phy_index].read_status(
+ &params->phy[phy_index],
+ params, &temp_vars);
+ }
+ }
+ break;
+ }
+ if (ext_phy_link_up)
+ return ELINK_STATUS_OK;
+ return ELINK_STATUS_NO_LINK;
+}
+
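/* Illustrative sketch (not from this patch): elink_test_link() above reads
 * the link state directly from the hardware and returns ELINK_STATUS_OK on
 * link, ELINK_STATUS_NO_LINK otherwise. The wrapper name is hypothetical,
 * and is_serdes = 0 is an arbitrary choice; it only matters on dual-media
 * boards, where it selects which external PHY media type is polled.
 */
static uint8_t elink_hw_link_is_up(struct elink_params *params,
				   struct elink_vars *vars)
{
	return elink_test_link(params, vars, 0) == ELINK_STATUS_OK;
}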
static elink_status_t elink_link_initialize(struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_vars *vars)
{
- elink_status_t rc = ELINK_STATUS_OK;
uint8_t phy_index, non_ext_phy;
struct bnx2x_softc *sc = params->sc;
/* In case of external phy existence, the line speed would be the
@@ -6021,11 +7480,12 @@ static elink_status_t elink_link_initialize(struct elink_params *params,
(params->loopback_mode == ELINK_LOOPBACK_EXT_PHY)) {
struct elink_phy *phy = &params->phy[ELINK_INT_PHY];
if (vars->line_speed == ELINK_SPEED_AUTO_NEG &&
- (CHIP_IS_E1x(sc) || CHIP_IS_E2(sc)))
+ (CHIP_IS_E1x(sc) ||
+ CHIP_IS_E2(sc)))
elink_set_parallel_detection(phy, params);
if (params->phy[ELINK_INT_PHY].config_init)
- params->phy[ELINK_INT_PHY].config_init(phy,
- params, vars);
+ params->phy[ELINK_INT_PHY].config_init(phy, params,
+ vars);
}
/* Re-read this value in case it was changed inside config_init due to
@@ -6033,14 +7493,14 @@ static elink_status_t elink_link_initialize(struct elink_params *params,
*/
vars->line_speed = params->phy[ELINK_INT_PHY].req_line_speed;
- /* Init external phy */
+ /* Init external phy*/
if (non_ext_phy) {
if (params->phy[ELINK_INT_PHY].supported &
ELINK_SUPPORTED_FIBRE)
vars->link_status |= LINK_STATUS_SERDES_LINK;
} else {
for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
- phy_index++) {
+ phy_index++) {
/* No need to initialize second phy in case of first
* phy only selection. In case of second phy, we do
* need to initialize the first phy, since they are
@@ -6053,13 +7513,13 @@ static elink_status_t elink_link_initialize(struct elink_params *params,
if (phy_index == ELINK_EXT_PHY2 &&
(elink_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- PMD_DRV_LOG(DEBUG,
- "Not initializing second phy");
+ ELINK_DEBUG_P0(sc,
+ "Not initializing second phy");
continue;
}
- params->phy[phy_index].config_init(&params->
- phy[phy_index],
- params, vars);
+ params->phy[phy_index].config_init(
+ &params->phy[phy_index],
+ params, vars);
}
}
/* Reset the interrupt indication after phy was initialized */
@@ -6069,7 +7529,7 @@ static elink_status_t elink_link_initialize(struct elink_params *params,
ELINK_NIG_STATUS_XGXS0_LINK_STATUS |
ELINK_NIG_STATUS_SERDES0_LINK_STATUS |
ELINK_NIG_MASK_MI_INT));
- return rc;
+ return ELINK_STATUS_OK;
}
static void elink_int_link_reset(__rte_unused struct elink_phy *phy,
@@ -6091,19 +7551,21 @@ static void elink_common_ext_link_reset(__rte_unused struct elink_phy *phy,
else
gpio_port = params->port;
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
- PMD_DRV_LOG(DEBUG, "reset external PHY");
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ ELINK_DEBUG_P0(sc, "reset external PHY");
}
static elink_status_t elink_update_link_down(struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
- PMD_DRV_LOG(DEBUG, "Port %x: Link is down", port);
+ ELINK_DEBUG_P1(sc, "Port %x: Link is down", port);
elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
/* Indicate no mac active */
@@ -6123,8 +7585,9 @@ static elink_status_t elink_update_link_down(struct elink_params *params,
DELAY(1000 * 10);
/* Reset BigMac/Xmac */
- if (CHIP_IS_E1x(sc) || CHIP_IS_E2(sc))
- elink_set_bmac_rx(sc, params->port, 0);
+ if (CHIP_IS_E1x(sc) ||
+ CHIP_IS_E2(sc))
+ elink_set_bmac_rx(sc, params->chip_id, params->port, 0);
if (CHIP_IS_E3(sc)) {
/* Prevent LPI Generation by chip */
@@ -6144,8 +7607,8 @@ static elink_status_t elink_update_link_down(struct elink_params *params,
}
static elink_status_t elink_update_link_up(struct elink_params *params,
- struct elink_vars *vars,
- uint8_t link_10g)
+ struct elink_vars *vars,
+ uint8_t link_10g)
{
struct bnx2x_softc *sc = params->sc;
uint8_t phy_idx, port = params->port;
@@ -6156,15 +7619,17 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
- vars->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
- vars->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
if (USES_WARPCORE(sc)) {
if (link_10g) {
if (elink_xmac_enable(params, vars, 0) ==
ELINK_STATUS_NO_LINK) {
- PMD_DRV_LOG(DEBUG, "Found errors on XMAC");
+ ELINK_DEBUG_P0(sc, "Found errors on XMAC");
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
vars->link_status &= ~LINK_STATUS_LINK_UP;
@@ -6176,7 +7641,7 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
(vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
- PMD_DRV_LOG(DEBUG, "Enabling LPI assertion");
+ ELINK_DEBUG_P0(sc, "Enabling LPI assertion");
REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
(params->port << 2), 1);
REG_WR(sc, MISC_REG_CPMU_LP_DR_ENABLE, 1);
@@ -6184,11 +7649,12 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
(params->port << 2), 0xfc20);
}
}
- if ((CHIP_IS_E1x(sc) || CHIP_IS_E2(sc))) {
+ if ((CHIP_IS_E1x(sc) ||
+ CHIP_IS_E2(sc))) {
if (link_10g) {
if (elink_bmac_enable(params, vars, 0, 1) ==
ELINK_STATUS_NO_LINK) {
- PMD_DRV_LOG(DEBUG, "Found errors on BMAC");
+ ELINK_DEBUG_P0(sc, "Found errors on BMAC");
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
vars->link_status &= ~LINK_STATUS_LINK_UP;
@@ -6231,6 +7697,24 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
return rc;
}
+static void elink_chng_link_count(struct elink_params *params, uint8_t clear)
+{
+ struct bnx2x_softc *sc = params->sc;
+ uint32_t addr, val;
+
+ /* Verify the link_change_count is supported by the MFW */
+ if (!(SHMEM2_HAS(sc, link_change_count)))
+ return;
+
+ addr = params->shmem2_base +
+ offsetof(struct shmem2_region, link_change_count[params->port]);
+ if (clear)
+ val = 0;
+ else
+ val = REG_RD(sc, addr) + 1;
+ REG_WR(sc, addr, val);
+}
+
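/* Illustrative sketch (not from this patch): per its body, the new
 * elink_chng_link_count() helper above zeroes the per-port
 * link_change_count word in shmem2 when clear != 0, increments it when
 * clear == 0, and silently returns when the MFW does not expose the field.
 * The wrapper below and its trigger are assumptions for the example only.
 */
static void elink_note_link_transition(struct elink_params *params,
				       uint8_t reinit)
{
	if (reinit)
		elink_chng_link_count(params, 1);	/* reset the counter */
	else
		elink_chng_link_count(params, 0);	/* count this change */
}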
/* The elink_link_update function should be called upon link
* interrupt.
* Link is considered up as follows:
@@ -6243,24 +7727,24 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
* external phy needs to be up, and at least one of the 2
* external phy link must be up.
*/
-elink_status_t elink_link_update(struct elink_params * params,
- struct elink_vars * vars)
+elink_status_t elink_link_update(struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
struct elink_vars phy_vars[ELINK_MAX_PHYS];
uint8_t port = params->port;
uint8_t link_10g_plus, phy_index;
+ uint32_t prev_link_status = vars->link_status;
uint8_t ext_phy_link_up = 0, cur_link_up;
elink_status_t rc = ELINK_STATUS_OK;
- __rte_unused uint8_t is_mi_int = 0;
uint16_t ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
uint8_t active_external_phy = ELINK_INT_PHY;
vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
vars->link_status &= ~ELINK_LINK_UPDATE_MASK;
for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys;
- phy_index++) {
+ phy_index++) {
phy_vars[phy_index].flow_ctrl = 0;
- phy_vars[phy_index].link_status = ETH_LINK_DOWN;
+ phy_vars[phy_index].link_status = 0;
phy_vars[phy_index].line_speed = 0;
phy_vars[phy_index].duplex = DUPLEX_FULL;
phy_vars[phy_index].phy_link_up = 0;
@@ -6273,21 +7757,18 @@ elink_status_t elink_link_update(struct elink_params * params,
if (USES_WARPCORE(sc))
elink_set_aer_mmd(params, &params->phy[ELINK_INT_PHY]);
- PMD_DRV_LOG(DEBUG, "port %x, XGXS?%x, int_status 0x%x",
- port, (vars->phy_flags & PHY_XGXS_FLAG),
- REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
+ ELINK_DEBUG_P3(sc, "port %x, XGXS?%x, int_status 0x%x",
+ port, (vars->phy_flags & PHY_XGXS_FLAG),
+ REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
- is_mi_int = (uint8_t) (REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
- port * 0x18) > 0);
- PMD_DRV_LOG(DEBUG, "int_mask 0x%x MI_INT %x, SERDES_LINK %x",
- REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
- is_mi_int,
- REG_RD(sc,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
+ ELINK_DEBUG_P3(sc, "int_mask 0x%x MI_INT %x, SERDES_LINK %x",
+ REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
+ REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18) > 0,
+ REG_RD(sc, NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
- PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x",
- REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
- REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
+ ELINK_DEBUG_P2(sc, " 10G %x, XGXS_LINK %x",
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
+ REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
/* Disable emac */
if (!CHIP_IS_E3(sc))
@@ -6301,7 +7782,7 @@ elink_status_t elink_link_update(struct elink_params * params,
* speed/duplex result
*/
for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
- phy_index++) {
+ phy_index++) {
struct elink_phy *phy = &params->phy[phy_index];
if (!phy->read_status)
continue;
@@ -6309,11 +7790,11 @@ elink_status_t elink_link_update(struct elink_params * params,
cur_link_up = phy->read_status(phy, params,
&phy_vars[phy_index]);
if (cur_link_up) {
- PMD_DRV_LOG(DEBUG, "phy in index %d link is up",
- phy_index);
+ ELINK_DEBUG_P1(sc, "phy in index %d link is up",
+ phy_index);
} else {
- PMD_DRV_LOG(DEBUG, "phy in index %d link is down",
- phy_index);
+ ELINK_DEBUG_P1(sc, "phy in index %d link is down",
+ phy_index);
continue;
}
@@ -6324,30 +7805,30 @@ elink_status_t elink_link_update(struct elink_params * params,
switch (elink_phy_selection(params)) {
case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- /* In this option, the first PHY makes sure to pass the
- * traffic through itself only.
- * Its not clear how to reset the link on the second phy
- */
+ /* In this option, the first PHY makes sure to pass the
+ * traffic through itself only.
+	 * It's not clear how to reset the link on the second phy
+ */
active_external_phy = ELINK_EXT_PHY1;
break;
case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- /* In this option, the first PHY makes sure to pass the
- * traffic through the second PHY.
- */
+ /* In this option, the first PHY makes sure to pass the
+ * traffic through the second PHY.
+ */
active_external_phy = ELINK_EXT_PHY2;
break;
default:
- /* Link indication on both PHYs with the following cases
- * is invalid:
- * - FIRST_PHY means that second phy wasn't initialized,
- * hence its link is expected to be down
- * - SECOND_PHY means that first phy should not be able
- * to link up by itself (using configuration)
- * - DEFAULT should be overridden during initialization
- */
- PMD_DRV_LOG(DEBUG, "Invalid link indication"
- "mpc=0x%x. DISABLING LINK !!!",
- params->multi_phy_config);
+ /* Link indication on both PHYs with the following cases
+ * is invalid:
+ * - FIRST_PHY means that second phy wasn't initialized,
+ * hence its link is expected to be down
+ * - SECOND_PHY means that first phy should not be able
+ * to link up by itself (using configuration)
+	 * - DEFAULT should be overridden during initialization
+ */
+ ELINK_DEBUG_P1(sc, "Invalid link indication"
+ " mpc=0x%x. DISABLING LINK !!!",
+ params->multi_phy_config);
ext_phy_link_up = 0;
break;
}
@@ -6361,9 +7842,9 @@ elink_status_t elink_link_update(struct elink_params * params,
* external phy
*/
if (params->phy[ELINK_INT_PHY].read_status)
- params->phy[ELINK_INT_PHY].read_status(&params->
- phy[ELINK_INT_PHY],
- params, vars);
+ params->phy[ELINK_INT_PHY].read_status(
+ &params->phy[ELINK_INT_PHY],
+ params, vars);
/* The INT_PHY flow control reside in the vars. This include the
* case where the speed or flow control are not set to AUTO.
* Otherwise, the active external phy flow control result is set
@@ -6383,11 +7864,11 @@ elink_status_t elink_link_update(struct elink_params * params,
*/
if (active_external_phy == ELINK_EXT_PHY1) {
if (params->phy[ELINK_EXT_PHY2].phy_specific_func) {
- PMD_DRV_LOG(DEBUG, "Disabling TX on EXT_PHY2");
- params->phy[ELINK_EXT_PHY2].
- phy_specific_func(&params->
- phy[ELINK_EXT_PHY2],
- params, ELINK_DISABLE_TX);
+ ELINK_DEBUG_P0(sc,
+ "Disabling TX on EXT_PHY2");
+ params->phy[ELINK_EXT_PHY2].phy_specific_func(
+ &params->phy[ELINK_EXT_PHY2],
+ params, ELINK_DISABLE_TX);
}
}
@@ -6401,12 +7882,27 @@ elink_status_t elink_link_update(struct elink_params * params,
vars->eee_status = phy_vars[active_external_phy].eee_status;
- PMD_DRV_LOG(DEBUG, "Active external phy selected: %x",
- active_external_phy);
- }
+ ELINK_DEBUG_P1(sc, "Active external phy selected: %x",
+ active_external_phy);
+ }
+
+ ELINK_DEBUG_P3(sc, "vars : phy_flags = %x, mac_type = %x, phy_link_up = %x",
+ vars->phy_flags, vars->mac_type, vars->phy_link_up);
+ ELINK_DEBUG_P3(sc, "vars : link_up = %x, line_speed = %x, duplex = %x",
+ vars->link_up, vars->line_speed, vars->duplex);
+ ELINK_DEBUG_P3(sc, "vars : flow_ctrl = %x, ieee_fc = %x, link_status = %x",
+ vars->flow_ctrl, vars->ieee_fc, vars->link_status);
+ ELINK_DEBUG_P3(sc, "vars : eee_status = %x, fault_detected = %x, check_kr2_recovery_cnt = %x",
+ vars->eee_status, vars->fault_detected,
+ vars->check_kr2_recovery_cnt);
+ ELINK_DEBUG_P3(sc, "vars : periodic_flags = %x, aeu_int_mask = %x, rx_tx_asic_rst = %x",
+ vars->periodic_flags, vars->aeu_int_mask,
+ vars->rx_tx_asic_rst);
+ ELINK_DEBUG_P2(sc, "vars : turn_to_run_wc_rt = %x, rsrv2 = %x",
+ vars->turn_to_run_wc_rt, vars->rsrv2);
for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
- phy_index++) {
+ phy_index++) {
if (params->phy[phy_index].flags &
ELINK_FLAGS_REARM_LATCH_SIGNAL) {
elink_rearm_latch_signal(sc, port,
@@ -6415,9 +7911,9 @@ elink_status_t elink_link_update(struct elink_params * params,
break;
}
}
- PMD_DRV_LOG(DEBUG, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
- " ext_phy_line_speed = %d", vars->flow_ctrl,
- vars->link_status, ext_phy_line_speed);
+ ELINK_DEBUG_P3(sc, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ " ext_phy_line_speed = %d", vars->flow_ctrl,
+ vars->link_status, ext_phy_line_speed);
/* Upon link speed change set the NIG into drain mode. Comes to
* deals with possible FIFO glitch due to clk change when speed
* is decreased without link down indicator
@@ -6426,15 +7922,15 @@ elink_status_t elink_link_update(struct elink_params * params,
if (vars->phy_link_up) {
if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
(ext_phy_line_speed != vars->line_speed)) {
- PMD_DRV_LOG(DEBUG, "Internal link speed %d is"
- " different than the external"
- " link speed %d", vars->line_speed,
- ext_phy_line_speed);
+ ELINK_DEBUG_P2(sc, "Internal link speed %d is"
+ " different than the external"
+ " link speed %d", vars->line_speed,
+ ext_phy_line_speed);
vars->phy_link_up = 0;
+ ELINK_DEBUG_P0(sc, "phy_link_up set to 0");
} else if (prev_line_speed != vars->line_speed) {
- REG_WR(sc,
- NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4,
- 0);
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE +
+ params->port * 4, 0);
DELAY(1000 * 1);
}
}
@@ -6452,11 +7948,11 @@ elink_status_t elink_link_update(struct elink_params * params,
* initialize it
*/
if (!(ELINK_SINGLE_MEDIA_DIRECT(params))) {
- PMD_DRV_LOG(DEBUG, "ext_phy_link_up = %d, int_link_up = %d,"
- " init_preceding = %d", ext_phy_link_up,
- vars->phy_link_up,
- params->phy[ELINK_EXT_PHY1].flags &
- ELINK_FLAGS_INIT_XGXS_FIRST);
+ ELINK_DEBUG_P3(sc, "ext_phy_link_up = %d, int_link_up = %d,"
+ " init_preceding = %d", ext_phy_link_up,
+ vars->phy_link_up,
+ params->phy[ELINK_EXT_PHY1].flags &
+ ELINK_FLAGS_INIT_XGXS_FIRST);
if (!(params->phy[ELINK_EXT_PHY1].flags &
ELINK_FLAGS_INIT_XGXS_FIRST)
&& ext_phy_link_up && !vars->phy_link_up) {
@@ -6467,11 +7963,9 @@ elink_status_t elink_link_update(struct elink_params * params,
vars->phy_flags &= ~PHY_SGMII_FLAG;
if (params->phy[ELINK_INT_PHY].config_init)
- params->phy[ELINK_INT_PHY].config_init(&params->
- phy
- [ELINK_INT_PHY],
- params,
- vars);
+ params->phy[ELINK_INT_PHY].config_init(
+ &params->phy[ELINK_INT_PHY], params,
+ vars);
}
}
/* Link is up only if both local phy and external phy (in case of
@@ -6482,6 +7976,11 @@ elink_status_t elink_link_update(struct elink_params * params,
ELINK_SINGLE_MEDIA_DIRECT(params)) &&
(phy_vars[active_external_phy].fault_detected == 0));
+ if (vars->link_up)
+ ELINK_DEBUG_P0(sc, "local phy and external phy are up");
+ else
+ ELINK_DEBUG_P0(sc, "either local phy or external phy or both are down");
+
/* Update the PFC configuration in case it was changed */
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
vars->link_status |= LINK_STATUS_PFC_ENABLED;
@@ -6493,9 +7992,12 @@ elink_status_t elink_link_update(struct elink_params * params,
else
rc = elink_update_link_down(params, vars);
+ if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP)
+ elink_chng_link_count(params, 0);
+
/* Update MCP link status was changed */
- if (params->
- feature_config_flags & ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX)
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX)
elink_cb_fw_command(sc, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
return rc;
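The prev_link_status snapshot taken at the top of the function (see the hunk above) lets the driver notice a transition of the LINK_UP bit and count it exactly once per transition; a small sketch of the XOR test with illustrative values (the second argument of elink_chng_link_count() is copied verbatim from the hunk, its meaning is not restated here):

uint32_t prev = 0;                          /* link was down at entry          */
uint32_t cur  = LINK_STATUS_LINK_UP;        /* link is up after the update     */

if ((prev ^ cur) & LINK_STATUS_LINK_UP)     /* bit toggled in either direction */
	elink_chng_link_count(params, 0);   /* record one link change          */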
@@ -6504,28 +8006,28 @@ elink_status_t elink_link_update(struct elink_params * params,
/*****************************************************************************/
/* External Phy section */
/*****************************************************************************/
-static void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port)
+void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port)
{
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
DELAY(1000 * 1);
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
-static void elink_save_spirom_version(struct bnx2x_softc *sc,
- __rte_unused uint8_t port,
+static void elink_save_spirom_version(struct bnx2x_softc *sc, uint8_t port,
uint32_t spirom_ver, uint32_t ver_addr)
{
- PMD_DRV_LOG(DEBUG, "FW version 0x%x:0x%x for port %d",
- (uint16_t) (spirom_ver >> 16), (uint16_t) spirom_ver, port);
+ ELINK_DEBUG_P3(sc, "FW version 0x%x:0x%x for port %d",
+ (uint16_t)(spirom_ver >> 16), (uint16_t)spirom_ver, port);
if (ver_addr)
REG_WR(sc, ver_addr, spirom_ver);
}
static void elink_save_bnx2x_spirom_ver(struct bnx2x_softc *sc,
- struct elink_phy *phy, uint8_t port)
+ struct elink_phy *phy,
+ uint8_t port)
{
uint16_t fw_ver1, fw_ver2;
@@ -6533,18 +8035,21 @@ static void elink_save_bnx2x_spirom_ver(struct bnx2x_softc *sc,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2, &fw_ver2);
- elink_save_spirom_version(sc, port,
- (uint32_t) (fw_ver1 << 16 | fw_ver2),
+ elink_save_spirom_version(sc, port, (uint32_t)(fw_ver1 << 16 | fw_ver2),
phy->ver_addr);
}
static void elink_ext_phy_10G_an_resolve(struct bnx2x_softc *sc,
- struct elink_phy *phy,
- struct elink_vars *vars)
+ struct elink_phy *phy,
+ struct elink_vars *vars)
{
uint16_t val;
- elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val);
- elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
if (val & (1 << 5))
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
if ((val & (1 << 0)) == 0)
@@ -6568,8 +8073,8 @@ static void elink_8073_resolve_fc(struct elink_phy *phy,
if (elink_ext_phy_resolve_fc(phy, params, vars) &&
(vars->flow_ctrl == ELINK_FLOW_CTRL_NONE)) {
uint16_t pause_result;
- uint16_t ld_pause; /* local */
- uint16_t lp_pause; /* link partner */
+ uint16_t ld_pause; /* local */
+ uint16_t lp_pause; /* link partner */
elink_cl45_read(sc, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_CL37_FC_LD, &ld_pause);
@@ -6582,31 +8087,35 @@ static void elink_8073_resolve_fc(struct elink_phy *phy,
pause_result |= (lp_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
- elink_pause_resolve(vars, pause_result);
- PMD_DRV_LOG(DEBUG, "Ext PHY CL37 pause result 0x%x",
- pause_result);
+ elink_pause_resolve(phy, params, vars, pause_result);
+ ELINK_DEBUG_P1(sc, "Ext PHY CL37 pause result 0x%x",
+ pause_result);
}
}
-
static elink_status_t elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc,
- struct elink_phy *phy,
- uint8_t port)
+ struct elink_phy *phy,
+ uint8_t port)
{
uint32_t count = 0;
- uint16_t fw_ver1 = 0, fw_msgout;
+ uint16_t fw_ver1, fw_msgout;
elink_status_t rc = ELINK_STATUS_OK;
/* Boot port from external ROM */
/* EDC grst */
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
/* Ucode reboot and rst */
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x008c);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
elink_cl45_write(sc, phy,
@@ -6627,10 +8136,10 @@ static elink_status_t elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc,
do {
count++;
if (count > 300) {
- PMD_DRV_LOG(DEBUG,
- "elink_8073_8727_external_rom_boot port %x:"
- "Download failed. fw version = 0x%x",
- port, fw_ver1);
+ ELINK_DEBUG_P2(sc,
+ "elink_8073_8727_external_rom_boot port %x:"
+ "Download failed. fw version = 0x%x",
+ port, fw_ver1);
rc = ELINK_STATUS_ERROR;
break;
}
@@ -6644,17 +8153,19 @@ static elink_status_t elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc,
DELAY(1000 * 1);
} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
- ((fw_msgout & 0xff) != 0x03 && (phy->type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073)));
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073)));
/* Clear ser_boot_ctl bit */
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
elink_save_bnx2x_spirom_ver(sc, phy, port);
- PMD_DRV_LOG(DEBUG,
- "elink_8073_8727_external_rom_boot port %x:"
- "Download complete. fw version = 0x%x", port, fw_ver1);
+ ELINK_DEBUG_P2(sc,
+ "elink_8073_8727_external_rom_boot port %x:"
+ "Download complete. fw version = 0x%x",
+ port, fw_ver1);
return rc;
}
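For orientation, the download poll above is time-bounded: each iteration re-reads MDIO_PMA_REG_ROM_VER1 and MDIO_PMA_REG_M8051_MSGOUT_REG and sleeps DELAY(1000 * 1), i.e. 1 ms, so the count > 300 cut-off gives up after roughly 300 ms before the "Download failed" message is logged.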
@@ -6668,22 +8179,25 @@ static elink_status_t elink_8073_is_snr_needed(struct bnx2x_softc *sc,
/* This is only required for 8073A1, version 102 only */
uint16_t val;
- /* Read 8073 HW revision */
+ /* Read 8073 HW revision*/
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val != 1) {
/* No need to workaround in 8073 A1 */
return ELINK_STATUS_OK;
}
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, &val);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
/* SNR should be applied only for version 0x102 */
if (val != 0x102)
return ELINK_STATUS_OK;
- return ELINK_STATUS_ERROR;
+ return 1;
}
static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
@@ -6692,7 +8206,8 @@ static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
uint16_t val, cnt, cnt1;
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val > 0) {
/* No need to workaround in 8073 A1 */
@@ -6707,16 +8222,17 @@ static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
for (cnt = 0; cnt < 1000; cnt++) {
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &val);
- /* If bit [14] = 0 or bit [13] = 0, continue on with
- * system initialization (XAUI work-around not required, as
- * these bits indicate 2.5G or 1G link up).
- */
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /* If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
if (!(val & (1 << 14)) || !(val & (1 << 13))) {
- PMD_DRV_LOG(DEBUG, "XAUI work-around not required");
+ ELINK_DEBUG_P0(sc, "XAUI work-around not required");
return ELINK_STATUS_OK;
} else if (!(val & (1 << 15))) {
- PMD_DRV_LOG(DEBUG, "bit 15 went off");
+ ELINK_DEBUG_P0(sc, "bit 15 went off");
/* If bit 15 is 0, then poll Dev1, Reg $C841 until it's
* MSB (bit15) goes to 1 (indicating that the XAUI
* workaround has completed), then continue on with
@@ -6724,12 +8240,11 @@ static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
*/
for (cnt1 = 0; cnt1 < 1000; cnt1++) {
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_XAUI_WA,
- &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_XAUI_WA, &val);
if (val & (1 << 15)) {
- PMD_DRV_LOG(DEBUG,
- "XAUI workaround has completed");
+ ELINK_DEBUG_P0(sc,
+ "XAUI workaround has completed");
return ELINK_STATUS_OK;
}
DELAY(1000 * 3);
@@ -6738,19 +8253,21 @@ static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
}
DELAY(1000 * 3);
}
- PMD_DRV_LOG(DEBUG, "Warning: XAUI work-around timeout !!!");
+ ELINK_DEBUG_P0(sc, "Warning: XAUI work-around timeout !!!");
return ELINK_STATUS_ERROR;
}
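For orientation, the work-around above is time-bounded: the outer speed/link poll and the inner MDIO_PMA_REG_8073_XAUI_WA poll each run at most 1000 iterations with DELAY(1000 * 3), i.e. 3 ms per pass, so each level waits on the order of 3 s (roughly 6 s worst case) before the "XAUI work-around timeout" warning is printed.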
static void elink_807x_force_10G(struct bnx2x_softc *sc, struct elink_phy *phy)
{
/* Force KR or KX */
- elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_BNX2X_CTRL, 0x0000);
- elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
}
static void elink_8073_set_pause_cl37(struct elink_params *params,
@@ -6766,21 +8283,22 @@ static void elink_8073_set_pause_cl37(struct elink_params *params,
/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
}
if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
}
if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
}
- PMD_DRV_LOG(DEBUG, "Ext phy AN advertize cl37 0x%x", cl37_val);
+ ELINK_DEBUG_P1(sc,
+		       "Ext phy AN advertise cl37 0x%x", cl37_val);
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
@@ -6798,31 +8316,31 @@ static void elink_8073_specific_func(struct elink_phy *phy,
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
(1 << 2));
- elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
- 0x0004);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
break;
}
}
static uint8_t elink_8073_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val = 0, tmp1;
uint8_t gpio_port;
- PMD_DRV_LOG(DEBUG, "Init 8073");
+ ELINK_DEBUG_P0(sc, "Init 8073");
if (CHIP_IS_E2(sc))
gpio_port = SC_PATH(sc);
else
gpio_port = params->port;
- /* Restore normal power mode */
+ /* Restore normal power mode*/
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
elink_8073_specific_func(phy, params, ELINK_PHY_INIT);
elink_8073_set_pause_cl37(params, phy, vars);
@@ -6830,14 +8348,15 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- PMD_DRV_LOG(DEBUG, "Before rom RX_ALARM(port1): 0x%x", tmp1);
+ ELINK_DEBUG_P1(sc, "Before rom RX_ALARM(port1): 0x%x", tmp1);
/* Swap polarity if required - Must be done only in non-1G mode */
if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
/* Configure the 8073 to swap _P and _N of the KR lines */
- PMD_DRV_LOG(DEBUG, "Swapping polarity for the 8073");
+ ELINK_DEBUG_P0(sc, "Swapping polarity for the 8073");
/* 10G Rx/Tx and 1G Tx signal polarity swap */
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
@@ -6848,31 +8367,33 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
(val | (3 << 9)));
}
+
/* Enable CL37 BAM */
if (REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- default_cfg)) &
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
elink_cl45_read(sc, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, &val);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
elink_cl45_write(sc, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, val | 1);
- PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR");
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ ELINK_DEBUG_P0(sc, "Enable CL37 BAM on KR");
}
if (params->loopback_mode == ELINK_LOOPBACK_EXT) {
elink_807x_force_10G(sc, phy);
- PMD_DRV_LOG(DEBUG, "Forced speed 10G on 807X");
+ ELINK_DEBUG_P0(sc, "Forced speed 10G on 807X");
return ELINK_STATUS_OK;
} else {
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_BNX2X_CTRL, 0x0002);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
}
if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) {
if (phy->req_line_speed == ELINK_SPEED_10000) {
val = (1 << 7);
- } else if (phy->req_line_speed == ELINK_SPEED_2500) {
+ } else if (phy->req_line_speed == ELINK_SPEED_2500) {
val = (1 << 5);
/* Note that 2.5G works only when used with 1G
* advertisement
@@ -6881,15 +8402,16 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
val = (1 << 5);
} else {
val = 0;
- if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1 << 7);
/* Note that 2.5G works only when used with 1G advertisement */
if (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
val |= (1 << 5);
- PMD_DRV_LOG(DEBUG, "807x autoneg val = 0x%x", val);
+ ELINK_DEBUG_P1(sc, "807x autoneg val = 0x%x", val);
}
elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
@@ -6903,13 +8425,13 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
&phy_ver);
- PMD_DRV_LOG(DEBUG, "Add 2.5G");
+ ELINK_DEBUG_P0(sc, "Add 2.5G");
if (phy_ver > 0)
tmp1 |= 1;
else
tmp1 &= 0xfffe;
} else {
- PMD_DRV_LOG(DEBUG, "Disable 2.5G");
+ ELINK_DEBUG_P0(sc, "Disable 2.5G");
tmp1 &= 0xfffe;
}
@@ -6943,14 +8465,14 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
/* Restart autoneg */
DELAY(1000 * 500);
elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
- PMD_DRV_LOG(DEBUG, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x",
- ((val & (1 << 5)) > 0), ((val & (1 << 7)) > 0));
+ ELINK_DEBUG_P2(sc, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x",
+ ((val & (1 << 5)) > 0), ((val & (1 << 7)) > 0));
return ELINK_STATUS_OK;
}
static uint8_t elink_8073_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t link_up = 0;
@@ -6958,33 +8480,41 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
uint16_t link_status = 0;
uint16_t an1000_status = 0;
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
- PMD_DRV_LOG(DEBUG, "8703 LASI status 0x%x", val1);
+ ELINK_DEBUG_P1(sc, "8703 LASI status 0x%x", val1);
/* Clear the interrupt LASI status register */
- elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
- elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "807x PCS status 0x%x->0x%x", val2, val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+ ELINK_DEBUG_P2(sc, "807x PCS status 0x%x->0x%x", val2, val1);
/* Clear MSG-OUT */
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
/* Check the LASI */
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
- PMD_DRV_LOG(DEBUG, "KR 0x9003 0x%x", val2);
+ ELINK_DEBUG_P1(sc, "KR 0x9003 0x%x", val2);
/* Check the link status */
- elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
- PMD_DRV_LOG(DEBUG, "KR PCS status 0x%x", val2);
+ elink_cl45_read(sc, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ ELINK_DEBUG_P1(sc, "KR PCS status 0x%x", val2);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
link_up = ((val1 & 4) == 4);
- PMD_DRV_LOG(DEBUG, "PMA_REG_STATUS=0x%x", val1);
+ ELINK_DEBUG_P1(sc, "PMA_REG_STATUS=0x%x", val1);
- if (link_up && ((phy->req_line_speed != ELINK_SPEED_10000))) {
+ if (link_up &&
+ ((phy->req_line_speed != ELINK_SPEED_10000))) {
if (elink_8073_xaui_wa(sc, phy) != 0)
return 0;
}
@@ -6994,10 +8524,12 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
/* Check the link status on 1.1.2 */
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "KR PMA status 0x%x->0x%x,"
- "an_link_status=0x%x", val2, val1, an1000_status);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ ELINK_DEBUG_P3(sc, "KR PMA status 0x%x->0x%x,"
+ "an_link_status=0x%x", val2, val1, an1000_status);
link_up = (((val1 & 4) == 4) || (an1000_status & (1 << 1)));
if (link_up && elink_8073_is_snr_needed(sc, phy)) {
@@ -7022,27 +8554,28 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_10000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G",
- params->port);
+ ELINK_DEBUG_P1(sc, "port %x: External link up in 10G",
+ params->port);
} else if ((link_status & (1 << 1)) && (!(link_status & (1 << 14)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_2500;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 2.5G",
- params->port);
+ ELINK_DEBUG_P1(sc, "port %x: External link up in 2.5G",
+ params->port);
} else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_1000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G",
- params->port);
+ ELINK_DEBUG_P1(sc, "port %x: External link up in 1G",
+ params->port);
} else {
link_up = 0;
- PMD_DRV_LOG(DEBUG, "port %x: External link is down",
- params->port);
+ ELINK_DEBUG_P1(sc, "port %x: External link is down",
+ params->port);
}
if (link_up) {
/* Swap polarity if required */
- if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ if (params->lane_config &
+ PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
/* Configure the 8073 to swap P and N of the KR lines */
elink_cl45_read(sc, phy,
MDIO_XS_DEVAD,
@@ -7051,15 +8584,16 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
* when it`s in 10G mode.
*/
if (vars->line_speed == ELINK_SPEED_1000) {
- PMD_DRV_LOG(DEBUG, "Swapping 1G polarity for"
- "the 8073");
+ ELINK_DEBUG_P0(sc, "Swapping 1G polarity for"
+ " the 8073");
val1 |= (1 << 3);
} else
val1 &= ~(1 << 3);
elink_cl45_write(sc, phy,
MDIO_XS_DEVAD,
- MDIO_XS_REG_8073_RX_CTRL_PCIE, val1);
+ MDIO_XS_REG_8073_RX_CTRL_PCIE,
+ val1);
}
elink_ext_phy_10G_an_resolve(sc, phy, vars);
elink_8073_resolve_fc(phy, params, vars);
@@ -7072,10 +8606,10 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
if (val1 & (1 << 5))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
if (val1 & (1 << 7))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
return link_up;
@@ -7090,25 +8624,25 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
gpio_port = SC_PATH(sc);
else
gpio_port = params->port;
- PMD_DRV_LOG(DEBUG, "Setting 8073 port %d into low power mode",
- gpio_port);
+ ELINK_DEBUG_P1(sc, "Setting 8073 port %d into low power mode",
+ gpio_port);
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
}
/******************************************************************/
/* BNX2X8705 PHY SECTION */
/******************************************************************/
static uint8_t elink_8705_config_init(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars
- *vars)
+ struct elink_params *params,
+ __rte_unused struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "init 8705");
- /* Restore normal power mode */
+ ELINK_DEBUG_P0(sc, "init 8705");
+ /* Restore normal power mode*/
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
elink_ext_phy_hw_reset(sc, params->port);
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
@@ -7120,36 +8654,40 @@ static uint8_t elink_8705_config_init(struct elink_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
- elink_cl45_write(sc, phy, MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+ elink_cl45_write(sc, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
/* BNX2X8705 doesn't have microcode, hence the 0 */
elink_save_spirom_version(sc, params->port, params->shmem_base, 0);
return ELINK_STATUS_OK;
}
static uint8_t elink_8705_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint8_t link_up = 0;
uint16_t val1, rx_sd;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "read status 8705");
+ ELINK_DEBUG_P0(sc, "read status 8705");
elink_cl45_read(sc, phy,
- MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1);
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ ELINK_DEBUG_P1(sc, "8705 LASI status 0x%x", val1);
elink_cl45_read(sc, phy,
- MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1);
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ ELINK_DEBUG_P1(sc, "8705 LASI status 0x%x", val1);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
- PMD_DRV_LOG(DEBUG, "8705 1.c809 val=0x%x", val1);
- link_up = ((rx_sd & 0x1) && (val1 & (1 << 9))
- && ((val1 & (1 << 8)) == 0));
+ ELINK_DEBUG_P1(sc, "8705 1.c809 val=0x%x", val1);
+ link_up = ((rx_sd & 0x1) && (val1 & (1 << 9)) &&
+ ((val1 & (1 << 8)) == 0));
if (link_up) {
vars->line_speed = ELINK_SPEED_10000;
elink_ext_phy_resolve_fc(phy, params, vars);
@@ -7170,17 +8708,17 @@ static void elink_set_disable_pmd_transmit(struct elink_params *params,
*/
if (pmd_dis) {
if (params->feature_config_flags &
- ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) {
- PMD_DRV_LOG(DEBUG, "Disabling PMD transmitter");
+ ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) {
+ ELINK_DEBUG_P0(sc, "Disabling PMD transmitter");
} else {
- PMD_DRV_LOG(DEBUG, "NOT disabling PMD transmitter");
+ ELINK_DEBUG_P0(sc, "NOT disabling PMD transmitter");
return;
}
- } else {
- PMD_DRV_LOG(DEBUG, "Enabling PMD transmitter");
- }
+ } else
+ ELINK_DEBUG_P0(sc, "Enabling PMD transmitter");
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, pmd_dis);
}
static uint8_t elink_get_gpio_port(struct elink_params *params)
@@ -7188,37 +8726,38 @@ static uint8_t elink_get_gpio_port(struct elink_params *params)
uint8_t gpio_port;
uint32_t swap_val, swap_override;
struct bnx2x_softc *sc = params->sc;
- if (CHIP_IS_E2(sc)) {
+ if (CHIP_IS_E2(sc))
gpio_port = SC_PATH(sc);
- } else {
+ else
gpio_port = params->port;
- }
swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
return gpio_port ^ (swap_val && swap_override);
}
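The return expression above swaps the GPIO port only when both the port-swap strap and its override are set: the logical && collapses the two register values to 0 or 1, and the XOR with gpio_port (itself 0 or 1) flips the port in that case. A compact illustration:

/* gpio_port   swap_val   swap_override   returned value        */
/*     0          0            any        0  (no swap)          */
/*     1          0            any        1  (no swap)          */
/*     0        non-0        non-0        1  (ports swapped)    */
/*     1        non-0        non-0        0  (ports swapped)    */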
static void elink_sfp_e1e2_set_transmitter(struct elink_params *params,
- struct elink_phy *phy, uint8_t tx_en)
+ struct elink_phy *phy,
+ uint8_t tx_en)
{
uint16_t val;
uint8_t port = params->port;
struct bnx2x_softc *sc = params->sc;
uint32_t tx_en_mode;
- /* Disable/Enable transmitter ( TX laser of the SFP+ module.) */
+ /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
tx_en_mode = REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[port].sfp_ctrl)) &
- PORT_HW_CFG_TX_LASER_MASK;
- PMD_DRV_LOG(DEBUG, "Setting transmitter tx_en=%x for port %x "
- "mode = %x", tx_en, port, tx_en_mode);
+ PORT_HW_CFG_TX_LASER_MASK;
+ ELINK_DEBUG_P3(sc, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x", tx_en, port, tx_en_mode);
switch (tx_en_mode) {
case PORT_HW_CFG_TX_LASER_MDIO:
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &val);
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
if (tx_en)
val &= ~(1 << 15);
@@ -7227,36 +8766,38 @@ static void elink_sfp_e1e2_set_transmitter(struct elink_params *params,
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, val);
- break;
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
case PORT_HW_CFG_TX_LASER_GPIO0:
case PORT_HW_CFG_TX_LASER_GPIO1:
case PORT_HW_CFG_TX_LASER_GPIO2:
case PORT_HW_CFG_TX_LASER_GPIO3:
- {
- uint16_t gpio_pin;
- uint8_t gpio_port, gpio_mode;
- if (tx_en)
- gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
- else
- gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+ {
+ uint16_t gpio_pin;
+ uint8_t gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
- gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
- gpio_port = elink_get_gpio_port(params);
- elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
- break;
- }
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = elink_get_gpio_port(params);
+ elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
default:
- PMD_DRV_LOG(DEBUG, "Invalid TX_LASER_MDIO 0x%x", tx_en_mode);
+ ELINK_DEBUG_P1(sc, "Invalid TX_LASER_MDIO 0x%x", tx_en_mode);
break;
}
}
static void elink_sfp_set_transmitter(struct elink_params *params,
- struct elink_phy *phy, uint8_t tx_en)
+ struct elink_phy *phy,
+ uint8_t tx_en)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Setting SFP+ transmitter to %d", tx_en);
+ ELINK_DEBUG_P1(sc, "Setting SFP+ transmitter to %d", tx_en);
if (CHIP_IS_E3(sc))
elink_sfp_e3_set_transmitter(params, phy, tx_en);
else
@@ -7264,20 +8805,17 @@ static void elink_sfp_set_transmitter(struct elink_params *params,
}
static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy,
- struct elink_params
- *params,
- uint8_t dev_addr,
- uint16_t addr,
- uint8_t byte_cnt,
- uint8_t * o_buf,
- __rte_unused uint8_t
- is_init)
+ struct elink_params *params,
+ uint8_t dev_addr, uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t *o_buf, __rte_unused uint8_t is_init)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val = 0;
uint16_t i;
if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
- PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf");
+ ELINK_DEBUG_P0(sc,
+ "Reading from eeprom is limited to 0xf");
return ELINK_STATUS_ERROR;
}
/* Set the read command byte count */
@@ -7307,10 +8845,10 @@ static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy,
}
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
- MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
- PMD_DRV_LOG(DEBUG,
- "Got bad status 0x%x when reading from SFP+ EEPROM",
- (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ ELINK_DEBUG_P1(sc,
+ "Got bad status 0x%x when reading from SFP+ EEPROM",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
return ELINK_STATUS_ERROR;
}
@@ -7319,8 +8857,8 @@ static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
- o_buf[i] =
- (uint8_t) (val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+ o_buf[i] = (uint8_t)
+ (val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
@@ -7343,29 +8881,27 @@ static void elink_warpcore_power_module(struct elink_params *params,
pin_cfg = (REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- e3_sfp_ctrl)) & PORT_HW_CFG_E3_PWR_DIS_MASK)
- >> PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
if (pin_cfg == PIN_CFG_NA)
return;
- PMD_DRV_LOG(DEBUG, "Setting SFP+ module power to %d using pin cfg %d",
- power, pin_cfg);
+ ELINK_DEBUG_P2(sc, "Setting SFP+ module power to %d using pin cfg %d",
+ power, pin_cfg);
/* Low ==> corresponding SFP+ module is powered
* high ==> the SFP+ module is powered down
*/
elink_set_cfg_pin(sc, pin_cfg, power ^ 1);
}
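As the comment above notes, the SFP+ power pin is active-low, so the logical flag is inverted before being driven: elink_set_cfg_pin(sc, pin_cfg, power ^ 1) writes 0 (pin low, module powered) when power == 1 and writes 1 (pin high, module powered down) when power == 0.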
-
-static elink_status_t elink_warpcore_read_sfp_module_eeprom(__rte_unused struct
- elink_phy *phy,
- struct elink_params
- *params,
- uint8_t dev_addr,
- uint16_t addr,
- uint8_t byte_cnt,
- uint8_t * o_buf,
- uint8_t is_init)
+static elink_status_t elink_warpcore_read_sfp_module_eeprom(
+ __rte_unused struct elink_phy *phy,
+ struct elink_params *params,
+ uint8_t dev_addr,
+ uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t *o_buf,
+ uint8_t is_init)
{
elink_status_t rc = ELINK_STATUS_OK;
uint8_t i, j = 0, cnt = 0;
@@ -7374,8 +8910,8 @@ static elink_status_t elink_warpcore_read_sfp_module_eeprom(__rte_unused struct
struct bnx2x_softc *sc = params->sc;
if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
- PMD_DRV_LOG(DEBUG,
- "Reading from eeprom is limited to 16 bytes");
+ ELINK_DEBUG_P0(sc,
+ "Reading from eeprom is limited to 16 bytes");
return ELINK_STATUS_ERROR;
}
@@ -7388,13 +8924,15 @@ static elink_status_t elink_warpcore_read_sfp_module_eeprom(__rte_unused struct
DELAY(1000 * 1);
elink_warpcore_power_module(params, 1);
}
- rc = elink_bsc_read(params, sc, dev_addr, addr32, 0, byte_cnt,
+
+ elink_bsc_module_sel(params);
+ rc = elink_bsc_read(sc, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != ELINK_STATUS_OK) && (++cnt < I2C_WA_RETRY_CNT));
if (rc == ELINK_STATUS_OK) {
for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
- o_buf[j] = *((uint8_t *) data_array + i);
+ o_buf[j] = *((uint8_t *)data_array + i);
j++;
}
}
@@ -7403,20 +8941,18 @@ static elink_status_t elink_warpcore_read_sfp_module_eeprom(__rte_unused struct
}
static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
- struct elink_params
- *params,
- uint8_t dev_addr,
- uint16_t addr,
- uint8_t byte_cnt,
- uint8_t * o_buf,
- __rte_unused uint8_t
- is_init)
+ struct elink_params *params,
+ uint8_t dev_addr, uint16_t addr,
+ uint8_t byte_cnt,
+ uint8_t *o_buf,
+ __rte_unused uint8_t is_init)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val, i;
if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
- PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf");
+ ELINK_DEBUG_P0(sc,
+ "Reading from eeprom is limited to 0xf");
return ELINK_STATUS_ERROR;
}
@@ -7431,7 +8967,9 @@ static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
/* Need to read from 1.8000 to clear it */
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
/* Set the read command byte count */
elink_cl45_write(sc, phy,
@@ -7442,16 +8980,19 @@ static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
/* Set the read command address */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, addr);
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
/* Set the destination address */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- 0x8004, MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
/* Activate read command */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 0x8002);
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
/* Wait appropriate time for two-wire command to finish before
* polling the status register
*/
@@ -7469,10 +9010,10 @@ static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
}
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
- MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
- PMD_DRV_LOG(DEBUG,
- "Got bad status 0x%x when reading from SFP+ EEPROM",
- (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ ELINK_DEBUG_P1(sc,
+ "Got bad status 0x%x when reading from SFP+ EEPROM",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
return ELINK_STATUS_TIMEOUT;
}
@@ -7481,8 +9022,8 @@ static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
- o_buf[i] =
- (uint8_t) (val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
+ o_buf[i] = (uint8_t)
+ (val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
@@ -7497,21 +9038,18 @@ static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
return ELINK_STATUS_ERROR;
}
-
-static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
- struct elink_params *params,
- uint8_t dev_addr,
- uint16_t addr,
- uint16_t byte_cnt,
- uint8_t * o_buf)
+elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
+ struct elink_params *params, uint8_t dev_addr,
+ uint16_t addr, uint16_t byte_cnt,
+ uint8_t *o_buf)
{
- elink_status_t rc = ELINK_STATUS_OK;
+ elink_status_t rc = 0;
+ struct bnx2x_softc *sc = params->sc;
uint8_t xfer_size;
uint8_t *user_data = o_buf;
read_sfp_module_eeprom_func_p read_func;
-
if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
- PMD_DRV_LOG(DEBUG, "invalid dev_addr 0x%x", dev_addr);
+ ELINK_DEBUG_P1(sc, "invalid dev_addr 0x%x", dev_addr);
return ELINK_STATUS_ERROR;
}
@@ -7532,7 +9070,7 @@ static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
while (!rc && (byte_cnt > 0)) {
xfer_size = (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) ?
- ELINK_SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ ELINK_SFP_EEPROM_PAGE_SIZE : byte_cnt;
rc = read_func(phy, params, dev_addr, addr, xfer_size,
user_data, 0);
byte_cnt -= xfer_size;
@@ -7543,91 +9081,105 @@ static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
}
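With elink_read_sfp_module_eeprom() made non-static here, callers outside this file can use it directly. A usage sketch (buffer size and error handling are illustrative; the signature, ELINK_I2C_DEV_ADDR_A0 and the page-size chunking come from the hunks above):

uint8_t id_page[64];	/* start of the SFP+ A0h ID page */

if (elink_read_sfp_module_eeprom(phy, params, ELINK_I2C_DEV_ADDR_A0,
				 0, sizeof(id_page), id_page) != 0)
	ELINK_DEBUG_P0(sc, "failed to read SFP+ module EEPROM");
/* The helper splits the transfer into ELINK_SFP_EEPROM_PAGE_SIZE-byte
 * chunks internally, as the loop above shows. */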
static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
- struct elink_params *params,
- uint16_t * edc_mode)
+ struct elink_params *params,
+ uint16_t *edc_mode)
{
struct bnx2x_softc *sc = params->sc;
uint32_t sync_offset = 0, phy_idx, media_types;
- uint8_t gport, val[2], check_limiting_mode = 0;
+ uint8_t val[ELINK_SFP_EEPROM_FC_TX_TECH_ADDR + 1];
+ uint8_t check_limiting_mode = 0;
*edc_mode = ELINK_EDC_MODE_LIMITING;
phy->media_type = ELINK_ETH_PHY_UNSPECIFIED;
/* First check for copper cable */
if (elink_read_sfp_module_eeprom(phy,
params,
ELINK_I2C_DEV_ADDR_A0,
- ELINK_SFP_EEPROM_CON_TYPE_ADDR,
- 2, (uint8_t *) val) != 0) {
- PMD_DRV_LOG(DEBUG, "Failed to read from SFP+ module EEPROM");
+ 0,
+ ELINK_SFP_EEPROM_FC_TX_TECH_ADDR + 1,
+ (uint8_t *)val) != 0) {
+ ELINK_DEBUG_P0(sc, "Failed to read from SFP+ module EEPROM");
return ELINK_STATUS_ERROR;
}
-
- switch (val[0]) {
+ params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+ params->link_attr_sync |= val[ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+ LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+ elink_update_link_attr(params, params->link_attr_sync);
+ switch (val[ELINK_SFP_EEPROM_CON_TYPE_ADDR]) {
case ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER:
- {
- uint8_t copper_module_type;
- phy->media_type = ELINK_ETH_PHY_DA_TWINAX;
- /* Check if its active cable (includes SFP+ module)
- * of passive cable
+ {
+ uint8_t copper_module_type;
+ phy->media_type = ELINK_ETH_PHY_DA_TWINAX;
+		/* Check if it's an active cable (includes SFP+ module)
+		 * or a passive cable
+ */
+ copper_module_type = val[ELINK_SFP_EEPROM_FC_TX_TECH_ADDR];
+ if (copper_module_type &
+ ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
+ ELINK_DEBUG_P0(sc, "Active Copper cable detected");
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ *edc_mode = ELINK_EDC_MODE_ACTIVE_DAC;
+ else
+ check_limiting_mode = 1;
+ } else {
+ *edc_mode = ELINK_EDC_MODE_PASSIVE_DAC;
+ /* Even in case PASSIVE_DAC indication is not set,
+ * treat it as a passive DAC cable, since some cables
+ * don't have this indication.
*/
- if (elink_read_sfp_module_eeprom(phy,
- params,
- ELINK_I2C_DEV_ADDR_A0,
- ELINK_SFP_EEPROM_FC_TX_TECH_ADDR,
- 1,
- &copper_module_type) !=
- 0) {
- PMD_DRV_LOG(DEBUG,
- "Failed to read copper-cable-type"
- " from SFP+ EEPROM");
- return ELINK_STATUS_ERROR;
- }
-
if (copper_module_type &
- ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
- PMD_DRV_LOG(DEBUG,
- "Active Copper cable detected");
- if (phy->type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- *edc_mode = ELINK_EDC_MODE_ACTIVE_DAC;
- else
- check_limiting_mode = 1;
- } else if (copper_module_type &
- ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE)
- {
- PMD_DRV_LOG(DEBUG,
- "Passive Copper cable detected");
- *edc_mode = ELINK_EDC_MODE_PASSIVE_DAC;
+ ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ ELINK_DEBUG_P0(sc,
+ "Passive Copper cable detected");
} else {
- PMD_DRV_LOG(DEBUG,
- "Unknown copper-cable-type 0x%x !!!",
- copper_module_type);
- return ELINK_STATUS_ERROR;
+ ELINK_DEBUG_P0(sc,
+ "Unknown copper-cable-type");
}
- break;
}
+ break;
+ }
+ case ELINK_SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
case ELINK_SFP_EEPROM_CON_TYPE_VAL_LC:
case ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
- if ((val[1] & (ELINK_SFP_EEPROM_COMP_CODE_SR_MASK |
- ELINK_SFP_EEPROM_COMP_CODE_LR_MASK |
- ELINK_SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- PMD_DRV_LOG(DEBUG, "1G SFP module detected");
- gport = params->port;
+ /* Module is considered as 1G in case it's NOT compliant with
+ * any 10G ethernet protocol, but is 1G Ethernet compliant.
+ */
+ if (((val[ELINK_SFP_EEPROM_10G_COMP_CODE_ADDR] &
+ (ELINK_SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+ ELINK_SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+ ELINK_SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) &&
+ (val[ELINK_SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) {
+ ELINK_DEBUG_P0(sc, "1G SFP module detected");
phy->media_type = ELINK_ETH_PHY_SFP_1G_FIBER;
if (phy->req_line_speed != ELINK_SPEED_1000) {
+ uint8_t gport = params->port;
phy->req_line_speed = ELINK_SPEED_1000;
if (!CHIP_IS_E1x(sc)) {
gport = SC_PATH(sc) +
- (params->port << 1);
+ (params->port << 1);
}
- elink_cb_event_log(sc, ELINK_LOG_ID_NON_10G_MODULE, gport); //"Warning: Link speed was forced to 1000Mbps."
- // " Current SFP module in port %d is not"
- // " compliant with 10G Ethernet",
+ elink_cb_event_log(sc,
+ ELINK_LOG_ID_NON_10G_MODULE,
+ gport);
+ /*"Warning: Link speed was forced to 1000Mbps."
+ *" Current SFP module in port %d is not"
+ *" compliant with 10G Ethernet",
+ */
+ }
+ if (val[ELINK_SFP_EEPROM_1G_COMP_CODE_ADDR] &
+ ELINK_SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+ /* Some 1G-baseT modules will not link up,
+ * unless TX_EN is toggled with long delay in
+ * between.
+ */
+ elink_sfp_set_transmitter(params, phy, 0);
+ DELAY(1000 * 40);
+ elink_sfp_set_transmitter(params, phy, 1);
}
} else {
int idx, cfg_idx = 0;
- PMD_DRV_LOG(DEBUG, "10G Optic module detected");
+ ELINK_DEBUG_P0(sc, "10G Optic module detected");
for (idx = ELINK_INT_PHY; idx < ELINK_MAX_PHYS; idx++) {
if (params->phy[idx].type == phy->type) {
cfg_idx = ELINK_LINK_CONFIG_IDX(idx);
@@ -7639,24 +9191,22 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
}
break;
default:
- PMD_DRV_LOG(DEBUG, "Unable to determine module type 0x%x !!!",
- val[0]);
+ ELINK_DEBUG_P1(sc, "Unable to determine module type 0x%x !!!",
+ val[ELINK_SFP_EEPROM_CON_TYPE_ADDR]);
return ELINK_STATUS_ERROR;
}
sync_offset = params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].media_type);
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
media_types = REG_RD(sc, sync_offset);
/* Update media type for non-PMF sync */
for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) {
if (&(params->phy[phy_idx]) == phy) {
media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
- (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
- phy_idx));
- media_types |=
- ((phy->
- media_type & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
- (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
break;
}
}
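Each PHY's media type occupies its own bit-field inside the shared media_types word: the PHY0 mask shifted left by phy_idx * PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT first clears that PHY's field, then the new value is ORed in at the same position. A worked sketch with a hypothetical field width and shift of 8 bits (the real values come from the PORT_HW_CFG_* definitions, not from this patch):

/* MASK/SHIFT below are stand-ins, for illustration only. */
#define MASK	0xff	/* plays the role of PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK  */
#define SHIFT	8	/* plays the role of PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT */

uint32_t media_types = 0x00030201;	/* PHY0 = 0x01, PHY1 = 0x02, PHY2 = 0x03 */
uint32_t phy_idx = 1, new_type = 0x07;

media_types &= ~(MASK << (SHIFT * phy_idx));		/* clear PHY1's field */
media_types |= (new_type & MASK) << (SHIFT * phy_idx);	/* write the new one  */
/* media_types is now 0x00030701 -- only PHY1's field changed. */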
@@ -7669,8 +9219,8 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
ELINK_SFP_EEPROM_OPTIONS_ADDR,
ELINK_SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
- PMD_DRV_LOG(DEBUG,
- "Failed to read Option field from module EEPROM");
+ ELINK_DEBUG_P0(sc,
+ "Failed to read Option field from module EEPROM");
return ELINK_STATUS_ERROR;
}
if ((options[0] & ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
@@ -7678,15 +9228,14 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
else
*edc_mode = ELINK_EDC_MODE_LIMITING;
}
- PMD_DRV_LOG(DEBUG, "EDC mode is set to 0x%x", *edc_mode);
+ ELINK_DEBUG_P1(sc, "EDC mode is set to 0x%x", *edc_mode);
return ELINK_STATUS_OK;
}
-
/* This function read the relevant field from the module (SFP+), and verify it
* is compliant with this board
*/
static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
- struct elink_params *params)
+ struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
uint32_t val, cmd;
@@ -7695,12 +9244,11 @@ static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
char vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE + 1];
phy->flags &= ~ELINK_FLAGS_SFP_NOT_APPROVED;
val = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_feature_config[params->port].
- config));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
- PMD_DRV_LOG(DEBUG, "NOT enforcing module verification");
+ ELINK_DEBUG_P0(sc, "NOT enforcing module verification");
return ELINK_STATUS_OK;
}
@@ -7710,23 +9258,24 @@ static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
} else if (params->feature_config_flags &
ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
- /* Use first phy request only in case of non-dual media */
+ /* Use first phy request only in case of non-dual media*/
if (ELINK_DUAL_MEDIA(params)) {
- PMD_DRV_LOG(DEBUG,
- "FW does not support OPT MDL verification");
+ ELINK_DEBUG_P0(sc,
+ "FW does not support OPT MDL verification");
return ELINK_STATUS_ERROR;
}
cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
} else {
/* No support in OPT MDL detection */
- PMD_DRV_LOG(DEBUG, "FW does not support OPT MDL verification");
+ ELINK_DEBUG_P0(sc,
+ "FW does not support OPT MDL verification");
return ELINK_STATUS_ERROR;
}
fw_cmd_param = ELINK_FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
fw_resp = elink_cb_fw_command(sc, cmd, fw_cmd_param);
if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
- PMD_DRV_LOG(DEBUG, "Approved module");
+ ELINK_DEBUG_P0(sc, "Approved module");
return ELINK_STATUS_OK;
}
@@ -7736,7 +9285,7 @@ static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
ELINK_I2C_DEV_ADDR_A0,
ELINK_SFP_EEPROM_VENDOR_NAME_ADDR,
ELINK_SFP_EEPROM_VENDOR_NAME_SIZE,
- (uint8_t *) vendor_name))
+ (uint8_t *)vendor_name))
vendor_name[0] = '\0';
else
vendor_name[ELINK_SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
@@ -7745,13 +9294,16 @@ static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
ELINK_I2C_DEV_ADDR_A0,
ELINK_SFP_EEPROM_PART_NO_ADDR,
ELINK_SFP_EEPROM_PART_NO_SIZE,
- (uint8_t *) vendor_pn))
+ (uint8_t *)vendor_pn))
vendor_pn[0] = '\0';
else
vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE] = '\0';
- elink_cb_event_log(sc, ELINK_LOG_ID_UNQUAL_IO_MODULE, params->port, vendor_name, vendor_pn); // "Warning: Unqualified SFP+ module detected,"
- // " Port %d from %s part number %s",
+ elink_cb_event_log(sc, ELINK_LOG_ID_UNQUAL_IO_MODULE, params->port,
+ vendor_name, vendor_pn);
+ /* "Warning: Unqualified SFP+ module detected,"
+ * " Port %d from %s part number %s",
+ */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG)
@@ -7759,13 +9311,14 @@ static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
return ELINK_STATUS_ERROR;
}
-static elink_status_t elink_wait_for_sfp_module_initialized(struct elink_phy
- *phy,
- struct elink_params
- *params)
+static elink_status_t elink_wait_for_sfp_module_initialized(
+ struct elink_phy *phy,
+ struct elink_params *params)
+
{
uint8_t val;
elink_status_t rc;
+ struct bnx2x_softc *sc = params->sc;
uint16_t timeout;
/* Initialization time after hot-plug may take up to 300ms for
* some phys type ( e.g. JDSU )
@@ -7773,18 +9326,17 @@ static elink_status_t elink_wait_for_sfp_module_initialized(struct elink_phy
for (timeout = 0; timeout < 60; timeout++) {
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- rc = elink_warpcore_read_sfp_module_eeprom(phy, params,
- ELINK_I2C_DEV_ADDR_A0,
- 1, 1, &val,
- 1);
+ rc = elink_warpcore_read_sfp_module_eeprom(
+ phy, params, ELINK_I2C_DEV_ADDR_A0, 1, 1, &val,
+ 1);
else
rc = elink_read_sfp_module_eeprom(phy, params,
ELINK_I2C_DEV_ADDR_A0,
1, 1, &val);
if (rc == 0) {
- PMD_DRV_LOG(DEBUG,
- "SFP+ module initialization took %d ms",
- timeout * 5);
+ ELINK_DEBUG_P1(sc,
+ "SFP+ module initialization took %d ms",
+ timeout * 5);
return ELINK_STATUS_OK;
}
DELAY(1000 * 5);
@@ -7795,8 +9347,8 @@ static elink_status_t elink_wait_for_sfp_module_initialized(struct elink_phy
}
static void elink_8727_power_module(struct bnx2x_softc *sc,
- struct elink_phy *phy, uint8_t is_power_up)
-{
+ struct elink_phy *phy,
+ uint8_t is_power_up) {
/* Make sure GPIOs are not using for LED mode */
uint16_t val;
/* In the GPIO register, bit 4 is use to determine if the GPIOs are
@@ -7821,29 +9373,33 @@ static void elink_8727_power_module(struct bnx2x_softc *sc,
val = (1 << 1);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
}
static elink_status_t elink_8726_set_limiting_mode(struct bnx2x_softc *sc,
- struct elink_phy *phy,
- uint16_t edc_mode)
+ struct elink_phy *phy,
+ uint16_t edc_mode)
{
uint16_t cur_limiting_mode;
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &cur_limiting_mode);
- PMD_DRV_LOG(DEBUG, "Current Limiting mode is 0x%x", cur_limiting_mode);
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
+ ELINK_DEBUG_P1(sc, "Current Limiting mode is 0x%x",
+ cur_limiting_mode);
if (edc_mode == ELINK_EDC_MODE_LIMITING) {
- PMD_DRV_LOG(DEBUG, "Setting LIMITING MODE");
+ ELINK_DEBUG_P0(sc, "Setting LIMITING MODE");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
ELINK_EDC_MODE_LIMITING);
- } else { /* LRM mode ( default ) */
+ } else { /* LRM mode ( default )*/
- PMD_DRV_LOG(DEBUG, "Setting LRM MODE");
+ ELINK_DEBUG_P0(sc, "Setting LRM MODE");
/* Changing to LRM mode takes quite few seconds. So do it only
* if current mode is limiting (default is LRM)
@@ -7852,27 +9408,35 @@ static elink_status_t elink_8726_set_limiting_mode(struct bnx2x_softc *sc,
return ELINK_STATUS_OK;
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LRM_MODE, 0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, 0x128);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL0, 0x4008);
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LRM_MODE, 0xaaaa);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
}
return ELINK_STATUS_OK;
}
static elink_status_t elink_8727_set_limiting_mode(struct bnx2x_softc *sc,
- struct elink_phy *phy,
- uint16_t edc_mode)
+ struct elink_phy *phy,
+ uint16_t edc_mode)
{
uint16_t phy_identifier;
uint16_t rom_ver2_val;
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &phy_identifier);
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
@@ -7880,7 +9444,9 @@ static elink_status_t elink_8727_set_limiting_mode(struct bnx2x_softc *sc,
(phy_identifier & ~(1 << 9)));
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, &rom_ver2_val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
@@ -7914,12 +9480,14 @@ static void elink_8727_specific_func(struct elink_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
(1 << 2) | (1 << 5));
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 0);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
/* Make MOD_ABS give interrupt on change */
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL, &val);
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
val |= (1 << 12);
if (phy->flags & ELINK_FLAGS_NOC)
val |= (3 << 5);
@@ -7927,29 +9495,27 @@ static void elink_8727_specific_func(struct elink_phy *phy,
* status which reflect SFP+ module over-current
*/
if (!(phy->flags & ELINK_FLAGS_NOC))
- val &= 0xff8f; /* Reset bits 4-6 */
+ val &= 0xff8f; /* Reset bits 4-6 */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
val);
break;
default:
- PMD_DRV_LOG(DEBUG, "Function 0x%x not supported by 8727",
- action);
+ ELINK_DEBUG_P1(sc, "Function 0x%x not supported by 8727",
+ action);
return;
}
}
static void elink_set_e1e2_module_fault_led(struct elink_params *params,
- uint8_t gpio_mode)
+ uint8_t gpio_mode)
{
struct bnx2x_softc *sc = params->sc;
uint32_t fault_led_gpio = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.
- port_hw_config[params->port].
- sfp_ctrl)) &
- PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
switch (fault_led_gpio) {
case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
return;
@@ -7957,19 +9523,19 @@ static void elink_set_e1e2_module_fault_led(struct elink_params *params,
case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
- {
- uint8_t gpio_port = elink_get_gpio_port(params);
- uint16_t gpio_pin = fault_led_gpio -
- PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
- PMD_DRV_LOG(DEBUG, "Set fault module-detected led "
- "pin %x port %x mode %x",
- gpio_pin, gpio_port, gpio_mode);
- elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
- }
- break;
+ {
+ uint8_t gpio_port = elink_get_gpio_port(params);
+ uint16_t gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ ELINK_DEBUG_P3(sc, "Set fault module-detected led "
+ "pin %x port %x mode %x",
+ gpio_pin, gpio_port, gpio_mode);
+ elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
default:
- PMD_DRV_LOG(DEBUG, "Error: Invalid fault led mode 0x%x",
- fault_led_gpio);
+ ELINK_DEBUG_P1(sc, "Error: Invalid fault led mode 0x%x",
+ fault_led_gpio);
}
}
@@ -7980,12 +9546,12 @@ static void elink_set_e3_module_fault_led(struct elink_params *params,
uint8_t port = params->port;
struct bnx2x_softc *sc = params->sc;
pin_cfg = (REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].e3_sfp_ctrl)) &
- PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
- PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
- PMD_DRV_LOG(DEBUG, "Setting Fault LED to %d using pin cfg %d",
- gpio_mode, pin_cfg);
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+ PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+ ELINK_DEBUG_P2(sc, "Setting Fault LED to %d using pin cfg %d",
+ gpio_mode, pin_cfg);
elink_set_cfg_pin(sc, pin_cfg, gpio_mode);
}
@@ -7993,7 +9559,7 @@ static void elink_set_sfp_module_fault_led(struct elink_params *params,
uint8_t gpio_mode)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Setting SFP+ module fault LED to %d", gpio_mode);
+ ELINK_DEBUG_P1(sc, "Setting SFP+ module fault LED to %d", gpio_mode);
if (CHIP_IS_E3(sc)) {
/* Low ==> if SFP+ module is supported otherwise
* High ==> if SFP+ module is not on the approved vendor list
@@ -8018,9 +9584,11 @@ static void elink_warpcore_hw_reset(__rte_unused struct elink_phy *phy,
}
static void elink_power_sfp_module(struct elink_params *params,
- struct elink_phy *phy, uint8_t power)
+ struct elink_phy *phy,
+ uint8_t power)
{
- PMD_DRV_LOG(DEBUG, "Setting SFP+ power to %x", power);
+ struct bnx2x_softc *sc = params->sc;
+ ELINK_DEBUG_P1(sc, "Setting SFP+ power to %x", power);
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727:
@@ -8034,7 +9602,6 @@ static void elink_power_sfp_module(struct elink_params *params,
break;
}
}
-
static void elink_warpcore_set_limiting_mode(struct elink_params *params,
struct elink_phy *phy,
uint16_t edc_mode)
@@ -8043,7 +9610,7 @@ static void elink_warpcore_set_limiting_mode(struct elink_params *params,
uint16_t mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
struct bnx2x_softc *sc = params->sc;
- uint8_t lane = elink_get_warpcore_lane(params);
+ uint8_t lane = elink_get_warpcore_lane(phy, params);
/* This is a global register which controls all lanes */
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
@@ -8076,7 +9643,8 @@ static void elink_warpcore_set_limiting_mode(struct elink_params *params,
}
static void elink_set_limiting_mode(struct elink_params *params,
- struct elink_phy *phy, uint16_t edc_mode)
+ struct elink_phy *phy,
+ uint16_t edc_mode)
{
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726:
@@ -8092,30 +9660,28 @@ static void elink_set_limiting_mode(struct elink_params *params,
}
}
-static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
- struct elink_params *params)
+elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
+ struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
uint16_t edc_mode;
elink_status_t rc = ELINK_STATUS_OK;
uint32_t val = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_feature_config[params->
- port].
- config));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
/* Enabled transmitter by default */
elink_sfp_set_transmitter(params, phy, 1);
- PMD_DRV_LOG(DEBUG, "SFP+ module plugged in/out detected on port %d",
- params->port);
+ ELINK_DEBUG_P1(sc, "SFP+ module plugged in/out detected on port %d",
+ params->port);
/* Power up module */
elink_power_sfp_module(params, phy, 1);
if (elink_get_edc_mode(phy, params, &edc_mode) != 0) {
- PMD_DRV_LOG(DEBUG, "Failed to get valid module type");
+ ELINK_DEBUG_P0(sc, "Failed to get valid module type");
return ELINK_STATUS_ERROR;
} else if (elink_verify_sfp_module(phy, params) != 0) {
/* Check SFP+ module compatibility */
- PMD_DRV_LOG(DEBUG, "Module verification failed!!");
+ ELINK_DEBUG_P0(sc, "Module verification failed!!");
rc = ELINK_STATUS_ERROR;
/* Turn on fault module-detected led */
elink_set_sfp_module_fault_led(params,
@@ -8123,8 +9689,8 @@ static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
/* Check if need to power down the SFP+ module */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
- PMD_DRV_LOG(DEBUG, "Shutdown SFP+ module!!");
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
+ ELINK_DEBUG_P0(sc, "Shutdown SFP+ module!!");
elink_power_sfp_module(params, phy, 0);
return rc;
}
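The reworked elink_sfp_module_detection() above walks a fixed sequence: enable the transmitter, power the module, read its EDC mode from the EEPROM, verify it against the approved-vendor list, and only then decide whether it may stay powered. A minimal stand-alone sketch of that control flow follows; every sfp_* helper in it is a hypothetical stub for illustration, not the driver's API, and the error paths are simplified.

#include <stdio.h>

/* Hypothetical stubs standing in for the elink_* calls above. */
static void sfp_set_transmitter(int on)     { (void)on; }
static void sfp_power(int on)               { (void)on; }
static int  sfp_get_edc_mode(int *mode)     { *mode = 0x0055; return 0; }
static int  sfp_verify_module(void)         { return 0; }  /* 0 = approved */
static void sfp_fault_led(int on)           { (void)on; }
static void sfp_set_limiting_mode(int mode) { (void)mode; }

/* Same ordering as the hunk: TX on, power up, read EDC mode, verify,
 * optionally power back down on an unapproved module. */
static int sfp_module_detection(int enforce_power_down)
{
        int edc_mode;

        sfp_set_transmitter(1);
        sfp_power(1);
        if (sfp_get_edc_mode(&edc_mode) != 0)
                return -1;                    /* no valid module type */
        if (sfp_verify_module() != 0) {
                sfp_fault_led(1);             /* flag unapproved module */
                if (enforce_power_down)
                        sfp_power(0);         /* shut the module down */
                return -1;
        }
        sfp_set_limiting_mode(edc_mode);      /* LRM vs. limiting EDC */
        return 0;
}

int main(void)
{
        printf("detection rc = %d\n", sfp_module_detection(0));
        return 0;
}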
@@ -8157,22 +9723,22 @@ void elink_handle_module_detect_int(struct elink_params *params)
uint8_t gpio_num, gpio_port;
if (CHIP_IS_E3(sc)) {
phy = &params->phy[ELINK_INT_PHY];
- /* Always enable TX laser,will be disabled in case of fault */
+ /* Always enable TX laser, will be disabled in case of fault */
elink_sfp_set_transmitter(params, phy, 1);
} else {
phy = &params->phy[ELINK_EXT_PHY1];
}
- if (elink_get_mod_abs_int_cfg(sc, params->shmem_base,
+ if (elink_get_mod_abs_int_cfg(sc, params->chip_id, params->shmem_base,
params->port, &gpio_num, &gpio_port) ==
ELINK_STATUS_ERROR) {
- PMD_DRV_LOG(DEBUG, "Failed to get MOD_ABS interrupt config");
+ ELINK_DEBUG_P0(sc, "Failed to get MOD_ABS interrupt config");
return;
}
/* Set valid module led off */
elink_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
- /* Get current gpio val reflecting module plugged in / out */
+ /* Get current gpio val reflecting module plugged in / out*/
gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port);
/* Call the handling function in case module is detected */
@@ -8182,8 +9748,8 @@ void elink_handle_module_detect_int(struct elink_params *params)
elink_power_sfp_module(params, phy, 1);
elink_cb_gpio_int_write(sc, gpio_num,
- MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
- gpio_port);
+ MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+ gpio_port);
if (elink_wait_for_sfp_module_initialized(phy, params) == 0) {
elink_sfp_module_detection(phy, params);
if (CHIP_IS_E3(sc)) {
@@ -8205,12 +9771,12 @@ void elink_handle_module_detect_int(struct elink_params *params)
}
}
} else {
- PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized");
+ ELINK_DEBUG_P0(sc, "SFP+ module is not initialized");
}
} else {
elink_cb_gpio_int_write(sc, gpio_num,
- MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
- gpio_port);
+ MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+ gpio_port);
/* Module was plugged out.
* Disable transmit for this module
*/
@@ -8228,9 +9794,11 @@ static void elink_sfp_mask_fault(struct bnx2x_softc *sc,
{
uint16_t alarm_status, val;
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, alarm_status_offset, &alarm_status);
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, alarm_status_offset, &alarm_status);
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
/* Mask or enable the fault event. */
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
if (alarm_status & (1 << 0))
@@ -8239,37 +9807,42 @@ static void elink_sfp_mask_fault(struct bnx2x_softc *sc,
val |= (1 << 0);
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
}
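elink_sfp_mask_fault() above reads the latched LASI alarm register twice before deciding whether to mask or re-enable the fault event, since the first read only clears stale latched state. A self-contained sketch of that read-twice-then-decide pattern, with an invented mdio_read_alarm() stub in place of the MDIO accessors:

#include <stdint.h>
#include <stdio.h>

/* Fake MDIO read standing in for elink_cl45_read(); always reports a
 * fault in bit 0 so the masking branch is exercised. */
static uint16_t mdio_read_alarm(void)
{
        return 0x0001;
}

/* Mirror of the decision in elink_sfp_mask_fault(): the alarm register
 * is latched, so it is read twice and only the second, current value
 * drives the mask/enable choice. */
static uint16_t update_fault_mask(uint16_t ctrl)
{
        uint16_t status;

        (void)mdio_read_alarm();          /* discard stale latched value */
        status = mdio_read_alarm();       /* current alarm status */
        if (status & (1u << 0))
                ctrl &= ~(1u << 0);       /* fault present: mask the event */
        else
                ctrl |= (1u << 0);        /* no fault: enable the event */
        return ctrl;
}

int main(void)
{
        printf("new ctrl = 0x%x\n", update_fault_mask(0x0003));
        return 0;
}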
-
/******************************************************************/
/* common BNX2X8706/BNX2X8726 PHY SECTION */
/******************************************************************/
static uint8_t elink_8706_8726_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint8_t link_up = 0;
uint16_t val1, val2, rx_sd, pcs_status;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "XGXS 8706/8726");
- /* Clear RX Alarm */
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+ ELINK_DEBUG_P0(sc, "XGXS 8706/8726");
+ /* Clear RX Alarm*/
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
- /* Clear LASI indication */
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
- PMD_DRV_LOG(DEBUG, "8706/8726 LASI status 0x%x--> 0x%x", val1, val2);
+ /* Clear LASI indication*/
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ ELINK_DEBUG_P2(sc, "8706/8726 LASI status 0x%x--> 0x%x", val1, val2);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
elink_cl45_read(sc, phy,
MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
- elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
- elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
- PMD_DRV_LOG(DEBUG, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
- " link_status 0x%x", rx_sd, pcs_status, val2);
+ ELINK_DEBUG_P3(sc, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ " link_status 0x%x", rx_sd, pcs_status, val2);
/* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
* are set, or if the autoneg bit 1 is set
*/
@@ -8286,9 +9859,9 @@ static uint8_t elink_8706_8726_read_status(struct elink_phy *phy,
/* Capture 10G link fault. Read twice to clear stale value. */
if (vars->line_speed == ELINK_SPEED_10000) {
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_LASI_TXSTAT, &val1);
+ MDIO_PMA_LASI_TXSTAT, &val1);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_LASI_TXSTAT, &val1);
+ MDIO_PMA_LASI_TXSTAT, &val1);
if (val1 & (1 << 0))
vars->fault_detected = 1;
}
@@ -8300,15 +9873,15 @@ static uint8_t elink_8706_8726_read_status(struct elink_phy *phy,
/* BNX2X8706 PHY SECTION */
/******************************************************************/
static uint8_t elink_8706_config_init(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars *vars)
+ struct elink_params *params,
+ __rte_unused struct elink_vars *vars)
{
uint32_t tx_en_mode;
uint16_t cnt, val, tmp1;
struct bnx2x_softc *sc = params->sc;
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
elink_ext_phy_hw_reset(sc, params->port);
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
@@ -8322,34 +9895,35 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
break;
DELAY(1000 * 10);
}
- PMD_DRV_LOG(DEBUG, "XGXS 8706 is initialized after %d ms", cnt);
+ ELINK_DEBUG_P1(sc, "XGXS 8706 is initialized after %d ms", cnt);
if ((params->feature_config_flags &
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
uint8_t i;
uint16_t reg;
for (i = 0; i < 4; i++) {
reg = MDIO_XS_8706_REG_BANK_RX0 +
- i * (MDIO_XS_8706_REG_BANK_RX1 -
- MDIO_XS_8706_REG_BANK_RX0);
+ i * (MDIO_XS_8706_REG_BANK_RX1 -
+ MDIO_XS_8706_REG_BANK_RX0);
elink_cl45_read(sc, phy, MDIO_XS_DEVAD, reg, &val);
/* Clear first 3 bits of the control */
val &= ~0x7;
/* Set control bits according to configuration */
val |= (phy->rx_preemphasis[i] & 0x7);
- PMD_DRV_LOG(DEBUG, "Setting RX Equalizer to BNX2X8706"
- " reg 0x%x <-- val 0x%x", reg, val);
+ ELINK_DEBUG_P2(sc, "Setting RX Equalizer to BNX2X8706"
+ " reg 0x%x <-- val 0x%x", reg, val);
elink_cl45_write(sc, phy, MDIO_XS_DEVAD, reg, val);
}
}
/* Force speed */
if (phy->req_line_speed == ELINK_SPEED_10000) {
- PMD_DRV_LOG(DEBUG, "XGXS 8706 force 10Gbps");
+ ELINK_DEBUG_P0(sc, "XGXS 8706 force 10Gbps");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 0);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
/* Arm LASI for link and Tx fault. */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
@@ -8357,7 +9931,7 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
/* Force 1Gbps using autoneg with 1G advertisement */
/* Allow CL37 through CL73 */
- PMD_DRV_LOG(DEBUG, "XGXS 8706 AutoNeg");
+ ELINK_DEBUG_P0(sc, "XGXS 8706 AutoNeg");
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
@@ -8375,9 +9949,11 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x0400);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x0400);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ 0x0004);
}
elink_save_bnx2x_spirom_ver(sc, phy, params->port);
@@ -8387,27 +9963,24 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
tx_en_mode = REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- sfp_ctrl))
- & PORT_HW_CFG_TX_LASER_MASK;
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
- PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS");
+ ELINK_DEBUG_P0(sc, "Enabling TXONOFF_PWRDN_DIS");
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL,
- &tmp1);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
tmp1 |= 0x1;
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL,
- tmp1);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
}
return ELINK_STATUS_OK;
}
static uint8_t elink_8706_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
return elink_8706_8726_read_status(phy, params, vars);
}
@@ -8419,7 +9992,7 @@ static void elink_8726_config_loopback(struct elink_phy *phy,
struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "PMA/PMD ext_phy_loopback: 8726");
+ ELINK_DEBUG_P0(sc, "PMA/PMD ext_phy_loopback: 8726");
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
}
@@ -8441,7 +10014,8 @@ static void elink_8726_external_rom_boot(struct elink_phy *phy,
MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
@@ -8453,15 +10027,16 @@ static void elink_8726_external_rom_boot(struct elink_phy *phy,
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
DELAY(1000 * 200);
elink_save_bnx2x_spirom_ver(sc, phy, params->port);
}
static uint8_t elink_8726_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val1;
@@ -8471,7 +10046,7 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
&val1);
if (val1 & (1 << 15)) {
- PMD_DRV_LOG(DEBUG, "Tx is disabled");
+ ELINK_DEBUG_P0(sc, "Tx is disabled");
link_up = 0;
vars->line_speed = 0;
}
@@ -8479,12 +10054,13 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
return link_up;
}
+
static uint8_t elink_8726_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");
+ ELINK_DEBUG_P0(sc, "Initializing BNX2X8726");
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
elink_wait_reset_complete(sc, phy, params);
@@ -8499,7 +10075,7 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
elink_sfp_module_detection(phy, params);
if (phy->req_line_speed == ELINK_SPEED_1000) {
- PMD_DRV_LOG(DEBUG, "Setting 1G force");
+ ELINK_DEBUG_P0(sc, "Setting 1G force");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
elink_cl45_write(sc, phy,
@@ -8507,17 +10083,19 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
} else if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
- PMD_DRV_LOG(DEBUG, "Setting 1G clause37");
+ ELINK_DEBUG_P0(sc, "Setting 1G clause37");
/* Set Flow control */
elink_ext_phy_set_pause(params, phy, vars);
- elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
elink_cl45_write(sc, phy,
@@ -8525,16 +10103,17 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
elink_cl45_write(sc, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
/* Enable RX-ALARM control to receive interrupt for 1G speed
* change
*/
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
- } else { /* Default 10G. Set only LASI control */
+ } else { /* Default 10G. Set only LASI control */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
}
@@ -8542,9 +10121,10 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- PMD_DRV_LOG(DEBUG,
- "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
- phy->tx_preemphasis[0], phy->tx_preemphasis[1]);
+ ELINK_DEBUG_P2(sc,
+ "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8726_TX_CTRL1,
@@ -8564,10 +10144,11 @@ static void elink_8726_link_reset(struct elink_phy *phy,
struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "elink_8726_link_reset port %d", params->port);
+ ELINK_DEBUG_P1(sc, "elink_8726_link_reset port %d", params->port);
/* Set serial boot control for external load */
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL, 0x0001);
}
/******************************************************************/
@@ -8600,22 +10181,28 @@ static void elink_8727_set_link_led(struct elink_phy *phy,
break;
}
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
val &= 0xff8f;
val |= led_mode_bitmask;
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val);
val &= 0xffe0;
val |= gpio_pins_bitmask;
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
}
-
static void elink_8727_hw_reset(__rte_unused struct elink_phy *phy,
- struct elink_params *params)
-{
+ struct elink_params *params) {
uint32_t swap_val, swap_override;
uint8_t port;
/* The PHY reset is controlled by GPIO 1. Fake the port number
@@ -8626,7 +10213,7 @@ static void elink_8727_hw_reset(__rte_unused struct elink_phy *phy,
swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
port = (swap_val && swap_override) ^ 1;
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
static void elink_8727_config_speed(struct elink_phy *phy,
@@ -8637,14 +10224,14 @@ static void elink_8727_config_speed(struct elink_phy *phy,
/* Set option 1G speed */
if ((phy->req_line_speed == ELINK_SPEED_1000) ||
(phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER)) {
- PMD_DRV_LOG(DEBUG, "Setting 1G force");
+ ELINK_DEBUG_P0(sc, "Setting 1G force");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
- PMD_DRV_LOG(DEBUG, "1.7 = 0x%x", tmp1);
+ ELINK_DEBUG_P1(sc, "1.7 = 0x%x", tmp1);
/* Power down the XAUI until link is up in case of dual-media
* and 1G
*/
@@ -8661,10 +10248,10 @@ static void elink_8727_config_speed(struct elink_phy *phy,
((phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
- PMD_DRV_LOG(DEBUG, "Setting 1G clause37");
+ ELINK_DEBUG_P0(sc, "Setting 1G clause37");
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
elink_cl45_write(sc, phy,
@@ -8687,9 +10274,8 @@ static void elink_8727_config_speed(struct elink_phy *phy,
}
static uint8_t elink_8727_config_init(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars
- *vars)
+ struct elink_params *params,
+ __rte_unused struct elink_vars *vars)
{
uint32_t tx_en_mode;
uint16_t tmp1, mod_abs, tmp2;
@@ -8698,7 +10284,7 @@ static uint8_t elink_8727_config_init(struct elink_phy *phy,
elink_wait_reset_complete(sc, phy, params);
- PMD_DRV_LOG(DEBUG, "Initializing BNX2X8727");
+ ELINK_DEBUG_P0(sc, "Initializing BNX2X8727");
elink_8727_specific_func(phy, params, ELINK_PHY_INIT);
/* Initially configure MOD_ABS to interrupt when module is
@@ -8724,15 +10310,18 @@ static uint8_t elink_8727_config_init(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
elink_8727_config_speed(phy, params);
+
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- PMD_DRV_LOG(DEBUG, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
- phy->tx_preemphasis[0], phy->tx_preemphasis[1]);
+ ELINK_DEBUG_P2(sc, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
phy->tx_preemphasis[0]);
@@ -8747,25 +10336,24 @@ static uint8_t elink_8727_config_init(struct elink_phy *phy,
*/
tx_en_mode = REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- sfp_ctrl))
- & PORT_HW_CFG_TX_LASER_MASK;
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
- PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS");
+ ELINK_DEBUG_P0(sc, "Enabling TXONOFF_PWRDN_DIS");
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG,
- &tmp2);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
tmp2 |= 0x1000;
tmp2 &= 0xFFEF;
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG,
- tmp2);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &tmp2);
- elink_cl45_write(sc, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, (tmp2 & 0x7fff));
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &tmp2);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ (tmp2 & 0x7fff));
}
return ELINK_STATUS_OK;
@@ -8777,15 +10365,17 @@ static void elink_8727_handle_mod_abs(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t mod_abs, rx_alarm_status;
uint32_t val = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_feature_config[params->
- port].config));
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
- &mod_abs);
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
if (mod_abs & (1 << 8)) {
/* Module is absent */
- PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is absent");
+ ELINK_DEBUG_P0(sc,
+ "MOD_ABS indication show module is absent");
phy->media_type = ELINK_ETH_PHY_NOT_PRESENT;
/* 1. Set mod_abs to detect next module
* presence event
@@ -8810,7 +10400,8 @@ static void elink_8727_handle_mod_abs(struct elink_phy *phy,
} else {
/* Module is present */
- PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is present");
+ ELINK_DEBUG_P0(sc,
+ "MOD_ABS indication show module is present");
/* First disable transmitter, and if the module is ok, the
* module_detection will enable it
* 1. Set mod_abs to detect next module absent event ( bit 8)
@@ -8834,51 +10425,56 @@ static void elink_8727_handle_mod_abs(struct elink_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
elink_sfp_set_transmitter(params, phy, 0);
- if (elink_wait_for_sfp_module_initialized(phy, params) == 0) {
+ if (elink_wait_for_sfp_module_initialized(phy, params) == 0)
elink_sfp_module_detection(phy, params);
- } else {
- PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized");
- }
+ else
+ ELINK_DEBUG_P0(sc, "SFP+ module is not initialized");
/* Reconfigure link speed based on module type limitations */
elink_8727_config_speed(phy, params);
}
- PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
+ ELINK_DEBUG_P1(sc, "8727 RX_ALARM_STATUS 0x%x",
+ rx_alarm_status);
/* No need to check link status in case of module plugged in/out */
}
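elink_8727_handle_mod_abs() above re-arms the MOD_ABS bit in the PHY identifier register so that the next interrupt fires on the opposite transition: insertion after an absence, removal after a presence. A tiny sketch of just that re-arming decision; the helper and the demo main() are illustrative only, not driver code.

#include <stdint.h>
#include <stdio.h>

#define MOD_ABS_BIT (1u << 8)   /* 1 = module absent, as in the hunk above */

/* Re-arm MOD_ABS so the next event fires on the opposite transition,
 * mirroring the two branches of elink_8727_handle_mod_abs(). */
static uint16_t rearm_mod_abs(uint16_t phy_id)
{
        if (phy_id & MOD_ABS_BIT)
                phy_id &= ~MOD_ABS_BIT;   /* absent now: detect next insertion */
        else
                phy_id |= MOD_ABS_BIT;    /* present now: detect next removal */
        return phy_id;
}

int main(void)
{
        printf("absent  -> 0x%x\n", rearm_mod_abs(MOD_ABS_BIT));
        printf("present -> 0x%x\n", rearm_mod_abs(0));
        return 0;
}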
static uint8_t elink_8727_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
+
{
struct bnx2x_softc *sc = params->sc;
- uint8_t link_up = 0, oc_port = params->port;
+ uint8_t link_up = 0;
uint16_t link_status = 0;
uint16_t rx_alarm_status, lasi_ctrl, val1;
/* If PHY is not initialized, do not check link status */
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, &lasi_ctrl);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
if (!lasi_ctrl)
return 0;
/* Check the LASI on Rx */
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
+ &rx_alarm_status);
vars->line_speed = 0;
- PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
+ ELINK_DEBUG_P1(sc, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
- PMD_DRV_LOG(DEBUG, "8727 LASI status 0x%x", val1);
+ ELINK_DEBUG_P1(sc, "8727 LASI status 0x%x", val1);
/* Clear MSG-OUT */
elink_cl45_read(sc, phy,
@@ -8888,24 +10484,28 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
* for over current
*/
if (!(phy->flags & ELINK_FLAGS_NOC) && !(rx_alarm_status & (1 << 5))) {
- /* Check over-current using 8727 GPIO0 input */
+ /* Check over-current using 8727 GPIO0 input*/
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
&val1);
if ((val1 & (1 << 8)) == 0) {
+ uint8_t oc_port = params->port;
if (!CHIP_IS_E1x(sc))
oc_port = SC_PATH(sc) + (params->port << 1);
- PMD_DRV_LOG(DEBUG,
- "8727 Power fault has been detected on port %d",
- oc_port);
- elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, oc_port); //"Error: Power fault on Port %d has "
- // "been detected and the power to "
- // "that SFP+ module has been removed "
- // "to prevent failure of the card. "
- // "Please remove the SFP+ module and "
- // "restart the system to clear this "
- // "error.",
+ ELINK_DEBUG_P1(sc,
+ "8727 Power fault has been detected on port %d",
+ oc_port);
+ elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT,
+ oc_port);
+ /* "Error: Power fault on Port %d has "
+ * "been detected and the power to "
+ * "that SFP+ module has been removed "
+ * "to prevent failure of the card. "
+ * "Please remove the SFP+ module and "
+ * "restart the system to clear this "
+ * "error.",
+ */
/* Disable all RX_ALARMs except for mod_abs */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
@@ -8921,14 +10521,13 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
MDIO_PMA_REG_PHY_IDENTIFIER, val1);
/* Clear RX alarm */
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
elink_8727_power_module(params->sc, phy, 0);
return 0;
}
- }
+ } /* Over current check */
- /* Over current check */
/* When module absent bit is set, check module */
if (rx_alarm_status & (1 << 5)) {
elink_8727_handle_mod_abs(phy, params);
@@ -8939,10 +10538,10 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
}
if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) {
- PMD_DRV_LOG(DEBUG, "Enabling 8727 TX laser");
+ ELINK_DEBUG_P0(sc, "Enabling 8727 TX laser");
elink_sfp_set_transmitter(params, phy, 1);
} else {
- PMD_DRV_LOG(DEBUG, "Tx is disabled");
+ ELINK_DEBUG_P0(sc, "Tx is disabled");
return 0;
}
@@ -8956,26 +10555,26 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_10000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G",
- params->port);
+ ELINK_DEBUG_P1(sc, "port %x: External link up in 10G",
+ params->port);
} else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_1000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G",
- params->port);
+ ELINK_DEBUG_P1(sc, "port %x: External link up in 1G",
+ params->port);
} else {
link_up = 0;
- PMD_DRV_LOG(DEBUG, "port %x: External link is down",
- params->port);
+ ELINK_DEBUG_P1(sc, "port %x: External link is down",
+ params->port);
}
/* Capture 10G link fault. */
if (vars->line_speed == ELINK_SPEED_10000) {
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_LASI_TXSTAT, &val1);
+ MDIO_PMA_LASI_TXSTAT, &val1);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_LASI_TXSTAT, &val1);
+ MDIO_PMA_LASI_TXSTAT, &val1);
if (val1 & (1 << 0)) {
vars->fault_detected = 1;
@@ -8985,7 +10584,7 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
if (link_up) {
elink_ext_phy_resolve_fc(phy, params, vars);
vars->duplex = DUPLEX_FULL;
- PMD_DRV_LOG(DEBUG, "duplex = 0x%x", vars->duplex);
+ ELINK_DEBUG_P1(sc, "duplex = 0x%x", vars->duplex);
}
if ((ELINK_DUAL_MEDIA(params)) &&
@@ -9025,8 +10624,16 @@ static void elink_8727_link_reset(struct elink_phy *phy,
/******************************************************************/
/* BNX2X8481/BNX2X84823/BNX2X84833 PHY SECTION */
/******************************************************************/
+static int elink_is_8483x_8485x(struct elink_phy *phy)
+{
+ return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858));
+}
+
static void elink_save_848xx_spirom_version(struct elink_phy *phy,
- struct bnx2x_softc *sc, uint8_t port)
+ struct bnx2x_softc *sc,
+ uint8_t port)
{
uint16_t val, fw_ver2, cnt, i;
static struct elink_reg_set reg_set[] = {
@@ -9038,11 +10645,10 @@ static void elink_save_848xx_spirom_version(struct elink_phy *phy,
};
uint16_t fw_ver1;
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ if (elink_is_8483x_8485x(phy)) {
elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
elink_save_spirom_version(sc, port, fw_ver1 & 0xfff,
- phy->ver_addr);
+ phy->ver_addr);
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
@@ -9057,12 +10663,14 @@ static void elink_save_848xx_spirom_version(struct elink_phy *phy,
DELAY(5);
}
if (cnt == 100) {
- PMD_DRV_LOG(DEBUG, "Unable to read 848xx "
- "phy fw version(1)");
- elink_save_spirom_version(sc, port, 0, phy->ver_addr);
+ ELINK_DEBUG_P0(sc, "Unable to read 848xx "
+ "phy fw version(1)");
+ elink_save_spirom_version(sc, port, 0,
+ phy->ver_addr);
return;
}
+
/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
@@ -9074,9 +10682,10 @@ static void elink_save_848xx_spirom_version(struct elink_phy *phy,
DELAY(5);
}
if (cnt == 100) {
- PMD_DRV_LOG(DEBUG, "Unable to read 848xx phy fw "
- "version(2)");
- elink_save_spirom_version(sc, port, 0, phy->ver_addr);
+ ELINK_DEBUG_P0(sc, "Unable to read 848xx phy fw "
+ "version(2)");
+ elink_save_spirom_version(sc, port, 0,
+ phy->ver_addr);
return;
}
@@ -9090,8 +10699,8 @@ static void elink_save_848xx_spirom_version(struct elink_phy *phy,
}
}
-
-static void elink_848xx_set_led(struct bnx2x_softc *sc, struct elink_phy *phy)
+static void elink_848xx_set_led(struct bnx2x_softc *sc,
+ struct elink_phy *phy)
{
uint16_t val, offset, i;
static struct elink_reg_set reg_set[] = {
@@ -9100,29 +10709,30 @@ static void elink_848xx_set_led(struct bnx2x_softc *sc, struct elink_phy *phy)
{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
{MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
- MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
{MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
};
/* PHYC_CTL_LED_CTL */
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
val &= 0xFE00;
val |= 0x0092;
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834))
+ if (elink_is_8483x_8485x(phy))
offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
else
offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
- /* stretch_en for LED3 */
+ /* stretch_en for LED3*/
elink_cl45_read_or_write(sc, phy,
MDIO_PMA_DEVAD, offset,
MDIO_PMA_REG_84823_LED3_STRETCH_EN);
@@ -9135,8 +10745,7 @@ static void elink_848xx_specific_func(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
switch (action) {
case ELINK_PHY_INIT:
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ if (!elink_is_8483x_8485x(phy)) {
/* Save spirom version */
elink_save_848xx_spirom_version(phy, sc, params->port);
}
@@ -9153,14 +10762,15 @@ static void elink_848xx_specific_func(struct elink_phy *phy,
}
static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t autoneg_val, an_1000_val, an_10_100_val;
elink_848xx_specific_func(phy, params, ELINK_PHY_INIT);
- elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
/* set 1000 speed advertisement */
elink_cl45_read(sc, phy,
@@ -9170,24 +10780,25 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
elink_ext_phy_set_pause(params, phy, vars);
elink_cl45_read(sc, phy,
MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_AN_ADV, &an_10_100_val);
+ MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ &an_10_100_val);
elink_cl45_read(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
&autoneg_val);
/* Disable forced speed */
- autoneg_val &=
- ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 13));
+ autoneg_val &= ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) |
+ (1 << 13));
an_10_100_val &= ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8));
if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(phy->req_line_speed == ELINK_SPEED_1000)) {
an_1000_val |= (1 << 8);
autoneg_val |= (1 << 9 | 1 << 12);
if (phy->req_duplex == DUPLEX_FULL)
an_1000_val |= (1 << 9);
- PMD_DRV_LOG(DEBUG, "Advertising 1G");
+ ELINK_DEBUG_P0(sc, "Advertising 1G");
} else
an_1000_val &= ~((1 << 8) | (1 << 9));
@@ -9203,7 +10814,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
*/
autoneg_val |= (1 << 9 | 1 << 12);
an_10_100_val |= (1 << 8);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-FD");
+ ELINK_DEBUG_P0(sc, "Advertising 100M-FD");
}
if (phy->speed_cap_mask &
@@ -9212,7 +10823,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
*/
autoneg_val |= (1 << 9 | 1 << 12);
an_10_100_val |= (1 << 7);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-HD");
+ ELINK_DEBUG_P0(sc, "Advertising 100M-HD");
}
if ((phy->speed_cap_mask &
@@ -9220,7 +10831,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
(phy->supported & ELINK_SUPPORTED_10baseT_Full)) {
an_10_100_val |= (1 << 6);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-FD");
+ ELINK_DEBUG_P0(sc, "Advertising 10M-FD");
}
if ((phy->speed_cap_mask &
@@ -9228,14 +10839,15 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
(phy->supported & ELINK_SUPPORTED_10baseT_Half)) {
an_10_100_val |= (1 << 5);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-HD");
+ ELINK_DEBUG_P0(sc, "Advertising 10M-HD");
}
}
/* Only 10/100 are allowed to work in FORCE mode */
if ((phy->req_line_speed == ELINK_SPEED_100) &&
(phy->supported &
- (ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full))) {
+ (ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full))) {
autoneg_val |= (1 << 13);
/* Enabled AUTO-MDIX when autoneg is disabled */
elink_cl45_write(sc, phy,
@@ -9243,16 +10855,17 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
(1 << 15 | 1 << 9 | 7 << 0));
/* The PHY needs this set even for forced link. */
an_10_100_val |= (1 << 8) | (1 << 7);
- PMD_DRV_LOG(DEBUG, "Setting 100M force");
+ ELINK_DEBUG_P0(sc, "Setting 100M force");
}
if ((phy->req_line_speed == ELINK_SPEED_10) &&
(phy->supported &
- (ELINK_SUPPORTED_10baseT_Half | ELINK_SUPPORTED_10baseT_Full))) {
+ (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full))) {
/* Enabled AUTO-MDIX when autoneg is disabled */
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
(1 << 15 | 1 << 9 | 7 << 0));
- PMD_DRV_LOG(DEBUG, "Setting 10M force");
+ ELINK_DEBUG_P0(sc, "Setting 10M force");
}
elink_cl45_write(sc, phy,
@@ -9265,42 +10878,44 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
/* Always write this if this is not 84833/4.
* For 84833/4, write it only when it's a forced speed.
*/
- if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) ||
+ if (!elink_is_8483x_8485x(phy) ||
((autoneg_val & (1 << 12)) == 0))
elink_cl45_write(sc, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
(phy->req_line_speed == ELINK_SPEED_10000)) {
- PMD_DRV_LOG(DEBUG, "Advertising 10G");
- /* Restart autoneg for 10G */
+ ELINK_DEBUG_P0(sc, "Advertising 10G");
+ /* Restart autoneg for 10G*/
- elink_cl45_read_or_write(sc, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
- 0x1000);
+ elink_cl45_read_or_write(
+ sc, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 0x1000);
elink_cl45_write(sc, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x3200);
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
} else
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 1);
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 1);
return ELINK_STATUS_OK;
}
static uint8_t elink_8481_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
- /* Restore normal power mode */
+ /* Restore normal power mode*/
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
elink_ext_phy_hw_reset(sc, params->port);
@@ -9310,101 +10925,219 @@ static uint8_t elink_8481_config_init(struct elink_phy *phy,
return elink_848xx_cmn_config_init(phy, params, vars);
}
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
-static elink_status_t elink_84833_cmd_hdlr(struct elink_phy *phy,
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static elink_status_t elink_84858_cmd_hdlr(struct elink_phy *phy,
struct elink_params *params,
- uint16_t fw_cmd, uint16_t cmd_args[],
- int argc)
+ uint16_t fw_cmd,
+ uint16_t cmd_args[], int argc)
{
int idx;
uint16_t val;
struct bnx2x_softc *sc = params->sc;
+
+ /* Step 1: Poll the STATUS register to see whether the previous command
+ * is in progress or the system is busy (CMD_IN_PROGRESS or
+ * SYSTEM_BUSY). If previous command is in progress or system is busy,
+ * check again until the previous command finishes execution and the
+ * system is available for taking command
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+ (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+ break;
+ DELAY(1000 * 1);
+ }
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
+ ELINK_DEBUG_P0(sc, "FW cmd: FW not ready.");
+ return ELINK_STATUS_ERROR;
+ }
+
+ /* Step2: If any parameters are required for the function, write them
+ * to the required DATA registers
+ */
+
+ for (idx = 0; idx < argc; idx++) {
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
+
+ /* Step3: When the firmware is ready for commands, write the 'Command
+ * code' to the CMD register
+ */
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+ /* Step4: Once the command has been written, poll the STATUS register
+ * to check whether the command has completed (CMD_COMPLETED_PASS/
+ * CMD_FOR_CMDS or CMD_COMPLETED_ERROR).
+ */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+ break;
+ DELAY(1000 * 1);
+ }
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+ ELINK_DEBUG_P0(sc, "FW cmd failed.");
+ return ELINK_STATUS_ERROR;
+ }
+ /* Step5: Once the command has completed, read the specified DATA
+ * registers for any saved results for the command, if applicable
+ */
+
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
+
+ return ELINK_STATUS_OK;
+}
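The Step 1-5 comments added in elink_84858_cmd_hdlr() describe a generic firmware mailbox handshake: wait until the handler is idle, write the arguments, issue the command, poll for completion, then read back any results. The sketch below reproduces that sequence over a fake register file; the register indices and status codes are invented for the demo and are not the PHY's real values.

#include <stdint.h>
#include <stdio.h>

#define ST_BUSY    0x0002
#define ST_PASS    0x0008
#define ST_ERROR   0x0009
#define REG_STATUS 0
#define REG_CMD    1
#define REG_DATA0  2
#define MAX_WAIT   300

static uint16_t regs[8];                       /* fake register file */
static uint16_t reg_read(int r) { return regs[r]; }
static void reg_write(int r, uint16_t v)
{
        regs[r] = v;
        if (r == REG_CMD)
                regs[REG_STATUS] = ST_PASS;    /* simulate instant completion */
}

/* Same shape as the Step 1-5 handshake in elink_84858_cmd_hdlr(). */
static int fw_cmd(uint16_t cmd, uint16_t *args, int argc)
{
        int i;

        for (i = 0; i < MAX_WAIT; i++)         /* Step 1: wait until idle */
                if (reg_read(REG_STATUS) != ST_BUSY)
                        break;
        if (i == MAX_WAIT)
                return -1;
        for (i = 0; i < argc; i++)             /* Step 2: write arguments */
                reg_write(REG_DATA0 + i, args[i]);
        reg_write(REG_CMD, cmd);               /* Step 3: issue the command */
        for (i = 0; i < MAX_WAIT; i++) {       /* Step 4: poll for completion */
                uint16_t st = reg_read(REG_STATUS);
                if (st == ST_PASS)
                        break;
                if (st == ST_ERROR)
                        return -1;
        }
        if (i == MAX_WAIT)
                return -1;
        for (i = 0; i < argc; i++)             /* Step 5: read results back */
                args[i] = reg_read(REG_DATA0 + i);
        return 0;
}

int main(void)
{
        uint16_t args[2] = { 0x1, 0x0 };
        int rc = fw_cmd(0x8001, args, 2);

        printf("cmd rc=%d, data0=0x%x\n", rc, args[0]);
        return 0;
}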
+
+static elink_status_t elink_84833_cmd_hdlr(struct elink_phy *phy,
+ struct elink_params *params, uint16_t fw_cmd,
+ uint16_t cmd_args[], int argc, int process)
+{
+ int idx;
+ uint16_t val;
+ struct bnx2x_softc *sc = params->sc;
+ elink_status_t rc = ELINK_STATUS_OK;
+
+ if (process == PHY84833_MB_PROCESS2) {
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
- PHY84833_STATUS_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ }
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
DELAY(1000 * 1);
}
- if (idx >= PHY84833_CMDHDLR_WAIT) {
- PMD_DRV_LOG(DEBUG, "FW cmd: FW not ready.");
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
+ ELINK_DEBUG_P0(sc, "FW cmd: FW not ready.");
+ /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR
+ * clear the status to CMD_CLEAR_COMPLETE
+ */
+ if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
+ val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
+ elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+ }
return ELINK_STATUS_ERROR;
}
-
- /* Prepare argument(s) and issue command */
+ if (process == PHY84833_MB_PROCESS1 ||
+ process == PHY84833_MB_PROCESS2) {
+ /* Prepare argument(s) */
for (idx = 0; idx < argc; idx++) {
elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
- cmd_args[idx]);
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
}
+
+ /* Issue command */
elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
DELAY(1000 * 1);
}
- if ((idx >= PHY84833_CMDHDLR_WAIT) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
- PMD_DRV_LOG(DEBUG, "FW cmd failed.");
- return ELINK_STATUS_ERROR;
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ ELINK_DEBUG_P0(sc, "FW cmd failed.");
+ rc = ELINK_STATUS_ERROR;
}
+ if (process == PHY84833_MB_PROCESS3 && rc == ELINK_STATUS_OK) {
/* Gather returning data */
for (idx = 0; idx < argc; idx++) {
elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
}
+ }
+ if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
+ val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
- PHY84833_STATUS_CMD_CLEAR_COMPLETE);
- return ELINK_STATUS_OK;
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+ }
+ return rc;
}
-static elink_status_t elink_84833_pair_swap_cfg(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars
- *vars)
+static elink_status_t elink_848xx_cmd_hdlr(struct elink_phy *phy,
+ struct elink_params *params,
+ uint16_t fw_cmd,
+ uint16_t cmd_args[], int argc,
+ int process)
+{
+ struct bnx2x_softc *sc = params->sc;
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858) ||
+ (REG_RD(sc, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port])) &
+ LINK_ATTR_84858)) {
+ return elink_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ } else {
+ return elink_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc, process);
+ }
+}
+
+static elink_status_t elink_848xx_pair_swap_cfg(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars *vars)
{
uint32_t pair_swap;
- uint16_t data[PHY84833_CMDHDLR_MAX_ARGS];
+ uint16_t data[PHY848xx_CMDHDLR_MAX_ARGS];
elink_status_t status;
struct bnx2x_softc *sc = params->sc;
/* Check for configuration. */
pair_swap = REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- xgbt_phy_cfg)) &
- PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
if (pair_swap == 0)
return ELINK_STATUS_OK;
/* Only the second argument is used for this command */
- data[1] = (uint16_t) pair_swap;
+ data[1] = (uint16_t)pair_swap;
- status = elink_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data,
- PHY84833_CMDHDLR_MAX_ARGS);
- if (status == ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Pairswap OK, val=0x%x", data[1]);
- }
+ status = elink_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_PAIR_SWAP, data,
+ 2, PHY84833_MB_PROCESS2);
+ if (status == ELINK_STATUS_OK)
+ ELINK_DEBUG_P1(sc, "Pairswap OK, val=0x%x", data[1]);
return status;
}
static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
- uint32_t shmem_base_path[],
- __rte_unused uint32_t chip_id)
+ uint32_t shmem_base_path[],
+ __rte_unused uint32_t chip_id)
{
uint32_t reset_pin[2];
uint32_t idx;
@@ -9414,54 +11147,50 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
for (idx = 0; idx < 2; idx++) {
/* Map config param to register bit. */
reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] +
- offsetof(struct shmem_region,
- dev_info.
- port_hw_config[0].
- e3_cmn_pin_cfg));
- reset_pin[idx] =
- (reset_pin[idx] & PORT_HW_CFG_E3_PHY_RESET_MASK) >>
- PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+ reset_pin[idx] = (reset_pin[idx] &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
reset_pin[idx] -= PIN_CFG_GPIO0_P0;
reset_pin[idx] = (1 << reset_pin[idx]);
}
- reset_gpios = (uint8_t) (reset_pin[0] | reset_pin[1]);
+ reset_gpios = (uint8_t)(reset_pin[0] | reset_pin[1]);
} else {
/* E2, look from diff place of shmem. */
for (idx = 0; idx < 2; idx++) {
reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] +
- offsetof(struct shmem_region,
- dev_info.
- port_hw_config[0].
- default_cfg));
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].default_cfg));
reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
reset_pin[idx] = (1 << reset_pin[idx]);
}
- reset_gpios = (uint8_t) (reset_pin[0] | reset_pin[1]);
+ reset_gpios = (uint8_t)(reset_pin[0] | reset_pin[1]);
}
return reset_gpios;
}
static void elink_84833_hw_reset_phy(struct elink_phy *phy,
- struct elink_params *params)
+ struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
uint8_t reset_gpios;
uint32_t other_shmem_base_addr = REG_RD(sc, params->shmem2_base +
- offsetof(struct shmem2_region,
- other_shmem_base_addr));
+ offsetof(struct shmem2_region,
+ other_shmem_base_addr));
uint32_t shmem_base_path[2];
/* Work around for 84833 LED failure inside RESET status */
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ MDIO_AN_REG_8481_MII_CTRL_FORCE_1G);
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
- MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
+ MDIO_AN_REG_8481_1G_100T_EXT_CTRL,
+ MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF);
shmem_base_path[0] = params->shmem_base;
shmem_base_path[1] = other_shmem_base_addr;
@@ -9472,23 +11201,25 @@ static void elink_84833_hw_reset_phy(struct elink_phy *phy,
elink_cb_gpio_mult_write(sc, reset_gpios,
MISC_REGISTERS_GPIO_OUTPUT_LOW);
DELAY(10);
- PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
+ ELINK_DEBUG_P1(sc, "84833 hw reset on pin values 0x%x",
+ reset_gpios);
}
static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
elink_status_t rc;
+ struct bnx2x_softc *sc = params->sc;
uint16_t cmd_args = 0;
- PMD_DRV_LOG(DEBUG, "Don't Advertise 10GBase-T EEE");
+ ELINK_DEBUG_P0(sc, "Don't Advertise 10GBase-T EEE");
/* Prevent Phy from working in EEE and advertising it */
- rc = elink_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = elink_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
+ &cmd_args, 1, PHY84833_MB_PROCESS1);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "EEE disable failed.");
+ ELINK_DEBUG_P0(sc, "EEE disable failed.");
return rc;
}
@@ -9496,16 +11227,17 @@ static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
}
static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
elink_status_t rc;
+ struct bnx2x_softc *sc = params->sc;
uint16_t cmd_args = 1;
- rc = elink_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = elink_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
+ &cmd_args, 1, PHY84833_MB_PROCESS1);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "EEE enable failed.");
+ ELINK_DEBUG_P0(sc, "EEE enable failed.");
return rc;
}
@@ -9514,14 +11246,14 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
#define PHY84833_CONSTANT_LATENCY 1193
static uint8_t elink_848x3_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port, initialize = 1;
uint16_t val;
uint32_t actual_phy_selection;
- uint16_t cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+ uint16_t cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
elink_status_t rc = ELINK_STATUS_OK;
DELAY(1000 * 1);
@@ -9533,19 +11265,20 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) {
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
} else {
/* MDIO reset */
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x8000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x8000);
}
elink_wait_reset_complete(sc, phy, params);
/* Wait for GPHY to come out of reset */
DELAY(1000 * 50);
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ if (!elink_is_8483x_8485x(phy)) {
/* BNX2X84823 requires that XGXS links up first @ 10G for normal
* behavior.
*/
@@ -9556,7 +11289,19 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
elink_program_serdes(&params->phy[ELINK_INT_PHY], params, vars);
vars->line_speed = temp;
}
+ /* Check if this is actually BNX2X84858 */
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858) {
+ uint16_t hw_rev;
+
+ elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+ if (hw_rev == BNX2X84858_PHY_ID) {
+ params->link_attr_sync |= LINK_ATTR_84858;
+ elink_update_link_attr(params, params->link_attr_sync);
+ }
+ }
+ /* Set dual-media configuration according to configuration */
elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -9598,39 +11343,33 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, val);
- PMD_DRV_LOG(DEBUG, "Multi_phy config = 0x%x, Media control = 0x%x",
- params->multi_phy_config, val);
+ ELINK_DEBUG_P2(sc, "Multi_phy config = 0x%x, Media control = 0x%x",
+ params->multi_phy_config, val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
- elink_84833_pair_swap_cfg(phy, params, vars);
+ if (elink_is_8483x_8485x(phy)) {
+ elink_848xx_pair_swap_cfg(phy, params, vars);
/* Keep AutogrEEEn disabled. */
cmd_args[0] = 0x0;
cmd_args[1] = 0x0;
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
- rc = elink_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args,
- PHY84833_CMDHDLR_MAX_ARGS);
- if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Cfg AutogrEEEn failed.");
- }
+ rc = elink_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+ 4, PHY84833_MB_PROCESS1);
+ if (rc != ELINK_STATUS_OK)
+ ELINK_DEBUG_P0(sc, "Cfg AutogrEEEn failed.");
}
- if (initialize) {
+ if (initialize)
rc = elink_848xx_cmn_config_init(phy, params, vars);
- } else {
+ else
elink_save_848xx_spirom_version(phy, sc, params->port);
- }
/* 84833 PHY has a better feature and doesn't need to support this. */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) {
uint32_t cms_enable = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.
- port_hw_config[params->
- port].
- default_cfg)) &
- PORT_HW_CFG_ENABLE_CMS_MASK;
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
elink_cl45_read(sc, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
@@ -9651,7 +11390,7 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
elink_eee_has_cap(params)) {
rc = elink_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers");
+ ELINK_DEBUG_P0(sc, "Failed to configure EEE timers");
elink_8483x_disable_eee(phy, params, vars);
return rc;
}
@@ -9664,39 +11403,40 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
else
rc = elink_8483x_disable_eee(phy, params, vars);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Failed to set EEE advertisement");
+ ELINK_DEBUG_P0(sc, "Failed to set EEE advertisement");
return rc;
}
} else {
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) {
+ if (elink_is_8483x_8485x(phy)) {
/* Bring PHY out of super isolate mode as the final step. */
elink_cl45_read_and_write(sc, phy,
MDIO_CTL_DEVAD,
MDIO_84833_TOP_CFG_XGPHY_STRAP1,
- (uint16_t) ~
- MDIO_84833_SUPER_ISOLATE);
+ (uint16_t)~MDIO_84833_SUPER_ISOLATE);
}
return rc;
}
static uint8_t elink_848xx_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val, val1, val2;
uint8_t link_up = 0;
+
/* Check 10G-BaseT link status */
/* Check PMD signal ok */
- elink_cl45_read(sc, phy, MDIO_AN_DEVAD, 0xFFFA, &val1);
elink_cl45_read(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, &val2);
- PMD_DRV_LOG(DEBUG, "BNX2X848xx: PMD_SIGNAL 1.a811 = 0x%x", val2);
+ MDIO_AN_DEVAD, 0xFFFA, &val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ &val2);
+ ELINK_DEBUG_P1(sc, "BNX2X848xx: PMD_SIGNAL 1.a811 = 0x%x", val2);
/* Check link 10G */
if (val2 & (1 << 11)) {
@@ -9704,8 +11444,8 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
vars->duplex = DUPLEX_FULL;
link_up = 1;
elink_ext_phy_10G_an_resolve(sc, phy, vars);
- } else { /* Check Legacy speed link */
- uint16_t legacy_status, legacy_speed, mii_ctrl;
+ } else { /* Check Legacy speed link */
+ uint16_t legacy_status, legacy_speed;
/* Enable expansion register 0x42 (Operation mode status) */
elink_cl45_write(sc, phy,
@@ -9718,7 +11458,8 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
&legacy_status);
- PMD_DRV_LOG(DEBUG, "Legacy speed status = 0x%x", legacy_status);
+ ELINK_DEBUG_P1(sc, "Legacy speed status = 0x%x",
+ legacy_status);
link_up = ((legacy_status & (1 << 11)) == (1 << 11));
legacy_speed = (legacy_status & (3 << 9));
if (legacy_speed == (0 << 9))
@@ -9727,13 +11468,15 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
vars->line_speed = ELINK_SPEED_100;
else if (legacy_speed == (2 << 9))
vars->line_speed = ELINK_SPEED_1000;
- else { /* Should not happen: Treat as link down */
+ else { /* Should not happen: Treat as link down */
vars->line_speed = 0;
link_up = 0;
}
if (params->feature_config_flags &
- ELINK_FEATURE_CONFIG_IEEE_PHY_TEST) {
+ ELINK_FEATURE_CONFIG_IEEE_PHY_TEST) {
+ uint16_t mii_ctrl;
+
elink_cl45_read(sc, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_LEGACY_MII_CTRL,
@@ -9748,10 +11491,10 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
else
vars->duplex = DUPLEX_HALF;
- PMD_DRV_LOG(DEBUG,
- "Link is up in %dMbps, is_duplex_full= %d",
- vars->line_speed,
- (vars->duplex == DUPLEX_FULL));
+ ELINK_DEBUG_P2(sc,
+ "Link is up in %dMbps, is_duplex_full= %d",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
/* Check legacy speed AN resolution */
elink_cl45_read(sc, phy,
MDIO_AN_DEVAD,
@@ -9759,19 +11502,19 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
&val);
if (val & (1 << 5))
vars->link_status |=
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
elink_cl45_read(sc, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
&val);
if ((val & (1 << 0)) == 0)
vars->link_status |=
- LINK_STATUS_PARALLEL_DETECTION_USED;
+ LINK_STATUS_PARALLEL_DETECTION_USED;
}
}
if (link_up) {
- PMD_DRV_LOG(DEBUG, "BNX2X848x3: link speed is %d",
- vars->line_speed);
+ ELINK_DEBUG_P1(sc, "BNX2X848x3: link speed is %d",
+ vars->line_speed);
elink_ext_phy_resolve_fc(phy, params, vars);
/* Read LP advertised speeds */
@@ -9779,48 +11522,47 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
MDIO_AN_REG_CL37_FC_LP, &val);
if (val & (1 << 5))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
if (val & (1 << 6))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
if (val & (1 << 7))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
if (val & (1 << 8))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
if (val & (1 << 9))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_1000T_STATUS, &val);
if (val & (1 << 10))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
if (val & (1 << 11))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_MASTER_STATUS, &val);
if (val & (1 << 11))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834))
+ if (elink_is_8483x_8485x(phy))
elink_eee_an_resolve(phy, params, vars);
}
return link_up;
}
-static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
- uint16_t * len)
+static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t *str,
+ uint16_t *len)
{
elink_status_t status = ELINK_STATUS_OK;
uint32_t spirom_ver;
@@ -9833,17 +11575,18 @@ static void elink_8481_hw_reset(__rte_unused struct elink_phy *phy,
struct elink_params *params)
{
elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
}
static void elink_8481_link_reset(struct elink_phy *phy,
- struct elink_params *params)
+ struct elink_params *params)
{
elink_cl45_write(params->sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
- elink_cl45_write(params->sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+ elink_cl45_write(params->sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
}
static void elink_848x3_link_reset(struct elink_phy *phy,
@@ -9860,7 +11603,8 @@ static void elink_848x3_link_reset(struct elink_phy *phy,
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) {
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
} else {
elink_cl45_read(sc, phy,
MDIO_CTL_DEVAD,
@@ -9877,47 +11621,52 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
{
struct bnx2x_softc *sc = params->sc;
uint16_t val;
- __rte_unused uint8_t port;
+ uint8_t port;
if (!(CHIP_IS_E1x(sc)))
port = SC_PATH(sc);
else
port = params->port;
-
switch (mode) {
case ELINK_LED_MODE_OFF:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OFF", port);
+ ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE OFF", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
/* Set LED masks */
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK, 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK, 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK, 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
} else {
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
}
break;
case ELINK_LED_MODE_FRONT_PANEL_OFF:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE FRONT PANEL OFF", port);
+ ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE FRONT PANEL OFF",
+ port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -9925,25 +11674,31 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
/* Set LED masks */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK, 0x0);
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK, 0x0);
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK, 0x20);
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
} else {
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, 0x0);
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
/* Disable MI_INT interrupt before setting LED4
* source to constant off.
*/
@@ -9951,12 +11706,13 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
params->port * 4) &
ELINK_NIG_MASK_MI_INT) {
params->link_flags |=
- ELINK_LINK_FLAGS_INT_DISABLED;
+ ELINK_LINK_FLAGS_INT_DISABLED;
- elink_bits_dis(sc,
- NIG_REG_MASK_INTERRUPT_PORT0
- + params->port * 4,
- ELINK_NIG_MASK_MI_INT);
+ elink_bits_dis(
+ sc,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port * 4,
+ ELINK_NIG_MASK_MI_INT);
}
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
@@ -9967,42 +11723,50 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
break;
case ELINK_LED_MODE_ON:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE ON", port);
+ ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE ON", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
/* Set control reg */
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
val &= 0x8000;
val |= 0x2492;
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
/* Set LED masks */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK, 0x20);
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK, 0x20);
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK, 0x0);
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
} else {
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, 0x20);
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
/* Disable MI_INT interrupt before setting LED4
* source to constant on.
*/
@@ -10010,12 +11774,13 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
params->port * 4) &
ELINK_NIG_MASK_MI_INT) {
params->link_flags |=
- ELINK_LINK_FLAGS_INT_DISABLED;
+ ELINK_LINK_FLAGS_INT_DISABLED;
- elink_bits_dis(sc,
- NIG_REG_MASK_INTERRUPT_PORT0
- + params->port * 4,
- ELINK_NIG_MASK_MI_INT);
+ elink_bits_dis(
+ sc,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port * 4,
+ ELINK_NIG_MASK_MI_INT);
}
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
@@ -10027,7 +11792,7 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
case ELINK_LED_MODE_OPER:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OPER", port);
+ ELINK_DEBUG_P1(sc, "Port 0x%x: LED MODE OPER", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -10035,14 +11800,13 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
/* Set control reg */
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
if (!((val &
MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
- >>
- MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT))
- {
- PMD_DRV_LOG(DEBUG, "Setting LINK_SIGNAL");
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ ELINK_DEBUG_P0(sc, "Setting LINK_SIGNAL");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -10052,19 +11816,23 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
/* Set LED masks */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, 0x10);
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK, 0x80);
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK, 0x98);
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK, 0x40);
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
} else {
/* EXTPHY2 LED mode indicate that the 100M/1G/10G LED
@@ -10077,18 +11845,22 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK, val);
+ MDIO_PMA_REG_8481_LED1_MASK,
+ val);
/* Tell LED3 to blink on source */
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
val &= ~(7 << 6);
- val |= (1 << 6); /* A83B[8:6]= 1 */
+ val |= (1 << 6); /* A83B[8:6]= 1 */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, val);
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) {
/* Restore LED4 source to external link,
* and re-enable interrupts.
*/
@@ -10100,14 +11872,14 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
ELINK_LINK_FLAGS_INT_DISABLED) {
elink_link_int_enable(params);
params->link_flags &=
- ~ELINK_LINK_FLAGS_INT_DISABLED;
+ ~ELINK_LINK_FLAGS_INT_DISABLED;
}
}
}
break;
}
- /* This is a workaround for E3+84833 until autoneg
+ /* This is a workaround for E3 + 84833 until autoneg
* restart is fixed in f/w
*/
if (CHIP_IS_E3(sc)) {
@@ -10132,7 +11904,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy,
elink_cl22_write(sc, phy,
MDIO_REG_GPHY_SHADOW,
MDIO_REG_GPHY_SHADOW_LED_SEL2);
- elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
+ elink_cl22_read(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
temp &= ~(0xf << 4);
temp |= (0x6 << 4);
elink_cl22_write(sc, phy,
@@ -10147,15 +11921,15 @@ static void elink_54618se_specific_func(struct elink_phy *phy,
}
static uint8_t elink_54618se_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port;
uint16_t autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
uint32_t cfg_pin;
- PMD_DRV_LOG(DEBUG, "54618SE cfg init");
+ ELINK_DEBUG_P0(sc, "54618SE cfg init");
DELAY(1000 * 1);
/* This works with E3 only, no need to check the chip
@@ -10164,11 +11938,10 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
port = params->port;
cfg_pin = (REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- e3_cmn_pin_cfg)) &
- PORT_HW_CFG_E3_PHY_RESET_MASK) >>
- PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
/* Drive pin high to bring the GPHY out of reset. */
elink_set_cfg_pin(sc, cfg_pin, 1);
@@ -10177,63 +11950,76 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
DELAY(1000 * 50);
/* reset phy */
- elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x8000);
+ elink_cl22_write(sc, phy,
+ MDIO_PMA_REG_CTRL, 0x8000);
elink_wait_reset_complete(sc, phy, params);
/* Wait for GPHY to reset */
DELAY(1000 * 50);
+
elink_54618se_specific_func(phy, params, ELINK_PHY_INIT);
/* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
elink_cl22_write(sc, phy,
- MDIO_REG_GPHY_SHADOW,
- MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
- elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+ elink_cl22_read(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
elink_cl22_write(sc, phy,
- MDIO_REG_GPHY_SHADOW,
- MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
/* Set up fc */
/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
fc_val = 0;
if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
/* Read all advertisement */
- elink_cl22_read(sc, phy, 0x09, &an_1000_val);
+ elink_cl22_read(sc, phy,
+ 0x09,
+ &an_1000_val);
- elink_cl22_read(sc, phy, 0x04, &an_10_100_val);
+ elink_cl22_read(sc, phy,
+ 0x04,
+ &an_10_100_val);
- elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &autoneg_val);
+ elink_cl22_read(sc, phy,
+ MDIO_PMA_REG_CTRL,
+ &autoneg_val);
/* Disable forced speed */
- autoneg_val &=
- ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 13));
- an_10_100_val &=
- ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 10) |
- (1 << 11));
+ autoneg_val &= ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) |
+ (1 << 13));
+ an_10_100_val &= ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) |
+ (1 << 10) | (1 << 11));
if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
- (phy->req_line_speed == ELINK_SPEED_1000)) {
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == ELINK_SPEED_1000)) {
an_1000_val |= (1 << 8);
autoneg_val |= (1 << 9 | 1 << 12);
if (phy->req_duplex == DUPLEX_FULL)
an_1000_val |= (1 << 9);
- PMD_DRV_LOG(DEBUG, "Advertising 1G");
+ ELINK_DEBUG_P0(sc, "Advertising 1G");
} else
an_1000_val &= ~((1 << 8) | (1 << 9));
- elink_cl22_write(sc, phy, 0x09, an_1000_val);
- elink_cl22_read(sc, phy, 0x09, &an_1000_val);
+ elink_cl22_write(sc, phy,
+ 0x09,
+ an_1000_val);
+ elink_cl22_read(sc, phy,
+ 0x09,
+ &an_1000_val);
/* Advertise 10/100 link speed */
if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) {
@@ -10241,25 +12027,25 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
an_10_100_val |= (1 << 5);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-HD");
+ ELINK_DEBUG_P0(sc, "Advertising 10M-HD");
}
if (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
an_10_100_val |= (1 << 6);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-FD");
+ ELINK_DEBUG_P0(sc, "Advertising 10M-FD");
}
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
an_10_100_val |= (1 << 7);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-HD");
+ ELINK_DEBUG_P0(sc, "Advertising 100M-HD");
}
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
an_10_100_val |= (1 << 8);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-FD");
+ ELINK_DEBUG_P0(sc, "Advertising 100M-FD");
}
}
@@ -10267,13 +12053,17 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
if (phy->req_line_speed == ELINK_SPEED_100) {
autoneg_val |= (1 << 13);
/* Enabled AUTO-MDIX when autoneg is disabled */
- elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
- PMD_DRV_LOG(DEBUG, "Setting 100M force");
+ elink_cl22_write(sc, phy,
+ 0x18,
+ (1 << 15 | 1 << 9 | 7 << 0));
+ ELINK_DEBUG_P0(sc, "Setting 100M force");
}
if (phy->req_line_speed == ELINK_SPEED_10) {
/* Enabled AUTO-MDIX when autoneg is disabled */
- elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
- PMD_DRV_LOG(DEBUG, "Setting 10M force");
+ elink_cl22_write(sc, phy,
+ 0x18,
+ (1 << 15 | 1 << 9 | 7 << 0));
+ ELINK_DEBUG_P0(sc, "Setting 10M force");
}
if ((phy->flags & ELINK_FLAGS_EEE) && elink_eee_has_cap(params)) {
@@ -10288,7 +12078,7 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
rc = elink_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers");
+ ELINK_DEBUG_P0(sc, "Failed to configure EEE timers");
elink_eee_disable(phy, params, vars);
} else if ((params->eee_mode & ELINK_EEE_MODE_ADV_LPI) &&
(phy->req_duplex == DUPLEX_FULL) &&
@@ -10302,38 +12092,42 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
elink_eee_advertise(phy, params, vars,
SHMEM_EEE_1G_ADV);
} else {
- PMD_DRV_LOG(DEBUG, "Don't Advertise 1GBase-T EEE");
+ ELINK_DEBUG_P0(sc, "Don't Advertise 1GBase-T EEE");
elink_eee_disable(phy, params, vars);
}
} else {
- vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
- SHMEM_EEE_SUPPORTED_SHIFT;
+ vars->eee_status &= ((uint32_t)(~SHMEM_EEE_1G_ADV) <<
+ SHMEM_EEE_SUPPORTED_SHIFT);
if (phy->flags & ELINK_FLAGS_EEE) {
/* Handle legacy auto-grEEEn */
if (params->feature_config_flags &
ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
temp = 6;
- PMD_DRV_LOG(DEBUG, "Enabling Auto-GrEEEn");
+ ELINK_DEBUG_P0(sc, "Enabling Auto-GrEEEn");
} else {
temp = 0;
- PMD_DRV_LOG(DEBUG, "Don't Adv. EEE");
+ ELINK_DEBUG_P0(sc, "Don't Adv. EEE");
}
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_EEE_ADV, temp);
}
}
- elink_cl22_write(sc, phy, 0x04, an_10_100_val | fc_val);
+ elink_cl22_write(sc, phy,
+ 0x04,
+ an_10_100_val | fc_val);
if (phy->req_duplex == DUPLEX_FULL)
autoneg_val |= (1 << 8);
- elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, autoneg_val);
+ elink_cl22_write(sc, phy,
+ MDIO_PMA_REG_CTRL, autoneg_val);
return ELINK_STATUS_OK;
}
+
static void elink_5461x_set_link_led(struct elink_phy *phy,
struct elink_params *params, uint8_t mode)
{
@@ -10341,11 +12135,14 @@ static void elink_5461x_set_link_led(struct elink_phy *phy,
uint16_t temp;
elink_cl22_write(sc, phy,
- MDIO_REG_GPHY_SHADOW, MDIO_REG_GPHY_SHADOW_LED_SEL1);
- elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ elink_cl22_read(sc, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
temp &= 0xff00;
- PMD_DRV_LOG(DEBUG, "54618x set link led (mode=%x)", mode);
+ ELINK_DEBUG_P1(sc, "54618x set link led (mode=%x)", mode);
switch (mode) {
case ELINK_LED_MODE_FRONT_PANEL_OFF:
case ELINK_LED_MODE_OFF:
@@ -10361,11 +12158,12 @@ static void elink_5461x_set_link_led(struct elink_phy *phy,
break;
}
elink_cl22_write(sc, phy,
- MDIO_REG_GPHY_SHADOW,
- MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
return;
}
+
static void elink_54618se_link_reset(struct elink_phy *phy,
struct elink_params *params)
{
@@ -10382,19 +12180,18 @@ static void elink_54618se_link_reset(struct elink_phy *phy,
*/
port = params->port;
cfg_pin = (REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- e3_cmn_pin_cfg)) &
- PORT_HW_CFG_E3_PHY_RESET_MASK) >>
- PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
/* Drive pin low to put GPHY in reset. */
elink_set_cfg_pin(sc, cfg_pin, 0);
}
static uint8_t elink_54618se_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val;
@@ -10402,11 +12199,15 @@ static uint8_t elink_54618se_read_status(struct elink_phy *phy,
uint16_t legacy_status, legacy_speed;
/* Get speed operation status */
- elink_cl22_read(sc, phy, MDIO_REG_GPHY_AUX_STATUS, &legacy_status);
- PMD_DRV_LOG(DEBUG, "54618SE read_status: 0x%x", legacy_status);
+ elink_cl22_read(sc, phy,
+ MDIO_REG_GPHY_AUX_STATUS,
+ &legacy_status);
+ ELINK_DEBUG_P1(sc, "54618SE read_status: 0x%x", legacy_status);
/* Read status to clear the PHY interrupt. */
- elink_cl22_read(sc, phy, MDIO_REG_INTR_STATUS, &val);
+ elink_cl22_read(sc, phy,
+ MDIO_REG_INTR_STATUS,
+ &val);
link_up = ((legacy_status & (1 << 2)) == (1 << 2));
@@ -10432,25 +12233,30 @@ static uint8_t elink_54618se_read_status(struct elink_phy *phy,
} else if (legacy_speed == (1 << 8)) {
vars->line_speed = ELINK_SPEED_10;
vars->duplex = DUPLEX_HALF;
- } else /* Should not happen */
+ } else /* Should not happen */
vars->line_speed = 0;
- PMD_DRV_LOG(DEBUG,
- "Link is up in %dMbps, is_duplex_full= %d",
- vars->line_speed, (vars->duplex == DUPLEX_FULL));
+ ELINK_DEBUG_P2(sc,
+ "Link is up in %dMbps, is_duplex_full= %d",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
/* Check legacy speed AN resolution */
- elink_cl22_read(sc, phy, 0x01, &val);
+ elink_cl22_read(sc, phy,
+ 0x01,
+ &val);
if (val & (1 << 5))
vars->link_status |=
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
- elink_cl22_read(sc, phy, 0x06, &val);
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ elink_cl22_read(sc, phy,
+ 0x06,
+ &val);
if ((val & (1 << 0)) == 0)
vars->link_status |=
- LINK_STATUS_PARALLEL_DETECTION_USED;
+ LINK_STATUS_PARALLEL_DETECTION_USED;
- PMD_DRV_LOG(DEBUG, "BNX2X54618SE: link speed is %d",
- vars->line_speed);
+ ELINK_DEBUG_P1(sc, "BNX2X4618SE: link speed is %d",
+ vars->line_speed);
elink_ext_phy_resolve_fc(phy, params, vars);
@@ -10460,27 +12266,27 @@ static uint8_t elink_54618se_read_status(struct elink_phy *phy,
if (val & (1 << 5))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
if (val & (1 << 6))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
if (val & (1 << 7))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
if (val & (1 << 8))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
if (val & (1 << 9))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
elink_cl22_read(sc, phy, 0xa, &val);
if (val & (1 << 10))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
if (val & (1 << 11))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
if ((phy->flags & ELINK_FLAGS_EEE) &&
elink_eee_has_cap(params))
@@ -10497,7 +12303,7 @@ static void elink_54618se_config_loopback(struct elink_phy *phy,
uint16_t val;
uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- PMD_DRV_LOG(DEBUG, "2PMA/PMD ext_phy_loopback: 54618se");
+ ELINK_DEBUG_P0(sc, "2PMA/PMD ext_phy_loopback: 54618se");
/* Enable master/slave manual mmode and set to master */
/* mii write 9 [bits set 11 12] */
@@ -10543,30 +12349,33 @@ static void elink_7101_config_loopback(struct elink_phy *phy,
}
static uint8_t elink_7101_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint16_t fw_ver1, fw_ver2, val;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LASI indication");
+ ELINK_DEBUG_P0(sc, "Setting the SFX7101 LASI indication");
- /* Restore normal power mode */
+ /* Restore normal power mode*/
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
elink_ext_phy_hw_reset(sc, params->port);
elink_wait_reset_complete(sc, phy, params);
- elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
- PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LED to blink on traffic");
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
+ ELINK_DEBUG_P0(sc, "Setting the SFX7101 LED to blink on traffic");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1 << 3));
elink_ext_phy_set_pause(params, phy, vars);
/* Restart autoneg */
- elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+ elink_cl45_read(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
val |= 0x200;
- elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+ elink_cl45_write(sc, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
/* Save spirom version */
elink_cl45_read(sc, phy,
@@ -10575,24 +12384,30 @@ static uint8_t elink_7101_config_init(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
elink_save_spirom_version(sc, params->port,
- (uint32_t) (fw_ver1 << 16 | fw_ver2),
+ (uint32_t)(fw_ver1 << 16 | fw_ver2),
phy->ver_addr);
return ELINK_STATUS_OK;
}
static uint8_t elink_7101_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t link_up;
uint16_t val1, val2;
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
- PMD_DRV_LOG(DEBUG, "10G-base-T LASI status 0x%x->0x%x", val2, val1);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
- elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "10G-base-T PMA status 0x%x->0x%x", val2, val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ ELINK_DEBUG_P2(sc, "10G-base-T LASI status 0x%x->0x%x",
+ val2, val1);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ ELINK_DEBUG_P2(sc, "10G-base-T PMA status 0x%x->0x%x",
+ val2, val1);
link_up = ((val1 & 4) == 4);
/* If link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
@@ -10601,21 +12416,21 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
&val2);
vars->line_speed = ELINK_SPEED_10000;
vars->duplex = DUPLEX_FULL;
- PMD_DRV_LOG(DEBUG, "SFX7101 AN status 0x%x->Master=%x",
- val2, (val2 & (1 << 14)));
+ ELINK_DEBUG_P2(sc, "SFX7101 AN status 0x%x->Master=%x",
+ val2, (val2 & (1 << 14)));
elink_ext_phy_10G_an_resolve(sc, phy, vars);
elink_ext_phy_resolve_fc(phy, params, vars);
/* Read LP advertised speeds */
if (val2 & (1 << 11))
vars->link_status |=
- LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
return link_up;
}
-static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
- uint16_t * len)
+static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t *str,
+ uint16_t *len)
{
if (*len < 5)
return ELINK_STATUS_ERROR;
@@ -10628,15 +12443,39 @@ static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
return ELINK_STATUS_OK;
}
-static void elink_7101_hw_reset(__rte_unused struct elink_phy *phy,
- struct elink_params *params)
+void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy)
{
+ uint16_t val, cnt;
+
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ for (cnt = 0; cnt < 10; cnt++) {
+ DELAY(1000 * 50);
+ /* Writes a self-clearing reset */
+ elink_cl45_write(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1 << 15)));
+ /* Wait for clear */
+ elink_cl45_read(sc, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ if ((val & (1 << 15)) == 0)
+ break;
+ }
+}
+
+static void elink_7101_hw_reset(__rte_unused struct elink_phy *phy,
+ struct elink_params *params) {
/* Low power mode is controlled by GPIO 2 */
elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
/* The PHY reset is controlled by GPIO 1 */
elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
}
static void elink_7101_set_link_led(struct elink_phy *phy,
@@ -10657,7 +12496,9 @@ static void elink_7101_set_link_led(struct elink_phy *phy,
break;
}
elink_cl45_write(sc, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LINK_LED_CNTL, val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7107_LINK_LED_CNTL,
+ val);
}
/******************************************************************/
@@ -10665,482 +12506,532 @@ static void elink_7101_set_link_led(struct elink_phy *phy,
/******************************************************************/
static const struct elink_phy phy_null = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
- .addr = 0,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = 0,
- .media_type = ELINK_ETH_PHY_NOT_PRESENT,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = NULL,
- .read_status = NULL,
- .link_reset = NULL,
- .config_loopback = NULL,
- .format_fw_ver = NULL,
- .hw_reset = NULL,
- .set_link_led = NULL,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+ .addr = 0,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = 0,
+ .media_type = ELINK_ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)NULL,
+ .read_status = (read_status_t)NULL,
+ .link_reset = (link_reset_t)NULL,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
static const struct elink_phy phy_serdes = {
- .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = 0,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10baseT_Half |
- ELINK_SUPPORTED_10baseT_Full |
- ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_2500baseX_Full |
- ELINK_SUPPORTED_TP |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_BASE_T,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_xgxs_config_init,
- .read_status = elink_link_settings_status,
- .link_reset = elink_int_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = NULL,
- .hw_reset = NULL,
- .set_link_led = NULL,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_2500baseX_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_xgxs_config_init,
+ .read_status = (read_status_t)elink_link_settings_status,
+ .link_reset = (link_reset_t)elink_int_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
static const struct elink_phy phy_xgxs = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = 0,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10baseT_Half |
- ELINK_SUPPORTED_10baseT_Full |
- ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_2500baseX_Full |
- ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_FIBRE |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_CX4,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_xgxs_config_init,
- .read_status = elink_link_settings_status,
- .link_reset = elink_int_link_reset,
- .config_loopback = elink_set_xgxs_loopback,
- .format_fw_ver = NULL,
- .hw_reset = NULL,
- .set_link_led = NULL,
- .phy_specific_func = elink_xgxs_specific_func
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_2500baseX_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_CX4,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_xgxs_config_init,
+ .read_status = (read_status_t)elink_link_settings_status,
+ .link_reset = (link_reset_t)elink_int_link_reset,
+ .config_loopback = (config_loopback_t)elink_set_xgxs_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)elink_xgxs_specific_func
};
-
static const struct elink_phy phy_warpcore = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_TX_ERROR_CHECK,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10baseT_Half |
- ELINK_SUPPORTED_10baseT_Full |
- ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_20000baseKR2_Full |
- ELINK_SUPPORTED_20000baseMLD2_Full |
- ELINK_SUPPORTED_FIBRE |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_UNSPECIFIED,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- /* req_duplex = */ 0,
- /* rsrv = */ 0,
- .config_init = elink_warpcore_config_init,
- .read_status = elink_warpcore_read_status,
- .link_reset = elink_warpcore_link_reset,
- .config_loopback = elink_set_warpcore_loopback,
- .format_fw_ver = NULL,
- .hw_reset = elink_warpcore_hw_reset,
- .set_link_led = NULL,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_TX_ERROR_CHECK,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_1000baseKX_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_10000baseKR_Full |
+ ELINK_SUPPORTED_20000baseKR2_Full |
+ ELINK_SUPPORTED_20000baseMLD2_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ /* req_duplex = */0,
+ /* rsrv = */0,
+ .config_init = (config_init_t)elink_warpcore_config_init,
+ .read_status = (read_status_t)elink_warpcore_read_status,
+ .link_reset = (link_reset_t)elink_warpcore_link_reset,
+ .config_loopback = (config_loopback_t)elink_set_warpcore_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)elink_warpcore_hw_reset,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
+
static const struct elink_phy phy_7101 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_TP |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_BASE_T,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_7101_config_init,
- .read_status = elink_7101_read_status,
- .link_reset = elink_common_ext_link_reset,
- .config_loopback = elink_7101_config_loopback,
- .format_fw_ver = elink_7101_format_ver,
- .hw_reset = elink_7101_hw_reset,
- .set_link_led = elink_7101_set_link_led,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_7101_config_init,
+ .read_status = (read_status_t)elink_7101_read_status,
+ .link_reset = (link_reset_t)elink_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)elink_7101_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)elink_7101_format_ver,
+ .hw_reset = (hw_reset_t)elink_7101_hw_reset,
+ .set_link_led = (set_link_led_t)elink_7101_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
-
static const struct elink_phy phy_8073 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = 0,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_2500baseX_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_FIBRE |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_KR,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_8073_config_init,
- .read_status = elink_8073_read_status,
- .link_reset = elink_8073_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_format_ver,
- .hw_reset = NULL,
- .set_link_led = NULL,
- .phy_specific_func = elink_8073_specific_func
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_2500baseX_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_KR,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_8073_config_init,
+ .read_status = (read_status_t)elink_8073_read_status,
+ .link_reset = (link_reset_t)elink_8073_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)elink_8073_specific_func
};
-
static const struct elink_phy phy_8705 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_FIBRE |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_XFP_FIBER,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_8705_config_init,
- .read_status = elink_8705_read_status,
- .link_reset = elink_common_ext_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_null_format_ver,
- .hw_reset = NULL,
- .set_link_led = NULL,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_XFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_8705_config_init,
+ .read_status = (read_status_t)elink_8705_read_status,
+ .link_reset = (link_reset_t)elink_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_null_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
-
static const struct elink_phy phy_8706 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_FIBRE |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_SFPP_10G_FIBER,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_8706_config_init,
- .read_status = elink_8706_read_status,
- .link_reset = elink_common_ext_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_format_ver,
- .hw_reset = NULL,
- .set_link_led = NULL,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_SFPP_10G_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_8706_config_init,
+ .read_status = (read_status_t)elink_8706_read_status,
+ .link_reset = (link_reset_t)elink_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
static const struct elink_phy phy_8726 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = (ELINK_FLAGS_INIT_XGXS_FIRST | ELINK_FLAGS_TX_ERROR_CHECK),
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_FIBRE |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_NOT_PRESENT,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_8726_config_init,
- .read_status = elink_8726_read_status,
- .link_reset = elink_8726_link_reset,
- .config_loopback = elink_8726_config_loopback,
- .format_fw_ver = elink_format_ver,
- .hw_reset = NULL,
- .set_link_led = NULL,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_INIT_XGXS_FIRST |
+ ELINK_FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_8726_config_init,
+ .read_status = (read_status_t)elink_8726_read_status,
+ .link_reset = (link_reset_t)elink_8726_link_reset,
+ .config_loopback = (config_loopback_t)elink_8726_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)elink_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
static const struct elink_phy phy_8727 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | ELINK_FLAGS_TX_ERROR_CHECK),
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_FIBRE |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_NOT_PRESENT,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_8727_config_init,
- .read_status = elink_8727_read_status,
- .link_reset = elink_8727_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_format_ver,
- .hw_reset = elink_8727_hw_reset,
- .set_link_led = elink_8727_set_link_led,
- .phy_specific_func = elink_8727_specific_func
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_FIBRE |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_8727_config_init,
+ .read_status = (read_status_t)elink_8727_read_status,
+ .link_reset = (link_reset_t)elink_8727_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_format_ver,
+ .hw_reset = (hw_reset_t)elink_8727_hw_reset,
+ .set_link_led = (set_link_led_t)elink_8727_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)elink_8727_specific_func
};
-
static const struct elink_phy phy_8481 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ |
- ELINK_FLAGS_REARM_LATCH_SIGNAL,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10baseT_Half |
- ELINK_SUPPORTED_10baseT_Full |
- ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_TP |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_BASE_T,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_8481_config_init,
- .read_status = elink_848xx_read_status,
- .link_reset = elink_8481_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_848xx_format_ver,
- .hw_reset = elink_8481_hw_reset,
- .set_link_led = elink_848xx_set_link_led,
- .phy_specific_func = NULL
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_8481_config_init,
+ .read_status = (read_status_t)elink_848xx_read_status,
+ .link_reset = (link_reset_t)elink_8481_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver,
+ .hw_reset = (hw_reset_t)elink_8481_hw_reset,
+ .set_link_led = (set_link_led_t)elink_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
};
static const struct elink_phy phy_84823 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ |
- ELINK_FLAGS_REARM_LATCH_SIGNAL | ELINK_FLAGS_TX_ERROR_CHECK),
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10baseT_Half |
- ELINK_SUPPORTED_10baseT_Full |
- ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_TP |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_BASE_T,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_848x3_config_init,
- .read_status = elink_848xx_read_status,
- .link_reset = elink_848x3_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_848xx_format_ver,
- .hw_reset = NULL,
- .set_link_led = elink_848xx_set_link_led,
- .phy_specific_func = elink_848xx_specific_func
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL |
+ ELINK_FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_848x3_config_init,
+ .read_status = (read_status_t)elink_848xx_read_status,
+ .link_reset = (link_reset_t)elink_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)elink_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func
};
static const struct elink_phy phy_84833 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ |
- ELINK_FLAGS_REARM_LATCH_SIGNAL |
- ELINK_FLAGS_TX_ERROR_CHECK | ELINK_FLAGS_TEMPERATURE),
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_TP |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_BASE_T,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_848x3_config_init,
- .read_status = elink_848xx_read_status,
- .link_reset = elink_848x3_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_848xx_format_ver,
- .hw_reset = elink_84833_hw_reset_phy,
- .set_link_led = elink_848xx_set_link_led,
- .phy_specific_func = elink_848xx_specific_func
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL |
+ ELINK_FLAGS_TX_ERROR_CHECK |
+ ELINK_FLAGS_TEMPERATURE),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_848x3_config_init,
+ .read_status = (read_status_t)elink_848xx_read_status,
+ .link_reset = (link_reset_t)elink_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver,
+ .hw_reset = (hw_reset_t)elink_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)elink_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func
};
static const struct elink_phy phy_84834 = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ |
- ELINK_FLAGS_REARM_LATCH_SIGNAL,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_TP |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_BASE_T,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- .req_duplex = 0,
- .rsrv = 0,
- .config_init = elink_848x3_config_init,
- .read_status = elink_848xx_read_status,
- .link_reset = elink_848x3_link_reset,
- .config_loopback = NULL,
- .format_fw_ver = elink_848xx_format_ver,
- .hw_reset = elink_84833_hw_reset_phy,
- .set_link_led = elink_848xx_set_link_led,
- .phy_specific_func = elink_848xx_specific_func
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_848x3_config_init,
+ .read_status = (read_status_t)elink_848xx_read_status,
+ .link_reset = (link_reset_t)elink_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver,
+ .hw_reset = (hw_reset_t)elink_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)elink_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func
};
-static const struct elink_phy phy_54618se = {
- .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE,
- .addr = 0xff,
- .def_md_devad = 0,
- .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
- .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
- .mdio_ctrl = 0,
- .supported = (ELINK_SUPPORTED_10baseT_Half |
- ELINK_SUPPORTED_10baseT_Full |
- ELINK_SUPPORTED_100baseT_Half |
- ELINK_SUPPORTED_100baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_TP |
- ELINK_SUPPORTED_Autoneg |
- ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause),
- .media_type = ELINK_ETH_PHY_BASE_T,
- .ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
- /* req_duplex = */ 0,
- /* rsrv = */ 0,
- .config_init = elink_54618se_config_init,
- .read_status = elink_54618se_read_status,
- .link_reset = elink_54618se_link_reset,
- .config_loopback = elink_54618se_config_loopback,
- .format_fw_ver = NULL,
- .hw_reset = NULL,
- .set_link_led = elink_5461x_set_link_led,
- .phy_specific_func = elink_54618se_specific_func
+static const struct elink_phy phy_84858 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ |
+ ELINK_FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)elink_848x3_config_init,
+ .read_status = (read_status_t)elink_848xx_read_status,
+ .link_reset = (link_reset_t)elink_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)elink_848xx_format_ver,
+ .hw_reset = (hw_reset_t)elink_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)elink_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)elink_848xx_specific_func
};
+
+static const struct elink_phy phy_54618se = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = ELINK_FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (ELINK_SUPPORTED_10baseT_Half |
+ ELINK_SUPPORTED_10baseT_Full |
+ ELINK_SUPPORTED_100baseT_Half |
+ ELINK_SUPPORTED_100baseT_Full |
+ ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_TP |
+ ELINK_SUPPORTED_Autoneg |
+ ELINK_SUPPORTED_Pause |
+ ELINK_SUPPORTED_Asym_Pause),
+ .media_type = ELINK_ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ /* req_duplex = */0,
+ /* rsrv = */0,
+ .config_init = (config_init_t)elink_54618se_config_init,
+ .read_status = (read_status_t)elink_54618se_read_status,
+ .link_reset = (link_reset_t)elink_54618se_link_reset,
+ .config_loopback = (config_loopback_t)elink_54618se_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)elink_5461x_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)elink_54618se_specific_func
+};
/*****************************************************************/
/* */
/* Populate the phy according. Main function: elink_populate_phy */
@@ -11148,9 +13039,9 @@ static const struct elink_phy phy_54618se = {
/* */
/*****************************************************************/
static void elink_populate_preemphasis(struct bnx2x_softc *sc,
- uint32_t shmem_base,
- struct elink_phy *phy, uint8_t port,
- uint8_t phy_index)
+ uint32_t shmem_base,
+ struct elink_phy *phy, uint8_t port,
+ uint8_t phy_index)
{
/* Get the 4 lanes xgxs config rx and tx */
uint32_t rx = 0, tx = 0, i;
@@ -11162,23 +13053,19 @@ static void elink_populate_preemphasis(struct bnx2x_softc *sc,
if (phy_index == ELINK_INT_PHY || phy_index == ELINK_EXT_PHY1) {
rx = REG_RD(sc, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- xgxs_config_rx[i << 1]));
+ dev_info.port_hw_config[port].xgxs_config_rx[i << 1]));
tx = REG_RD(sc, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- xgxs_config_tx[i << 1]));
+ dev_info.port_hw_config[port].xgxs_config_tx[i << 1]));
} else {
rx = REG_RD(sc, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- xgxs_config2_rx[i << 1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i << 1]));
tx = REG_RD(sc, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- xgxs_config2_rx[i << 1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i << 1]));
}
phy->rx_preemphasis[i << 1] = ((rx >> 16) & 0xffff);
@@ -11186,65 +13073,62 @@ static void elink_populate_preemphasis(struct bnx2x_softc *sc,
phy->tx_preemphasis[i << 1] = ((tx >> 16) & 0xffff);
phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+ ELINK_DEBUG_P2(sc, "phy->rx_preemphasis = %x, phy->tx_preemphasis = %x",
+ phy->rx_preemphasis[i << 1],
+ phy->tx_preemphasis[i << 1]);
}
}
static uint32_t elink_get_ext_phy_config(struct bnx2x_softc *sc,
- uint32_t shmem_base, uint8_t phy_index,
- uint8_t port)
+ uint32_t shmem_base,
+ uint8_t phy_index, uint8_t port)
{
uint32_t ext_phy_config = 0;
switch (phy_index) {
case ELINK_EXT_PHY1:
ext_phy_config = REG_RD(sc, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- external_phy_config));
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
break;
case ELINK_EXT_PHY2:
ext_phy_config = REG_RD(sc, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- external_phy_config2));
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config2));
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid phy_index %d", phy_index);
+ ELINK_DEBUG_P1(sc, "Invalid phy_index %d", phy_index);
return ELINK_STATUS_ERROR;
}
return ext_phy_config;
}
-
static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
- uint32_t shmem_base, uint8_t port,
- struct elink_phy *phy)
+ uint32_t shmem_base, uint8_t port,
+ struct elink_phy *phy)
{
uint32_t phy_addr;
- __rte_unused uint32_t chip_id;
+ uint32_t chip_id;
uint32_t switch_cfg = (REG_RD(sc, shmem_base +
- offsetof(struct shmem_region,
- dev_info.
- port_feature_config[port].
- link_config)) &
- PORT_FEATURE_CONNECTED_SWITCH_MASK);
- chip_id =
- (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) |
- ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12);
-
- PMD_DRV_LOG(DEBUG, ":chip_id = 0x%x", chip_id);
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[port].link_config)) &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ chip_id = (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12);
+
+ ELINK_DEBUG_P1(sc, ":chip_id = 0x%x", chip_id);
if (USES_WARPCORE(sc)) {
uint32_t serdes_net_if;
- phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
+ phy_addr = REG_RD(sc,
+ MISC_REG_WC0_CTRL_PHY_ADDR);
*phy = phy_warpcore;
if (REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
phy->flags |= ELINK_FLAGS_4_PORT_MODE;
else
phy->flags &= ~ELINK_FLAGS_4_PORT_MODE;
- /* Check Dual mode */
+ /* Check Dual mode */
serdes_net_if = (REG_RD(sc, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- default_cfg)) &
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[port].default_cfg)) &
PORT_HW_CFG_NET_SERDES_IF_MASK);
/* Set the appropriate supported and flags indications per
* interface type of the chip
@@ -11280,8 +13164,8 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
break;
case PORT_HW_CFG_NET_SERDES_IF_KR:
phy->media_type = ELINK_ETH_PHY_KR;
- phy->supported &= (ELINK_SUPPORTED_1000baseT_Full |
- ELINK_SUPPORTED_10000baseT_Full |
+ phy->supported &= (ELINK_SUPPORTED_1000baseKX_Full |
+ ELINK_SUPPORTED_10000baseKR_Full |
ELINK_SUPPORTED_FIBRE |
ELINK_SUPPORTED_Autoneg |
ELINK_SUPPORTED_Pause |
@@ -11299,8 +13183,8 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
phy->media_type = ELINK_ETH_PHY_KR;
phy->flags |= ELINK_FLAGS_WC_DUAL_MODE;
phy->supported &= (ELINK_SUPPORTED_20000baseKR2_Full |
- ELINK_SUPPORTED_10000baseT_Full |
- ELINK_SUPPORTED_1000baseT_Full |
+ ELINK_SUPPORTED_10000baseKR_Full |
+ ELINK_SUPPORTED_1000baseKX_Full |
ELINK_SUPPORTED_Autoneg |
ELINK_SUPPORTED_FIBRE |
ELINK_SUPPORTED_Pause |
@@ -11308,8 +13192,8 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK;
break;
default:
- PMD_DRV_LOG(DEBUG, "Unknown WC interface type 0x%x",
- serdes_net_if);
+ ELINK_DEBUG_P1(sc, "Unknown WC interface type 0x%x",
+ serdes_net_if);
break;
}
@@ -11321,6 +13205,8 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
phy->flags |= ELINK_FLAGS_MDC_MDIO_WA;
else
phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_B0;
+ ELINK_DEBUG_P3(sc, "media_type = %x, flags = %x, supported = %x",
+ phy->media_type, phy->flags, phy->supported);
} else {
switch (switch_cfg) {
case ELINK_SWITCH_CFG_1G:
@@ -11336,32 +13222,32 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
*phy = phy_xgxs;
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid switch_cfg");
+ ELINK_DEBUG_P0(sc, "Invalid switch_cfg");
return ELINK_STATUS_ERROR;
}
}
- phy->addr = (uint8_t) phy_addr;
+ phy->addr = (uint8_t)phy_addr;
phy->mdio_ctrl = elink_get_emac_base(sc,
- SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
- port);
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+ port);
if (CHIP_IS_E2(sc))
phy->def_md_devad = ELINK_E2_DEFAULT_PHY_DEV_ADDR;
else
phy->def_md_devad = ELINK_DEFAULT_PHY_DEV_ADDR;
- PMD_DRV_LOG(DEBUG, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x",
- port, phy->addr, phy->mdio_ctrl);
+ ELINK_DEBUG_P3(sc, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x",
+ port, phy->addr, phy->mdio_ctrl);
elink_populate_preemphasis(sc, shmem_base, phy, port, ELINK_INT_PHY);
return ELINK_STATUS_OK;
}
static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc,
- uint8_t phy_index,
- uint32_t shmem_base,
- uint32_t shmem2_base,
- uint8_t port,
- struct elink_phy *phy)
+ uint8_t phy_index,
+ uint32_t shmem_base,
+ uint32_t shmem2_base,
+ uint8_t port,
+ struct elink_phy *phy)
{
uint32_t ext_phy_config, phy_type, config2;
uint32_t mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
@@ -11407,10 +13293,13 @@ static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834:
*phy = phy_84834;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858:
+ *phy = phy_84858;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE:
+ case PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE:
*phy = phy_54618se;
- if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE)
+ if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE)
phy->flags |= ELINK_FLAGS_EEE;
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
@@ -11436,21 +13325,20 @@ static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc,
* the address
*/
config2 = REG_RD(sc, shmem_base + offsetof(struct shmem_region,
- dev_info.shared_hw_config.
- config2));
+ dev_info.shared_hw_config.config2));
if (phy_index == ELINK_EXT_PHY1) {
phy->ver_addr = shmem_base + offsetof(struct shmem_region,
- port_mb[port].
- ext_phy_fw_version);
+ port_mb[port].ext_phy_fw_version);
/* Check specific mdc mdio settings */
if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
mdc_mdio_access = config2 &
- SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
} else {
uint32_t size = REG_RD(sc, shmem2_base);
- if (size > offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+ if (size >
+ offsetof(struct shmem2_region, ext_phy_fw_version2)) {
phy->ver_addr = shmem2_base +
offsetof(struct shmem2_region,
ext_phy_fw_version2[port]);
@@ -11458,35 +13346,34 @@ static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc,
/* Check specific mdc mdio settings */
if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
mdc_mdio_access = (config2 &
- SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
- >> (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
- SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+ SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+ (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
}
phy->mdio_ctrl = elink_get_emac_base(sc, mdc_mdio_access, port);
- if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) &&
- (phy->ver_addr)) {
+ if (elink_is_8483x_8485x(phy) && (phy->ver_addr)) {
/* Remove 100Mb link supported for BNX2X84833/4 when phy fw
* version lower than or equal to 1.39
*/
uint32_t raw_ver = REG_RD(sc, phy->ver_addr);
- if (((raw_ver & 0x7F) <= 39) && (((raw_ver & 0xF80) >> 7) <= 1))
+ if (((raw_ver & 0x7F) <= 39) &&
+ (((raw_ver & 0xF80) >> 7) <= 1))
phy->supported &= ~(ELINK_SUPPORTED_100baseT_Half |
ELINK_SUPPORTED_100baseT_Full);
}
- PMD_DRV_LOG(DEBUG, "phy_type 0x%x port %d found in index %d",
- phy_type, port, phy_index);
- PMD_DRV_LOG(DEBUG, " addr=0x%x, mdio_ctl=0x%x",
- phy->addr, phy->mdio_ctrl);
+ ELINK_DEBUG_P3(sc, "phy_type 0x%x port %d found in index %d",
+ phy_type, port, phy_index);
+ ELINK_DEBUG_P2(sc, " addr=0x%x, mdio_ctl=0x%x",
+ phy->addr, phy->mdio_ctrl);
return ELINK_STATUS_OK;
}
static elink_status_t elink_populate_phy(struct bnx2x_softc *sc,
- uint8_t phy_index, uint32_t shmem_base,
- uint32_t shmem2_base, uint8_t port,
- struct elink_phy *phy)
+ uint8_t phy_index, uint32_t shmem_base,
+ uint32_t shmem2_base, uint8_t port,
+ struct elink_phy *phy)
{
elink_status_t status = ELINK_STATUS_OK;
phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
@@ -11498,50 +13385,44 @@ static elink_status_t elink_populate_phy(struct bnx2x_softc *sc,
}
static void elink_phy_def_cfg(struct elink_params *params,
- struct elink_phy *phy, uint8_t phy_index)
+ struct elink_phy *phy,
+ uint8_t phy_index)
{
struct bnx2x_softc *sc = params->sc;
uint32_t link_config;
/* Populate the default phy configuration for MF mode */
if (phy_index == ELINK_EXT_PHY2) {
link_config = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_feature_config
- [params->port].link_config2));
- phy->speed_cap_mask =
- REG_RD(sc,
- params->shmem_base + offsetof(struct shmem_region,
- dev_info.port_hw_config
- [params->port].
- speed_capability_mask2));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config2));
+ phy->speed_cap_mask = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask2));
} else {
link_config = REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_feature_config
- [params->port].link_config));
- phy->speed_cap_mask =
- REG_RD(sc,
- params->shmem_base + offsetof(struct shmem_region,
- dev_info.port_hw_config
- [params->port].
- speed_capability_mask));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config));
+ phy->speed_cap_mask = REG_RD(sc, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
}
-
- PMD_DRV_LOG(DEBUG,
- "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x",
- phy_index, link_config, phy->speed_cap_mask);
+ ELINK_DEBUG_P3(sc,
+ "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x",
+ phy_index, link_config, phy->speed_cap_mask);
phy->req_duplex = DUPLEX_FULL;
- switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fall-through */
+ /* fallthrough */
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = ELINK_SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fall-through */
+ /* fallthrough */
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = ELINK_SPEED_100;
break;
@@ -11559,7 +13440,10 @@ static void elink_phy_def_cfg(struct elink_params *params,
break;
}
- switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+ ELINK_DEBUG_P2(sc, "Default config phy idx %x, req_duplex config %x",
+ phy_index, phy->req_duplex);
+
+ switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
case PORT_FEATURE_FLOW_CONTROL_AUTO:
phy->req_flow_ctrl = ELINK_FLOW_CTRL_AUTO;
break;
@@ -11576,6 +13460,9 @@ static void elink_phy_def_cfg(struct elink_params *params,
phy->req_flow_ctrl = ELINK_FLOW_CTRL_NONE;
break;
}
+ ELINK_DEBUG_P3(sc, "Requested Duplex = %x, line_speed = %x, flow_ctrl = %x",
+ phy->req_duplex, phy->req_line_speed,
+ phy->req_flow_ctrl);
}
uint32_t elink_phy_selection(struct elink_params *params)
@@ -11584,25 +13471,24 @@ uint32_t elink_phy_selection(struct elink_params *params)
uint32_t return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
phy_config_swapped = params->multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
- prio_cfg = params->multi_phy_config & PORT_HW_CFG_PHY_SELECTION_MASK;
+ prio_cfg = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SELECTION_MASK;
if (phy_config_swapped) {
switch (prio_cfg) {
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- return_cfg =
- PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
break;
case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- return_cfg =
- PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
break;
case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
- return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
break;
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
- return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
break;
}
} else
@@ -11611,19 +13497,23 @@ uint32_t elink_phy_selection(struct elink_params *params)
return return_cfg;
}
-elink_status_t elink_phy_probe(struct elink_params * params)
+elink_status_t elink_phy_probe(struct elink_params *params)
{
uint8_t phy_index, actual_phy_idx;
uint32_t phy_config_swapped, sync_offset, media_types;
struct bnx2x_softc *sc = params->sc;
struct elink_phy *phy;
params->num_phys = 0;
- PMD_DRV_LOG(DEBUG, "Begin phy probe");
-
+ ELINK_DEBUG_P0(sc, "Begin phy probe");
+#ifdef ELINK_INCLUDE_EMUL
+ if (CHIP_REV_IS_EMUL(sc))
+ return ELINK_STATUS_OK;
+#endif
phy_config_swapped = params->multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
- for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; phy_index++) {
+ for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS;
+ phy_index++) {
actual_phy_idx = phy_index;
if (phy_config_swapped) {
if (phy_index == ELINK_EXT_PHY1)
@@ -11631,18 +13521,19 @@ elink_status_t elink_phy_probe(struct elink_params * params)
else if (phy_index == ELINK_EXT_PHY2)
actual_phy_idx = ELINK_EXT_PHY1;
}
- PMD_DRV_LOG(DEBUG, "phy_config_swapped %x, phy_index %x,"
- " actual_phy_idx %x", phy_config_swapped,
- phy_index, actual_phy_idx);
+ ELINK_DEBUG_P3(sc, "phy_config_swapped %x, phy_index %x,"
+ " actual_phy_idx %x", phy_config_swapped,
+ phy_index, actual_phy_idx);
phy = &params->phy[actual_phy_idx];
if (elink_populate_phy(sc, phy_index, params->shmem_base,
params->shmem2_base, params->port,
phy) != ELINK_STATUS_OK) {
params->num_phys = 0;
- PMD_DRV_LOG(DEBUG, "phy probe failed in phy index %d",
- phy_index);
+ ELINK_DEBUG_P1(sc, "phy probe failed in phy index %d",
+ phy_index);
for (phy_index = ELINK_INT_PHY;
- phy_index < ELINK_MAX_PHYS; phy_index++)
+ phy_index < ELINK_MAX_PHYS;
+ phy_index++)
*phy = phy_null;
return ELINK_STATUS_ERROR;
}
@@ -11658,8 +13549,8 @@ elink_status_t elink_phy_probe(struct elink_params * params)
phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_G;
sync_offset = params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].media_type);
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
media_types = REG_RD(sc, sync_offset);
/* Update media type for non-PMF sync only for the first time
@@ -11670,9 +13561,9 @@ elink_status_t elink_phy_probe(struct elink_params * params)
(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
actual_phy_idx))) == 0) {
media_types |= ((phy->media_type &
- PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
- (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
- actual_phy_idx));
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx));
}
REG_WR(sc, sync_offset, media_types);
@@ -11680,47 +13571,231 @@ elink_status_t elink_phy_probe(struct elink_params * params)
params->num_phys++;
}
- PMD_DRV_LOG(DEBUG, "End phy probe. #phys found %x", params->num_phys);
+ ELINK_DEBUG_P1(sc, "End phy probe. #phys found %x", params->num_phys);
return ELINK_STATUS_OK;
}
-static void elink_init_bmac_loopback(struct elink_params *params,
- struct elink_vars *vars)
+#ifdef ELINK_INCLUDE_EMUL
+static elink_status_t elink_init_e3_emul_mac(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ vars->line_speed = params->req_line_speed[0];
+ /* In case link speed is auto, set speed the highest as possible */
+ if (params->req_line_speed[0] == ELINK_SPEED_AUTO_NEG) {
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC)
+ vars->line_speed = ELINK_SPEED_2500;
+ else if (elink_is_4_port_mode(sc))
+ vars->line_speed = ELINK_SPEED_10000;
+ else
+ vars->line_speed = ELINK_SPEED_20000;
+ }
+ if (vars->line_speed < ELINK_SPEED_10000) {
+ if ((params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC)) {
+ ELINK_DEBUG_P1(sc, "Invalid line speed %d while UMAC is"
+ " disabled!", params->req_line_speed[0]);
+ return ELINK_STATUS_ERROR;
+ }
+ switch (vars->line_speed) {
+ case ELINK_SPEED_10:
+ vars->link_status = ELINK_LINK_10TFD;
+ break;
+ case ELINK_SPEED_100:
+ vars->link_status = ELINK_LINK_100TXFD;
+ break;
+ case ELINK_SPEED_1000:
+ vars->link_status = ELINK_LINK_1000TFD;
+ break;
+ case ELINK_SPEED_2500:
+ vars->link_status = ELINK_LINK_2500TFD;
+ break;
+ default:
+ ELINK_DEBUG_P1(sc, "Invalid line speed %d for UMAC",
+ vars->line_speed);
+ return ELINK_STATUS_ERROR;
+ }
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ if (params->loopback_mode == ELINK_LOOPBACK_UMAC)
+ elink_umac_enable(params, vars, 1);
+ else
+ elink_umac_enable(params, vars, 0);
+ } else {
+ /* Link speed >= 10000 requires XMAC enabled */
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC) {
+ ELINK_DEBUG_P1(sc, "Invalid line speed %d while XMAC is"
+ " disabled!", params->req_line_speed[0]);
+ return ELINK_STATUS_ERROR;
+ }
+ /* Check link speed */
+ switch (vars->line_speed) {
+ case ELINK_SPEED_10000:
+ vars->link_status = ELINK_LINK_10GTFD;
+ break;
+ case ELINK_SPEED_20000:
+ vars->link_status = ELINK_LINK_20GTFD;
+ break;
+ default:
+ ELINK_DEBUG_P1(sc, "Invalid line speed %d for XMAC",
+ vars->line_speed);
+ return ELINK_STATUS_ERROR;
+ }
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ if (params->loopback_mode == ELINK_LOOPBACK_XMAC)
+ elink_xmac_enable(params, vars, 1);
+ else
+ elink_xmac_enable(params, vars, 0);
+ }
+ return ELINK_STATUS_OK;
+}
+
+static elink_status_t elink_init_emul(struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
+ if (CHIP_IS_E3(sc)) {
+ if (elink_init_e3_emul_mac(params, vars) !=
+ ELINK_STATUS_OK)
+ return ELINK_STATUS_ERROR;
+ } else {
+ if (params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC) {
+ vars->line_speed = ELINK_SPEED_1000;
+ vars->link_status = (LINK_STATUS_LINK_UP |
+ ELINK_LINK_1000XFD);
+ if (params->loopback_mode ==
+ ELINK_LOOPBACK_EMAC)
+ elink_emac_enable(params, vars, 1);
+ else
+ elink_emac_enable(params, vars, 0);
+ } else {
+ vars->line_speed = ELINK_SPEED_10000;
+ vars->link_status = (LINK_STATUS_LINK_UP |
+ ELINK_LINK_10GTFD);
+ if (params->loopback_mode ==
+ ELINK_LOOPBACK_BMAC)
+ elink_bmac_enable(params, vars, 1, 1);
+ else
+ elink_bmac_enable(params, vars, 0, 1);
+ }
+ }
vars->link_up = 1;
- vars->line_speed = ELINK_SPEED_10000;
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
- vars->mac_type = ELINK_MAC_TYPE_BMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
-
- elink_xgxs_deassert(params);
+ if (CHIP_IS_E1x(sc))
+ elink_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+ /* Disable drain */
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
- /* Set bmac loopback */
- elink_bmac_enable(params, vars, 1, 1);
+ /* update shared memory */
+ elink_update_mng(params, vars->link_status);
+ return ELINK_STATUS_OK;
+}
+#endif
+#ifdef ELINK_INCLUDE_FPGA
+static elink_status_t elink_init_fpga(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ /* Enable on E1.5 FPGA */
+ struct bnx2x_softc *sc = params->sc;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ if (!(CHIP_IS_E1(sc))) {
+ vars->flow_ctrl = (ELINK_FLOW_CTRL_TX |
+ ELINK_FLOW_CTRL_RX);
+ vars->link_status |= (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+ }
+ if (CHIP_IS_E3(sc)) {
+ vars->line_speed = params->req_line_speed[0];
+ switch (vars->line_speed) {
+ case ELINK_SPEED_AUTO_NEG:
+ vars->line_speed = ELINK_SPEED_2500;
+ case ELINK_SPEED_2500:
+ vars->link_status = ELINK_LINK_2500TFD;
+ break;
+ case ELINK_SPEED_1000:
+ vars->link_status = ELINK_LINK_1000XFD;
+ break;
+ case ELINK_SPEED_100:
+ vars->link_status = ELINK_LINK_100TXFD;
+ break;
+ case ELINK_SPEED_10:
+ vars->link_status = ELINK_LINK_10TFD;
+ break;
+ default:
+ ELINK_DEBUG_P1(sc, "Invalid link speed %d",
+ params->req_line_speed[0]);
+ return ELINK_STATUS_ERROR;
+ }
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ if (params->loopback_mode == ELINK_LOOPBACK_UMAC)
+ elink_umac_enable(params, vars, 1);
+ else
+ elink_umac_enable(params, vars, 0);
+ } else {
+ vars->line_speed = ELINK_SPEED_10000;
+ vars->link_status = (LINK_STATUS_LINK_UP | ELINK_LINK_10GTFD);
+ if (params->loopback_mode == ELINK_LOOPBACK_EMAC)
+ elink_emac_enable(params, vars, 1);
+ else
+ elink_emac_enable(params, vars, 0);
+ }
+ vars->link_up = 1;
+ if (CHIP_IS_E1x(sc))
+ elink_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+ /* Disable drain */
REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+
+ /* Update shared memory */
+ elink_update_mng(params, vars->link_status);
+ return ELINK_STATUS_OK;
+}
+#endif
+static void elink_init_bmac_loopback(struct elink_params *params,
+ struct elink_vars *vars)
+{
+ struct bnx2x_softc *sc = params->sc;
+ vars->link_up = 1;
+ vars->line_speed = ELINK_SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_BMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ elink_xgxs_deassert(params);
+
+ /* Set bmac loopback */
+ elink_bmac_enable(params, vars, 1, 1);
+
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void elink_init_emac_loopback(struct elink_params *params,
struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
- vars->link_up = 1;
- vars->line_speed = ELINK_SPEED_1000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
- vars->mac_type = ELINK_MAC_TYPE_EMAC;
+ vars->link_up = 1;
+ vars->line_speed = ELINK_SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
+ vars->mac_type = ELINK_MAC_TYPE_EMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = PHY_XGXS_FLAG;
- elink_xgxs_deassert(params);
- /* Set bmac loopback */
- elink_emac_enable(params, vars, 1);
- elink_emac_program(params, vars);
- REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+ elink_xgxs_deassert(params);
+ /* Set bmac loopback */
+ elink_emac_enable(params, vars, 1);
+ elink_emac_program(params, vars);
+ REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void elink_init_xmac_loopback(struct elink_params *params,
@@ -11741,8 +13816,9 @@ static void elink_init_xmac_loopback(struct elink_params *params,
*/
elink_set_aer_mmd(params, &params->phy[0]);
elink_warpcore_reset_lane(sc, &params->phy[0], 0);
- params->phy[ELINK_INT_PHY].config_loopback(&params->phy[ELINK_INT_PHY],
- params);
+ params->phy[ELINK_INT_PHY].config_loopback(
+ &params->phy[ELINK_INT_PHY],
+ params);
elink_xmac_enable(params, vars, 1);
REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
@@ -11804,12 +13880,11 @@ static void elink_init_xgxs_loopback(struct elink_params *params,
/* Set external phy loopback */
uint8_t phy_index;
for (phy_index = ELINK_EXT_PHY1;
- phy_index < params->num_phys; phy_index++)
+ phy_index < params->num_phys; phy_index++)
if (params->phy[phy_index].config_loopback)
- params->phy[phy_index].config_loopback(&params->
- phy
- [phy_index],
- params);
+ params->phy[phy_index].config_loopback(
+ &params->phy[phy_index],
+ params);
}
REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
@@ -11826,12 +13901,14 @@ void elink_set_rx_filter(struct elink_params *params, uint8_t en)
val |= en * 0x20;
REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + params->port * 4, val);
- REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port * 4, en * 0x3);
+ if (!CHIP_IS_E1(sc)) {
+ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port * 4,
+ en * 0x3);
+ }
REG_WR(sc, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
NIG_REG_LLH0_BRB1_NOT_MCP), en);
}
-
static elink_status_t elink_avoid_link_flap(struct elink_params *params,
struct elink_vars *vars)
{
@@ -11839,6 +13916,7 @@ static elink_status_t elink_avoid_link_flap(struct elink_params *params,
uint32_t dont_clear_stat, lfa_sts;
struct bnx2x_softc *sc = params->sc;
+ elink_set_mdio_emac_per_phy(sc, params);
/* Sync the link parameters */
elink_link_status_update(params, vars);
@@ -11850,7 +13928,7 @@ static elink_status_t elink_avoid_link_flap(struct elink_params *params,
for (phy_idx = ELINK_INT_PHY; phy_idx < params->num_phys; phy_idx++) {
struct elink_phy *phy = &params->phy[phy_idx];
if (phy->phy_specific_func) {
- PMD_DRV_LOG(DEBUG, "Calling PHY specific func");
+ ELINK_DEBUG_P0(sc, "Calling PHY specific func");
phy->phy_specific_func(phy, params, ELINK_PHY_INIT);
}
if ((phy->media_type == ELINK_ETH_PHY_SFPP_10G_FIBER) ||
@@ -11859,7 +13937,8 @@ static elink_status_t elink_avoid_link_flap(struct elink_params *params,
elink_verify_sfp_module(phy, params);
}
lfa_sts = REG_RD(sc, params->lfa_base +
- offsetof(struct shmem_lfa, lfa_sts));
+ offsetof(struct shmem_lfa,
+ lfa_sts));
dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
@@ -11970,12 +14049,12 @@ elink_status_t elink_phy_init(struct elink_params *params,
{
int lfa_status;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Phy Initialization started");
- PMD_DRV_LOG(DEBUG, "(1) req_speed %d, req_flowctrl %d",
- params->req_line_speed[0], params->req_flow_ctrl[0]);
- PMD_DRV_LOG(DEBUG, "(2) req_speed %d, req_flowctrl %d",
- params->req_line_speed[1], params->req_flow_ctrl[1]);
- PMD_DRV_LOG(DEBUG, "req_adv_flow_ctrl 0x%x", params->req_fc_auto_adv);
+ ELINK_DEBUG_P0(sc, "Phy Initialization started");
+ ELINK_DEBUG_P2(sc, "(1) req_speed %d, req_flowctrl %d",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ ELINK_DEBUG_P2(sc, "(2) req_speed %d, req_flowctrl %d",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
+ ELINK_DEBUG_P1(sc, "req_adv_flow_ctrl 0x%x", params->req_fc_auto_adv);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -11988,15 +14067,33 @@ elink_status_t elink_phy_init(struct elink_params *params,
params->link_flags = ELINK_PHY_INITIALIZED;
/* Driver opens NIG-BRB filters */
elink_set_rx_filter(params, 1);
+ elink_chng_link_count(params, 1);
/* Check if link flap can be avoided */
lfa_status = elink_check_lfa(params);
+ ELINK_DEBUG_P3(sc, " params : port = %x, loopback_mode = %x req_duplex = %x",
+ params->port, params->loopback_mode,
+ params->req_duplex[0]);
+ ELINK_DEBUG_P3(sc, " params : switch_cfg = %x, lane_config = %x req_duplex[1] = %x",
+ params->switch_cfg, params->lane_config,
+ params->req_duplex[1]);
+ ELINK_DEBUG_P3(sc, " params : chip_id = %x, feature_config_flags = %x, num_phys = %x",
+ params->chip_id, params->feature_config_flags,
+ params->num_phys);
+ ELINK_DEBUG_P3(sc, " params : rsrv = %x, eee_mode = %x, hw_led_mode = %x",
+ params->rsrv, params->eee_mode, params->hw_led_mode);
+ ELINK_DEBUG_P3(sc, " params : multi_phy = %x, req_fc_auto_adv = %x, link_flags = %x",
+ params->multi_phy_config, params->req_fc_auto_adv,
+ params->link_flags);
+ ELINK_DEBUG_P2(sc, " params : lfa_base = %x, link_attr = %x",
+ params->lfa_base, params->link_attr_sync);
if (lfa_status == 0) {
- PMD_DRV_LOG(DEBUG, "Link Flap Avoidance in progress");
+ ELINK_DEBUG_P0(sc, "Link Flap Avoidance in progress");
return elink_avoid_link_flap(params, vars);
}
- PMD_DRV_LOG(DEBUG, "Cannot avoid link flap lfa_sta=0x%x", lfa_status);
+ ELINK_DEBUG_P1(sc, "Cannot avoid link flap lfa_sta=0x%x",
+ lfa_status);
elink_cannot_avoid_link_flap(params, vars, lfa_status);
/* Disable attentions */
@@ -12005,20 +14102,34 @@ elink_status_t elink_phy_init(struct elink_params *params,
ELINK_NIG_MASK_XGXS0_LINK10G |
ELINK_NIG_MASK_SERDES0_LINK_STATUS |
ELINK_NIG_MASK_MI_INT));
+#ifdef ELINK_INCLUDE_EMUL
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC))
+#endif
- elink_emac_init(params);
+ elink_emac_init(params, vars);
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
vars->link_status |= LINK_STATUS_PFC_ENABLED;
- if ((params->num_phys == 0) && !CHIP_REV_IS_SLOW(sc)) {
- PMD_DRV_LOG(DEBUG, "No phy found for initialization !!");
+ if ((params->num_phys == 0) &&
+ !CHIP_REV_IS_SLOW(sc)) {
+ ELINK_DEBUG_P0(sc, "No phy found for initialization !!");
return ELINK_STATUS_ERROR;
}
set_phy_vars(params, vars);
- PMD_DRV_LOG(DEBUG, "Num of phys on board: %d", params->num_phys);
-
+ ELINK_DEBUG_P1(sc, "Num of phys on board: %d", params->num_phys);
+#ifdef ELINK_INCLUDE_FPGA
+ if (CHIP_REV_IS_FPGA(sc)) {
+ return elink_init_fpga(params, vars);
+ } else
+#endif
+#ifdef ELINK_INCLUDE_EMUL
+ if (CHIP_REV_IS_EMUL(sc)) {
+ return elink_init_emul(params, vars);
+ } else
+#endif
switch (params->loopback_mode) {
case ELINK_LOOPBACK_BMAC:
elink_init_bmac_loopback(params, vars);
@@ -12054,15 +14165,16 @@ elink_status_t elink_phy_init(struct elink_params *params,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_link_reset(struct elink_params *params,
- struct elink_vars *vars,
- uint8_t reset_ext_phy)
+elink_status_t elink_link_reset(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t reset_ext_phy)
{
struct bnx2x_softc *sc = params->sc;
uint8_t phy_index, port = params->port, clear_latch_ind = 0;
- PMD_DRV_LOG(DEBUG, "Resetting the link of port %d", port);
+ ELINK_DEBUG_P1(sc, "Resetting the link of port %d", port);
/* Disable attentions */
vars->link_status = 0;
+ elink_chng_link_count(params, 1);
elink_update_mng(params, vars->link_status);
vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
SHMEM_EEE_ACTIVE_BIT);
@@ -12081,12 +14193,24 @@ static elink_status_t elink_link_reset(struct elink_params *params,
REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0);
REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0);
}
- if (!CHIP_IS_E3(sc))
- elink_set_bmac_rx(sc, port, 0);
- if (CHIP_IS_E3(sc) && !CHIP_REV_IS_FPGA(sc)) {
- elink_set_xmac_rxtx(params, 0);
- elink_set_umac_rxtx(params, 0);
- }
+
+#ifdef ELINK_INCLUDE_EMUL
+ /* Stop BigMac rx */
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC))
+#endif
+ if (!CHIP_IS_E3(sc))
+ elink_set_bmac_rx(sc, params->chip_id, port, 0);
+#ifdef ELINK_INCLUDE_EMUL
+ /* Stop XMAC/UMAC rx */
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC))
+#endif
+ if (CHIP_IS_E3(sc) &&
+ !CHIP_REV_IS_FPGA(sc)) {
+ elink_set_xmac_rxtx(params, 0);
+ elink_set_umac_rxtx(params, 0);
+ }
/* Disable emac */
if (!CHIP_IS_E3(sc))
REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0);
@@ -12095,20 +14219,19 @@ static elink_status_t elink_link_reset(struct elink_params *params,
/* The PHY reset is controlled by GPIO 1
* Hold it as vars low
*/
- /* Clear link led */
+ /* Clear link led */
elink_set_mdio_emac_per_phy(sc, params);
elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0);
if (reset_ext_phy && (!CHIP_REV_IS_SLOW(sc))) {
for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys;
- phy_index++) {
+ phy_index++) {
if (params->phy[phy_index].link_reset) {
elink_set_aer_mmd(params,
&params->phy[phy_index]);
- params->phy[phy_index].link_reset(&params->
- phy
- [phy_index],
- params);
+ params->phy[phy_index].link_reset(
+ &params->phy[phy_index],
+ params);
}
if (params->phy[phy_index].flags &
ELINK_FLAGS_REARM_LATCH_SIGNAL)
@@ -12122,11 +14245,12 @@ static elink_status_t elink_link_reset(struct elink_params *params,
elink_bits_dis(sc, NIG_REG_LATCH_BC_0 + port * 4,
1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT);
}
+#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
+ if (!CHIP_REV_IS_SLOW(sc))
+#endif
if (params->phy[ELINK_INT_PHY].link_reset)
- params->phy[ELINK_INT_PHY].link_reset(&params->
- phy
- [ELINK_INT_PHY],
- params);
+ params->phy[ELINK_INT_PHY].link_reset(
+ &params->phy[ELINK_INT_PHY], params);
/* Disable nig ingress interface */
if (!CHIP_IS_E3(sc)) {
@@ -12136,8 +14260,8 @@ static elink_status_t elink_link_reset(struct elink_params *params,
REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0);
REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0);
} else {
- uint32_t xmac_base =
- (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ uint32_t xmac_base = (params->port) ? GRCBASE_XMAC1 :
+ GRCBASE_XMAC0;
elink_set_xumac_nig(params, 0, 0);
if (REG_RD(sc, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)
@@ -12148,9 +14272,8 @@ static elink_status_t elink_link_reset(struct elink_params *params,
vars->phy_flags = 0;
return ELINK_STATUS_OK;
}
-
-elink_status_t elink_lfa_reset(struct elink_params * params,
- struct elink_vars * vars)
+elink_status_t elink_lfa_reset(struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
vars->link_up = 0;
@@ -12169,13 +14292,13 @@ elink_status_t elink_lfa_reset(struct elink_params * params,
* are passed.
*/
if (!CHIP_IS_E3(sc))
- elink_set_bmac_rx(sc, params->port, 0);
+ elink_set_bmac_rx(sc, params->chip_id, params->port, 0);
if (CHIP_IS_E3(sc)) {
elink_set_xmac_rxtx(params, 0);
elink_set_umac_rxtx(params, 0);
}
- /* Wait 10ms for the pipe to clean up */
+ /* Wait 10ms for the pipe to clean up*/
DELAY(1000 * 10);
/* Clean the NIG-BRB using the network filters in a way that will
@@ -12190,7 +14313,7 @@ elink_status_t elink_lfa_reset(struct elink_params * params,
* minimum management protocol down time.
*/
if (!CHIP_IS_E3(sc))
- elink_set_bmac_rx(sc, params->port, 1);
+ elink_set_bmac_rx(sc, params->chip_id, params->port, 1);
if (CHIP_IS_E3(sc)) {
elink_set_xmac_rxtx(params, 1);
@@ -12205,10 +14328,10 @@ elink_status_t elink_lfa_reset(struct elink_params * params,
/* Common function */
/****************************************************************************/
static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
- uint32_t shmem_base_path[],
- uint32_t shmem2_base_path[],
- uint8_t phy_index,
- __rte_unused uint32_t chip_id)
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ __rte_unused uint32_t chip_id)
{
struct elink_phy phy[PORT_MAX];
struct elink_phy *phy_blk[PORT_MAX];
@@ -12216,8 +14339,8 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
int8_t port = 0;
int8_t port_of_path = 0;
uint32_t swap_val, swap_override;
- swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
+ swap_val = REG_RD(sc, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE);
port ^= (swap_val && swap_override);
elink_ext_phy_hw_reset(sc, port);
/* PART1 - Reset both phys */
@@ -12238,7 +14361,7 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
port_of_path, &phy[port]) !=
ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate_phy failed");
+ ELINK_DEBUG_P0(sc, "populate_phy failed");
return ELINK_STATUS_ERROR;
}
/* Disable attentions */
@@ -12253,11 +14376,14 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
* to write to access its registers
*/
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
/* Reset the phy */
elink_cl45_write(sc, &phy[port],
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1 << 15);
}
/* Add delay of 150ms after reset */
@@ -12278,8 +14404,8 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
else
port_of_path = 0;
- PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x",
- phy_blk[port]->addr);
+ ELINK_DEBUG_P1(sc, "Loading spirom for phy address 0x%x",
+ phy_blk[port]->addr);
if (elink_8073_8727_external_rom_boot(sc, phy_blk[port],
port_of_path))
return ELINK_STATUS_ERROR;
@@ -12292,7 +14418,8 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
/* Phase1 of TX_POWER_DOWN reset */
elink_cl45_write(sc, phy_blk[port],
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val | 1 << 10));
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1 << 10));
}
/* Toggle Transmitter: Power down and then up with 600ms delay
@@ -12309,9 +14436,9 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
MDIO_PMA_REG_TX_POWER_DOWN, &val);
elink_cl45_write(sc, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN,
- (val & (~(1 << 10))));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val & (~(1 << 10))));
DELAY(1000 * 15);
/* Read modify write the SPI-ROM version select register */
@@ -12324,16 +14451,15 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
/* set GPIO2 back to LOW */
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
return ELINK_STATUS_OK;
}
-
static elink_status_t elink_8726_common_init_phy(struct bnx2x_softc *sc,
- uint32_t shmem_base_path[],
- uint32_t shmem2_base_path[],
- uint8_t phy_index,
- __rte_unused uint32_t chip_id)
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ __rte_unused uint32_t chip_id)
{
uint32_t val;
int8_t port;
@@ -12342,8 +14468,8 @@ static elink_status_t elink_8726_common_init_phy(struct bnx2x_softc *sc,
/* Enable the module detection interrupt */
val = REG_RD(sc, MISC_REG_GPIO_EVENT_EN);
val |= ((1 << MISC_REGISTERS_GPIO_3) |
- (1 <<
- (MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+ (1 << (MISC_REGISTERS_GPIO_3 +
+ MISC_REGISTERS_GPIO_PORT_SHIFT)));
REG_WR(sc, MISC_REG_GPIO_EVENT_EN, val);
elink_ext_phy_hw_reset(sc, 0);
@@ -12361,33 +14487,33 @@ static elink_status_t elink_8726_common_init_phy(struct bnx2x_softc *sc,
}
/* Extract the ext phy address for the port */
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
- port, &phy) != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ port, &phy) !=
+ ELINK_STATUS_OK) {
+ ELINK_DEBUG_P0(sc, "populate phy failed");
return ELINK_STATUS_ERROR;
}
- /* Reset phy */
+ /* Reset phy*/
elink_cl45_write(sc, &phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
/* Set fault module detected LED on */
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH, port);
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
}
return ELINK_STATUS_OK;
}
-
static void elink_get_ext_phy_reset_gpio(struct bnx2x_softc *sc,
- uint32_t shmem_base, uint8_t * io_gpio,
- uint8_t * io_port)
+ uint32_t shmem_base,
+ uint8_t *io_gpio, uint8_t *io_port)
{
uint32_t phy_gpio_reset = REG_RD(sc, shmem_base +
- offsetof(struct shmem_region,
- dev_info.
- port_hw_config[PORT_0].
- default_cfg));
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
switch (phy_gpio_reset) {
case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
*io_gpio = 0;
@@ -12428,10 +14554,10 @@ static void elink_get_ext_phy_reset_gpio(struct bnx2x_softc *sc,
}
static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
- uint32_t shmem_base_path[],
- uint32_t shmem2_base_path[],
- uint8_t phy_index,
- __rte_unused uint32_t chip_id)
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ __rte_unused uint32_t chip_id)
{
int8_t port, reset_gpio;
uint32_t swap_val, swap_override;
@@ -12448,18 +14574,17 @@ static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
* Default is GPIO1, PORT1
*/
elink_get_ext_phy_reset_gpio(sc, shmem_base_path[0],
- (uint8_t *) & reset_gpio,
- (uint8_t *) & port);
+ (uint8_t *)&reset_gpio, (uint8_t *)&port);
/* Calculate the port based on port swap */
port ^= (swap_val && swap_override);
- /* Initiate PHY reset */
+ /* Initiate PHY reset*/
elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
+ port);
DELAY(1000 * 1);
elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- port);
+ port);
DELAY(1000 * 5);
@@ -12481,8 +14606,8 @@ static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
/* Extract the ext phy address for the port */
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
port_of_path, &phy[port]) !=
- ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ ELINK_STATUS_OK) {
+ ELINK_DEBUG_P0(sc, "populate phy failed");
return ELINK_STATUS_ERROR;
}
/* disable attentions */
@@ -12493,6 +14618,7 @@ static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
ELINK_NIG_MASK_SERDES0_LINK_STATUS |
ELINK_NIG_MASK_MI_INT));
+
/* Reset the phy */
elink_cl45_write(sc, &phy[port],
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
@@ -12513,25 +14639,25 @@ static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
port_of_path = port;
else
port_of_path = 0;
- PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x",
- phy_blk[port]->addr);
+ ELINK_DEBUG_P1(sc, "Loading spirom for phy address 0x%x",
+ phy_blk[port]->addr);
if (elink_8073_8727_external_rom_boot(sc, phy_blk[port],
port_of_path))
return ELINK_STATUS_ERROR;
/* Disable PHY transmitter output */
elink_cl45_write(sc, phy_blk[port],
- MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_DISABLE, 1);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, 1);
}
return ELINK_STATUS_OK;
}
static elink_status_t elink_84833_common_init_phy(struct bnx2x_softc *sc,
- uint32_t shmem_base_path[],
- __rte_unused uint32_t
- shmem2_base_path[],
- __rte_unused uint8_t
- phy_index, uint32_t chip_id)
+ uint32_t shmem_base_path[],
+ __rte_unused uint32_t shmem2_base_path[],
+ __rte_unused uint8_t phy_index,
+ uint32_t chip_id)
{
uint8_t reset_gpios;
reset_gpios = elink_84833_get_reset_gpios(sc, shmem_base_path, chip_id);
@@ -12540,16 +14666,15 @@ static elink_status_t elink_84833_common_init_phy(struct bnx2x_softc *sc,
DELAY(10);
elink_cb_gpio_mult_write(sc, reset_gpios,
MISC_REGISTERS_GPIO_OUTPUT_HIGH);
- PMD_DRV_LOG(DEBUG, "84833 reset pulse on pin values 0x%x", reset_gpios);
+ ELINK_DEBUG_P1(sc, "84833 reset pulse on pin values 0x%x",
+ reset_gpios);
return ELINK_STATUS_OK;
}
-
static elink_status_t elink_ext_phy_common_init(struct bnx2x_softc *sc,
- uint32_t shmem_base_path[],
- uint32_t shmem2_base_path[],
- uint8_t phy_index,
- uint32_t ext_phy_type,
- uint32_t chip_id)
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[],
+ uint8_t phy_index,
+ uint32_t ext_phy_type, uint32_t chip_id)
{
elink_status_t rc = ELINK_STATUS_OK;
@@ -12577,44 +14702,50 @@ static elink_status_t elink_ext_phy_common_init(struct bnx2x_softc *sc,
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84858:
/* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
rc = elink_84833_common_init_phy(sc, shmem_base_path,
- shmem2_base_path,
- phy_index, chip_id);
+ shmem2_base_path,
+ phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
rc = ELINK_STATUS_ERROR;
break;
default:
- PMD_DRV_LOG(DEBUG,
- "ext_phy 0x%x common init not required",
- ext_phy_type);
+ ELINK_DEBUG_P1(sc,
+ "ext_phy 0x%x common init not required",
+ ext_phy_type);
break;
}
if (rc != ELINK_STATUS_OK)
- elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, 0); // "Warning: PHY was not initialized,"
- // " Port %d",
+ elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, 0);
+ /* "Warning: PHY was not initialized,"
+ * " Port %d",
+ */
return rc;
}
-elink_status_t elink_common_init_phy(struct bnx2x_softc * sc,
- uint32_t shmem_base_path[],
- uint32_t shmem2_base_path[],
- uint32_t chip_id,
- __rte_unused uint8_t one_port_enabled)
+elink_status_t elink_common_init_phy(struct bnx2x_softc *sc,
+ uint32_t shmem_base_path[],
+ uint32_t shmem2_base_path[], uint32_t chip_id,
+ __rte_unused uint8_t one_port_enabled)
{
elink_status_t rc = ELINK_STATUS_OK;
uint32_t phy_ver, val;
uint8_t phy_index = 0;
uint32_t ext_phy_type, ext_phy_config;
+#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
+ if (CHIP_REV_IS_EMUL(sc) || CHIP_REV_IS_FPGA(sc))
+ return ELINK_STATUS_OK;
+#endif
- elink_set_mdio_clk(sc, GRCBASE_EMAC0);
- elink_set_mdio_clk(sc, GRCBASE_EMAC1);
- PMD_DRV_LOG(DEBUG, "Begin common phy init");
+ elink_set_mdio_clk(sc, chip_id, GRCBASE_EMAC0);
+ elink_set_mdio_clk(sc, chip_id, GRCBASE_EMAC1);
+ ELINK_DEBUG_P0(sc, "Begin common phy init");
if (CHIP_IS_E3(sc)) {
/* Enable EPIO */
val = REG_RD(sc, MISC_REG_GEN_PURP_HWG);
@@ -12625,14 +14756,14 @@ elink_status_t elink_common_init_phy(struct bnx2x_softc * sc,
offsetof(struct shmem_region,
port_mb[PORT_0].ext_phy_fw_version));
if (phy_ver) {
- PMD_DRV_LOG(DEBUG, "Not doing common init; phy ver is 0x%x",
- phy_ver);
+ ELINK_DEBUG_P1(sc, "Not doing common init; phy ver is 0x%x",
+ phy_ver);
return ELINK_STATUS_OK;
}
/* Read the ext_phy_type for arbitrary port(0) */
for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS;
- phy_index++) {
+ phy_index++) {
ext_phy_config = elink_get_ext_phy_config(sc,
shmem_base_path[0],
phy_index, 0);
@@ -12655,10 +14786,9 @@ static void elink_check_over_curr(struct elink_params *params,
cfg_pin = (REG_RD(sc, params->shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].
- e3_cmn_pin_cfg1)) &
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
- PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+ PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
/* Ignore check if no external input PIN available */
if (elink_get_cfg_pin(sc, cfg_pin, &pin_val) != ELINK_STATUS_OK)
@@ -12666,13 +14796,16 @@ static void elink_check_over_curr(struct elink_params *params,
if (!pin_val) {
if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
- elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, params->port); //"Error: Power fault on Port %d has"
- // " been detected and the power to "
- // "that SFP+ module has been removed"
- // " to prevent failure of the card."
- // " Please remove the SFP+ module and"
- // " restart the system to clear this"
- // " error.",
+ elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT,
+ params->port);
+ /* "Error: Power fault on Port %d has"
+ * " been detected and the power to "
+ * "that SFP+ module has been removed"
+ * " to prevent failure of the card."
+ * " Please remove the SFP+ module and"
+ * " restart the system to clear this"
+ * " error.",
+ */
vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
elink_warpcore_power_module(params, 0);
}
@@ -12682,9 +14815,9 @@ static void elink_check_over_curr(struct elink_params *params,
/* Returns 0 if no change occurred since last check; 1 otherwise. */
static uint8_t elink_analyze_link_error(struct elink_params *params,
- struct elink_vars *vars,
- uint32_t status, uint32_t phy_flag,
- uint32_t link_flag, uint8_t notify)
+ struct elink_vars *vars, uint32_t status,
+ uint32_t phy_flag, uint32_t link_flag,
+ uint8_t notify)
{
struct bnx2x_softc *sc = params->sc;
/* Compare new value with previous value */
@@ -12697,16 +14830,20 @@ static uint8_t elink_analyze_link_error(struct elink_params *params,
/* If values differ */
switch (phy_flag) {
case PHY_HALF_OPEN_CONN_FLAG:
- PMD_DRV_LOG(DEBUG, "Analyze Remote Fault");
+ ELINK_DEBUG_P0(sc, "Analyze Remote Fault");
break;
case PHY_SFP_TX_FAULT_FLAG:
- PMD_DRV_LOG(DEBUG, "Analyze TX Fault");
+ ELINK_DEBUG_P0(sc, "Analyze TX Fault");
break;
default:
- PMD_DRV_LOG(DEBUG, "Analyze UNKNOWN");
+ ELINK_DEBUG_P0(sc, "Analyze UNKNOWN");
}
- PMD_DRV_LOG(DEBUG, "Link changed:[%x %x]->%x", vars->link_up,
- old_status, status);
+ ELINK_DEBUG_P3(sc, "Link changed:[%x %x]->%x", vars->link_up,
+ old_status, status);
+
+ /* Do not touch the link in case physical link down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
/* a. Update shmem->link_status accordingly
* b. Update elink_vars->link_up
@@ -12749,17 +14886,18 @@ static uint8_t elink_analyze_link_error(struct elink_params *params,
}
/******************************************************************************
-* Description:
-* This function checks for half opened connection change indication.
-* When such change occurs, it calls the elink_analyze_link_error
-* to check if Remote Fault is set or cleared. Reception of remote fault
-* status message in the MAC indicates that the peer's MAC has detected
-* a fault, for example, due to break in the TX side of fiber.
-*
-******************************************************************************/
-static elink_status_t elink_check_half_open_conn(struct elink_params *params,
- struct elink_vars *vars,
- uint8_t notify)
+ * Description:
+ * This function checks for half opened connection change indication.
+ * When such change occurs, it calls the elink_analyze_link_error
+ * to check if Remote Fault is set or cleared. Reception of remote fault
+ * status message in the MAC indicates that the peer's MAC has detected
+ * a fault, for example, due to break in the TX side of fiber.
+ *
+ ******************************************************************************/
+static
+elink_status_t elink_check_half_open_conn(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t notify)
{
struct bnx2x_softc *sc = params->sc;
uint32_t lss_status = 0;
@@ -12771,7 +14909,7 @@ static elink_status_t elink_check_half_open_conn(struct elink_params *params,
if (CHIP_IS_E3(sc) &&
(REG_RD(sc, MISC_REG_RESET_REG_2) &
- (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
/* Check E3 XMAC */
/* Note that link speed cannot be queried here, since it may be
* zero while link is down. In case UMAC is active, LSS will
@@ -12796,7 +14934,7 @@ static elink_status_t elink_check_half_open_conn(struct elink_params *params,
uint32_t lss_status_reg;
uint32_t wb_data[2];
mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
/* Read BIGMAC_REGISTER_RX_LSS_STATUS */
if (CHIP_IS_E2(sc))
lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
@@ -12812,7 +14950,6 @@ static elink_status_t elink_check_half_open_conn(struct elink_params *params,
}
return ELINK_STATUS_OK;
}
-
static void elink_sfp_tx_fault_detection(struct elink_phy *phy,
struct elink_params *params,
struct elink_vars *vars)
@@ -12823,15 +14960,12 @@ static void elink_sfp_tx_fault_detection(struct elink_phy *phy,
/* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
cfg_pin = (REG_RD(sc, params->shmem_base + offsetof(struct shmem_region,
- dev_info.
- port_hw_config
- [port].
- e3_cmn_pin_cfg)) &
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
PORT_HW_CFG_E3_TX_FAULT_MASK) >>
- PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
if (elink_get_cfg_pin(sc, cfg_pin, &value)) {
- PMD_DRV_LOG(DEBUG, "Failed to read pin 0x%02x", cfg_pin);
+ ELINK_DEBUG_P1(sc, "Failed to read pin 0x%02x", cfg_pin);
return;
}
@@ -12853,24 +14987,25 @@ static void elink_sfp_tx_fault_detection(struct elink_phy *phy,
/* If module is unapproved, led should be on regardless */
if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) {
- PMD_DRV_LOG(DEBUG, "Change TX_Fault LED: ->%x",
- led_mode);
+ ELINK_DEBUG_P1(sc, "Change TX_Fault LED: ->%x",
+ led_mode);
elink_set_e3_module_fault_led(params, led_mode);
}
}
}
-
static void elink_kr2_recovery(struct elink_params *params,
- struct elink_vars *vars, struct elink_phy *phy)
+ struct elink_vars *vars,
+ struct elink_phy *phy)
{
- PMD_DRV_LOG(DEBUG, "KR2 recovery");
-
+ struct bnx2x_softc *sc = params->sc;
+ ELINK_DEBUG_P0(sc, "KR2 recovery");
elink_warpcore_enable_AN_KR2(phy, params, vars);
elink_warpcore_restart_AN_KR(phy, params);
}
static void elink_check_kr2_wa(struct elink_params *params,
- struct elink_vars *vars, struct elink_phy *phy)
+ struct elink_vars *vars,
+ struct elink_phy *phy)
{
struct bnx2x_softc *sc = params->sc;
uint16_t base_page, next_page, not_kr2_device, lane;
@@ -12888,14 +15023,14 @@ static void elink_check_kr2_wa(struct elink_params *params,
sigdet = elink_warpcore_get_sigdet(phy, params);
if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
elink_kr2_recovery(params, vars, phy);
- PMD_DRV_LOG(DEBUG, "No sigdet");
+ ELINK_DEBUG_P0(sc, "No sigdet");
}
return;
}
- lane = elink_get_warpcore_lane(params);
+ lane = elink_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
@@ -12906,9 +15041,9 @@ static void elink_check_kr2_wa(struct elink_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
elink_kr2_recovery(params, vars, phy);
- PMD_DRV_LOG(DEBUG, "No BP");
+ ELINK_DEBUG_P0(sc, "No BP");
}
return;
}
@@ -12922,10 +15057,10 @@ static void elink_check_kr2_wa(struct elink_params *params,
((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
- PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page,
- next_page);
+ ELINK_DEBUG_P2(sc, "BP=0x%x, NP=0x%x", base_page,
+ next_page);
elink_kr2_recovery(params, vars, phy);
}
return;
@@ -12933,7 +15068,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
/* KR2 is enabled, but not KR2 device */
if (not_kr2_device) {
/* Disable KR2 on both lanes */
- PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page, next_page);
+ ELINK_DEBUG_P2(sc, "BP=0x%x, NP=0x%x", base_page, next_page);
elink_disable_kr2(params, vars, phy);
/* Restart AN on leading lane */
elink_warpcore_restart_AN_KR(phy, params);
@@ -12949,9 +15084,8 @@ void elink_period_func(struct elink_params *params, struct elink_vars *vars)
if (params->phy[phy_idx].flags & ELINK_FLAGS_TX_ERROR_CHECK) {
elink_set_aer_mmd(params, &params->phy[phy_idx]);
if (elink_check_half_open_conn(params, vars, 1) !=
- ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Fault detection failed");
- }
+ ELINK_STATUS_OK)
+ ELINK_DEBUG_P0(sc, "Fault detection failed");
break;
}
}
@@ -12959,22 +15093,24 @@ void elink_period_func(struct elink_params *params, struct elink_vars *vars)
if (CHIP_IS_E3(sc)) {
struct elink_phy *phy = &params->phy[ELINK_INT_PHY];
elink_set_aer_mmd(params, phy);
- if ((phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) &&
- (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+ if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+ (phy->req_line_speed == ELINK_SPEED_20000))
elink_check_kr2_wa(params, vars, phy);
elink_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)
elink_warpcore_config_runtime(phy, params, vars);
if ((REG_RD(sc, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].
- default_cfg))
- & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
PORT_HW_CFG_NET_SERDES_IF_SFI) {
- if (elink_is_sfp_module_plugged(params)) {
+ if (elink_is_sfp_module_plugged(phy, params)) {
elink_sfp_tx_fault_detection(phy, params, vars);
- } else if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) {
+ } else if (vars->link_status &
+ LINK_STATUS_SFP_TX_FAULT) {
/* Clean trail, interrupt corrects the leds */
vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
@@ -12986,17 +15122,18 @@ void elink_period_func(struct elink_params *params, struct elink_vars *vars)
}
uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc,
- uint32_t shmem_base,
- uint32_t shmem2_base, uint8_t port)
+ uint32_t shmem_base,
+ uint32_t shmem2_base,
+ uint8_t port)
{
uint8_t phy_index, fan_failure_det_req = 0;
struct elink_phy phy;
for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS;
- phy_index++) {
+ phy_index++) {
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
port, &phy)
!= ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ ELINK_DEBUG_P0(sc, "populate phy failed");
return 0;
}
fan_failure_det_req |= (phy.flags &
@@ -13016,24 +15153,27 @@ void elink_hw_reset_phy(struct elink_params *params)
ELINK_NIG_MASK_SERDES0_LINK_STATUS |
ELINK_NIG_MASK_MI_INT));
- for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; phy_index++) {
+ for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS;
+ phy_index++) {
if (params->phy[phy_index].hw_reset) {
- params->phy[phy_index].hw_reset(&params->phy[phy_index],
- params);
+ params->phy[phy_index].hw_reset(
+ &params->phy[phy_index],
+ params);
params->phy[phy_index] = phy_null;
}
}
}
void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
- __rte_unused uint32_t chip_id, uint32_t shmem_base,
- uint32_t shmem2_base, uint8_t port)
+ uint32_t chip_id, uint32_t shmem_base,
+ uint32_t shmem2_base,
+ uint8_t port)
{
uint8_t gpio_num = 0xff, gpio_port = 0xff, phy_index;
uint32_t val;
uint32_t offset, aeu_mask, swap_val, swap_override, sync_offset;
if (CHIP_IS_E3(sc)) {
- if (elink_get_mod_abs_int_cfg(sc,
+ if (elink_get_mod_abs_int_cfg(sc, chip_id,
shmem_base,
port,
&gpio_num,
@@ -13042,11 +15182,11 @@ void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
} else {
struct elink_phy phy;
for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS;
- phy_index++) {
+ phy_index++) {
if (elink_populate_phy(sc, phy_index, shmem_base,
shmem2_base, port, &phy)
!= ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ ELINK_DEBUG_P0(sc, "populate phy failed");
return;
}
if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726) {
@@ -13069,15 +15209,15 @@ void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
gpio_port ^= (swap_val && swap_override);
vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
- (gpio_num + (gpio_port << 2));
+ (gpio_num + (gpio_port << 2));
sync_offset = shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].aeu_int_mask);
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
REG_WR(sc, sync_offset, vars->aeu_int_mask);
- PMD_DRV_LOG(DEBUG, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x",
- gpio_num, gpio_port, vars->aeu_int_mask);
+ ELINK_DEBUG_P3(sc, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x",
+ gpio_num, gpio_port, vars->aeu_int_mask);
if (port == 0)
offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index 40000c24..c8b08bc3 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -14,7 +14,7 @@
#ifndef ELINK_H
#define ELINK_H
-#define ELINK_DEBUG
+#include "bnx2x_logs.h"
@@ -29,6 +29,11 @@ struct bnx2x_softc;
extern uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr);
extern void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val);
+/* wb_write - pointer to two 32-bit values to be passed to the DMAE */
+extern void elink_cb_reg_wb_write(struct bnx2x_softc *sc, uint32_t offset,
+ uint32_t *wb_write, uint16_t len);
+extern void elink_cb_reg_wb_read(struct bnx2x_softc *sc, uint32_t offset,
+ uint32_t *wb_write, uint16_t len);
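[Editor's note] The wide-bus callbacks declared above take a pointer to two 32-bit words plus a word count; a minimal sketch of how a 64-bit value could be split for them is shown below (illustrative helper only, not part of the patch).

/* Illustrative only: split a 64-bit value into the two 32-bit words
 * expected by elink_cb_reg_wb_write(); len is the number of words. */
static void example_wb_write64(struct bnx2x_softc *sc, uint32_t offset,
			       uint64_t val)
{
	uint32_t wb_data[2];

	wb_data[0] = (uint32_t)val;         /* low word  */
	wb_data[1] = (uint32_t)(val >> 32); /* high word */
	elink_cb_reg_wb_write(sc, offset, wb_data, 2);
}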
/* mode - 0( LOW ) /1(HIGH)*/
extern uint8_t elink_cb_gpio_write(struct bnx2x_softc *sc,
@@ -45,6 +50,9 @@ extern uint8_t elink_cb_gpio_int_write(struct bnx2x_softc *sc,
extern uint32_t elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param);
+/* Delay */
+extern void elink_cb_udelay(struct bnx2x_softc *sc, uint32_t microsecond);
+
/* This function is called every 1024 bytes downloading of phy firmware.
Driver can use it to print to screen indication for download progress */
extern void elink_cb_download_progress(struct bnx2x_softc *sc, uint32_t cur, uint32_t total);
@@ -69,6 +77,8 @@ typedef enum elink_status {
extern void elink_cb_event_log(struct bnx2x_softc *sc, const elink_log_id_t log_id, ...);
extern void elink_cb_load_warpcore_microcode(void);
+extern uint8_t elink_cb_path_id(struct bnx2x_softc *sc);
+
extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc);
#define ELINK_EVENT_LOG_LEVEL_ERROR 1
@@ -78,6 +88,32 @@ extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc);
#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
/* Debug prints */
+#ifdef ELINK_DEBUG
+
+extern void elink_cb_dbg(struct bnx2x_softc *sc, const char *fmt);
+extern void elink_cb_dbg1(struct bnx2x_softc *sc, const char *fmt,
+ uint32_t arg1);
+extern void elink_cb_dbg2(struct bnx2x_softc *sc, const char *fmt,
+ uint32_t arg1, uint32_t arg2);
+extern void elink_cb_dbg3(struct bnx2x_softc *sc, const char *fmt,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3);
+
+#define ELINK_DEBUG_P0(sc, fmt) elink_cb_dbg(sc, fmt)
+#define ELINK_DEBUG_P1(sc, fmt, arg1) elink_cb_dbg1(sc, fmt, arg1)
+#define ELINK_DEBUG_P2(sc, fmt, arg1, arg2) \
+ elink_cb_dbg2(sc, fmt, arg1, arg2)
+#define ELINK_DEBUG_P3(sc, fmt, arg1, arg2, arg3) \
+ elink_cb_dbg3(sc, fmt, arg1, arg2, arg3)
+#else
+#define ELINK_DEBUG_P0(sc, fmt) PMD_DRV_LOG(DEBUG, sc, fmt)
+#define ELINK_DEBUG_P1(sc, fmt, arg1) \
+ PMD_DRV_LOG(DEBUG, sc, fmt, arg1)
+#define ELINK_DEBUG_P2(sc, fmt, arg1, arg2) \
+ PMD_DRV_LOG(DEBUG, sc, fmt, arg1, arg2)
+#define ELINK_DEBUG_P3(sc, fmt, arg1, arg2, arg3) \
+ PMD_DRV_LOG(DEBUG, sc, fmt, arg1, arg2, arg3)
+#endif
/***********************************************************/
/* Defines */
@@ -126,9 +162,12 @@ extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc);
#define ELINK_SFP_EEPROM_DATE_SIZE 6
#define ELINK_SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
#define ELINK_SFP_EEPROM_DIAG_TYPE_SIZE 1
-#define ELINK_SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define ELINK_SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1 << 2)
#define ELINK_SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
#define ELINK_SFP_EEPROM_SFF_8472_COMP_SIZE 1
+#define ELINK_SFP_EEPROM_VENDOR_SPECIFIC_ADDR 0x60
+#define ELINK_SFP_EEPROM_VENDOR_SPECIFIC_SIZE 16
+
#define ELINK_SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
#define ELINK_SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
@@ -199,7 +238,7 @@ typedef void (*link_reset_t)(struct elink_phy *phy,
struct elink_params *params);
typedef void (*config_loopback_t)(struct elink_phy *phy,
struct elink_params *params);
-typedef uint8_t (*format_fw_ver_t)(uint32_t raw, uint8_t *str, uint16_t *len);
+typedef elink_status_t (*format_fw_ver_t)(uint32_t raw, uint8_t *str, uint16_t *len);
typedef void (*hw_reset_t)(struct elink_phy *phy, struct elink_params *params);
typedef void (*set_link_led_t)(struct elink_phy *phy,
struct elink_params *params, uint8_t mode);
@@ -219,23 +258,23 @@ struct elink_phy {
uint8_t def_md_devad;
uint16_t flags;
/* No Over-Current detection */
-#define ELINK_FLAGS_NOC (1<<1)
+#define ELINK_FLAGS_NOC (1 << 1)
/* Fan failure detection required */
-#define ELINK_FLAGS_FAN_FAILURE_DET_REQ (1<<2)
+#define ELINK_FLAGS_FAN_FAILURE_DET_REQ (1 << 2)
/* Initialize first the XGXS and only then the phy itself */
-#define ELINK_FLAGS_INIT_XGXS_FIRST (1<<3)
-#define ELINK_FLAGS_WC_DUAL_MODE (1<<4)
-#define ELINK_FLAGS_4_PORT_MODE (1<<5)
-#define ELINK_FLAGS_REARM_LATCH_SIGNAL (1<<6)
-#define ELINK_FLAGS_SFP_NOT_APPROVED (1<<7)
-#define ELINK_FLAGS_MDC_MDIO_WA (1<<8)
-#define ELINK_FLAGS_DUMMY_READ (1<<9)
-#define ELINK_FLAGS_MDC_MDIO_WA_B0 (1<<10)
-#define ELINK_FLAGS_SFP_MODULE_PLUGGED_IN_WC (1<<11)
-#define ELINK_FLAGS_TX_ERROR_CHECK (1<<12)
-#define ELINK_FLAGS_EEE (1<<13)
-#define ELINK_FLAGS_TEMPERATURE (1<<14)
-#define ELINK_FLAGS_MDC_MDIO_WA_G (1<<15)
+#define ELINK_FLAGS_INIT_XGXS_FIRST (1 << 3)
+#define ELINK_FLAGS_WC_DUAL_MODE (1 << 4)
+#define ELINK_FLAGS_4_PORT_MODE (1 << 5)
+#define ELINK_FLAGS_REARM_LATCH_SIGNAL (1 << 6)
+#define ELINK_FLAGS_SFP_NOT_APPROVED (1 << 7)
+#define ELINK_FLAGS_MDC_MDIO_WA (1 << 8)
+#define ELINK_FLAGS_DUMMY_READ (1 << 9)
+#define ELINK_FLAGS_MDC_MDIO_WA_B0 (1 << 10)
+#define ELINK_FLAGS_SFP_MODULE_PLUGGED_IN_WC (1 << 11)
+#define ELINK_FLAGS_TX_ERROR_CHECK (1 << 12)
+#define ELINK_FLAGS_EEE (1 << 13)
+#define ELINK_FLAGS_TEMPERATURE (1 << 14)
+#define ELINK_FLAGS_MDC_MDIO_WA_G (1 << 15)
/* preemphasis values for the rx side */
uint16_t rx_preemphasis[4];
@@ -247,20 +286,22 @@ struct elink_phy {
uint32_t mdio_ctrl;
uint32_t supported;
-#define ELINK_SUPPORTED_10baseT_Half (1<<0)
-#define ELINK_SUPPORTED_10baseT_Full (1<<1)
-#define ELINK_SUPPORTED_100baseT_Half (1<<2)
-#define ELINK_SUPPORTED_100baseT_Full (1<<3)
-#define ELINK_SUPPORTED_1000baseT_Full (1<<4)
-#define ELINK_SUPPORTED_2500baseX_Full (1<<5)
-#define ELINK_SUPPORTED_10000baseT_Full (1<<6)
-#define ELINK_SUPPORTED_TP (1<<7)
-#define ELINK_SUPPORTED_FIBRE (1<<8)
-#define ELINK_SUPPORTED_Autoneg (1<<9)
-#define ELINK_SUPPORTED_Pause (1<<10)
-#define ELINK_SUPPORTED_Asym_Pause (1<<11)
-#define ELINK_SUPPORTED_20000baseMLD2_Full (1<<21)
-#define ELINK_SUPPORTED_20000baseKR2_Full (1<<22)
+#define ELINK_SUPPORTED_10baseT_Half (1 << 0)
+#define ELINK_SUPPORTED_10baseT_Full (1 << 1)
+#define ELINK_SUPPORTED_100baseT_Half (1 << 2)
+#define ELINK_SUPPORTED_100baseT_Full (1 << 3)
+#define ELINK_SUPPORTED_1000baseT_Full (1 << 4)
+#define ELINK_SUPPORTED_2500baseX_Full (1 << 5)
+#define ELINK_SUPPORTED_10000baseT_Full (1 << 6)
+#define ELINK_SUPPORTED_TP (1 << 7)
+#define ELINK_SUPPORTED_FIBRE (1 << 8)
+#define ELINK_SUPPORTED_Autoneg (1 << 9)
+#define ELINK_SUPPORTED_Pause (1 << 10)
+#define ELINK_SUPPORTED_Asym_Pause (1 << 11)
+#define ELINK_SUPPORTED_1000baseKX_Full (1 << 17)
+#define ELINK_SUPPORTED_10000baseKR_Full (1 << 19)
+#define ELINK_SUPPORTED_20000baseMLD2_Full (1 << 21)
+#define ELINK_SUPPORTED_20000baseKR2_Full (1 << 22)
uint32_t media_type;
#define ELINK_ETH_PHY_UNSPECIFIED 0x0
@@ -353,17 +394,22 @@ struct elink_params {
/* features */
uint32_t feature_config_flags;
-#define ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define ELINK_FEATURE_CONFIG_PFC_ENABLED (1<<1)
-#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
-#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
-#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
-#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
-#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
-#define ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
-#define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST (1<<12)
-#define ELINK_FEATURE_CONFIG_MT_SUPPORT (1<<13)
-#define ELINK_FEATURE_CONFIG_BOOT_FROM_SAN (1<<14)
+#define ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1 << 0)
+#define ELINK_FEATURE_CONFIG_PFC_ENABLED (1 << 1)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1 << 2)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1 << 3)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC (1 << 4)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC (1 << 5)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC (1 << 6)
+#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC (1 << 7)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX (1 << 8)
+#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED (1 << 9)
+#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1 << 10)
+#define ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1 << 11)
+#define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST (1 << 12)
+#define ELINK_FEATURE_CONFIG_MT_SUPPORT (1 << 13)
+#define ELINK_FEATURE_CONFIG_BOOT_FROM_SAN (1 << 14)
+#define ELINK_FEATURE_CONFIG_DISABLE_PD (1 << 15)
/* Will be populated during common init */
struct elink_phy phy[ELINK_MAX_PHYS];
@@ -391,10 +437,10 @@ struct elink_params {
#define ELINK_EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
#define ELINK_EEE_MODE_NVRAM_MASK (0x3)
#define ELINK_EEE_MODE_TIMER_MASK (0xfffff)
-#define ELINK_EEE_MODE_OUTPUT_TIME (1<<28)
-#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1<<29)
-#define ELINK_EEE_MODE_ENABLE_LPI (1<<30)
-#define ELINK_EEE_MODE_ADV_LPI (1<<31)
+#define ELINK_EEE_MODE_OUTPUT_TIME (1 << 28)
+#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1 << 29)
+#define ELINK_EEE_MODE_ENABLE_LPI (1 << 30)
+#define ELINK_EEE_MODE_ADV_LPI (1 << 31)
uint16_t hw_led_mode; /* part of the hw_config read from the shmem */
uint32_t multi_phy_config;
@@ -404,20 +450,23 @@ struct elink_params {
uint16_t req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
uint16_t link_flags;
-#define ELINK_LINK_FLAGS_INT_DISABLED (1<<0)
-#define ELINK_PHY_INITIALIZED (1<<1)
+#define ELINK_LINK_FLAGS_INT_DISABLED (1 << 0)
+#define ELINK_PHY_INITIALIZED (1 << 1)
uint32_t lfa_base;
+
+ /* The same definitions as the shmem2 parameter */
+ uint32_t link_attr_sync;
};
/* Output parameters */
struct elink_vars {
uint8_t phy_flags;
-#define PHY_XGXS_FLAG (1<<0)
-#define PHY_SGMII_FLAG (1<<1)
-#define PHY_PHYSICAL_LINK_FLAG (1<<2)
-#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
-#define PHY_OVER_CURRENT_FLAG (1<<4)
-#define PHY_SFP_TX_FAULT_FLAG (1<<5)
+#define PHY_XGXS_FLAG (1 << 0)
+#define PHY_SGMII_FLAG (1 << 1)
+#define PHY_PHYSICAL_LINK_FLAG (1 << 2)
+#define PHY_HALF_OPEN_CONN_FLAG (1 << 3)
+#define PHY_OVER_CURRENT_FLAG (1 << 4)
+#define PHY_SFP_TX_FAULT_FLAG (1 << 5)
uint8_t mac_type;
#define ELINK_MAC_TYPE_NONE 0
@@ -448,8 +497,7 @@ struct elink_vars {
uint8_t rx_tx_asic_rst;
uint8_t turn_to_run_wc_rt;
uint16_t rsrv2;
- /* The same definitions as the shmem2 parameter */
- uint32_t link_attr_sync;
+
};
/***********************************************************/
@@ -460,14 +508,32 @@ elink_status_t elink_phy_init(struct elink_params *params, struct elink_vars *va
/* Reset the link. Should be called when driver or interface goes down
Before calling phy firmware upgrade, the reset_ext_phy should be set
to 0 */
+elink_status_t elink_link_reset(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t reset_ext_phy);
elink_status_t elink_lfa_reset(struct elink_params *params, struct elink_vars *vars);
/* elink_link_update should be called upon link interrupt */
elink_status_t elink_link_update(struct elink_params *params, struct elink_vars *vars);
+/* use the following phy functions to read/write from external_phy
+ * In order to use it to read/write internal phy registers, use
+ * ELINK_DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
+ * the register
+ */
+elink_status_t elink_phy_read(struct elink_params *params, uint8_t phy_addr,
+ uint8_t devad, uint16_t reg, uint16_t *ret_val);
+
+elink_status_t elink_phy_write(struct elink_params *params, uint8_t phy_addr,
+ uint8_t devad, uint16_t reg, uint16_t val);
+
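[Editor's note] A hedged sketch of the addressing rule described in the comment above; the bank/register names and phy_addr are only examples, not part of the patch.

/* Sketch: reading an internal-PHY register through elink_phy_read().
 * Per the comment above, devad is ELINK_DEFAULT_PHY_DEV_ADDR and the
 * register is the bank plus the low nibble of the register address. */
uint16_t bank = MDIO_REG_BANK_AER_BLOCK;   /* example bank */
uint16_t addr = MDIO_AER_BLOCK_AER_REG;    /* example register */
uint16_t val;

if (elink_phy_read(params, phy_addr, ELINK_DEFAULT_PHY_DEV_ADDR,
		   bank + (addr & 0xf), &val) != ELINK_STATUS_OK)
	return;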
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void elink_link_status_update(struct elink_params *input,
struct elink_vars *output);
+/* returns string representing the fw_version of the external phy */
+elink_status_t elink_get_ext_phy_fw_version(struct elink_params *params,
+ uint8_t *version,
+ uint16_t len);
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
@@ -481,12 +547,34 @@ elink_status_t elink_set_led(struct elink_params *params,
#define ELINK_LED_MODE_FRONT_PANEL_OFF 3
/* elink_handle_module_detect_int should be called upon module detection
- interrupt */
+ * interrupt
+ */
void elink_handle_module_detect_int(struct elink_params *params);
+/* Get the actual link status. In case it returns ELINK_STATUS_OK, link is up,
+ * otherwise link is down
+ */
+elink_status_t elink_test_link(struct elink_params *params,
+ struct elink_vars *vars,
+ uint8_t is_serdes);
+
+
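[Editor's note] A small sketch of how the elink_test_link() declaration above could be consumed; the wrapper is hypothetical and is_serdes is assumed to be 0 for the XGXS path.

/* Hypothetical wrapper: ELINK_STATUS_OK from elink_test_link() means
 * the link is up, anything else means it is down. */
static int example_link_is_up(struct elink_params *params,
			      struct elink_vars *vars)
{
	return elink_test_link(params, vars, 0) == ELINK_STATUS_OK;
}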
/* One-time initialization for external phy after power up */
elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base_path[],
- uint32_t shmem2_base_path[], uint32_t chip_id, uint8_t one_port_enabled);
+ uint32_t shmem2_base_path[], uint32_t chip_id,
+ uint8_t one_port_enabled);
+
+/* Reset the external PHY using GPIO */
+void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port);
+
+/* Reset the external of SFX7101 */
+void elink_sfx7101_sp_sw_reset(struct bnx2x_softc *sc, struct elink_phy *phy);
+
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
+ struct elink_params *params, uint8_t dev_addr,
+ uint16_t addr, uint16_t byte_cnt,
+ uint8_t *o_buf);
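[Editor's note] A hedged usage sketch for the EEPROM helper above, reusing the vendor-specific window added earlier in this header; the 0xa0 device address is an assumption for the standard SFP ID page.

/* Illustration only: fetch the 16-byte vendor-specific area. */
uint8_t vs[ELINK_SFP_EEPROM_VENDOR_SPECIFIC_SIZE];

if (elink_read_sfp_module_eeprom(phy, params, 0xa0,
				 ELINK_SFP_EEPROM_VENDOR_SPECIFIC_ADDR,
				 ELINK_SFP_EEPROM_VENDOR_SPECIFIC_SIZE,
				 vs) != ELINK_STATUS_OK)
	return;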
void elink_hw_reset_phy(struct elink_params *params);
@@ -569,12 +657,42 @@ elink_status_t elink_update_pfc(struct elink_params *params,
struct elink_vars *vars,
struct elink_nig_brb_pfc_port_params *pfc_params);
+
+/* Used to configure the ETS to disable */
+elink_status_t elink_ets_disabled(struct elink_params *params,
+ struct elink_vars *vars);
+
+/* Used to configure the ETS to BW limited */
+void elink_ets_bw_limit(const struct elink_params *params,
+ const uint32_t cos0_bw,
+ const uint32_t cos1_bw);
+
+/* Used to configure the ETS to strict */
+elink_status_t elink_ets_strict(const struct elink_params *params,
+ const uint8_t strict_cos);
+
+
+/* Configure the COS to ETS according to BW and SP settings.*/
+elink_status_t elink_ets_e3b0_config(const struct elink_params *params,
+ const struct elink_vars *vars,
+ struct elink_ets_params *ets_params);
+/* Read pfc statistic*/
+void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars,
+ uint32_t pfc_frames_sent[2],
+ uint32_t pfc_frames_received[2]);
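[Editor's note] A brief sketch of the ETS helpers declared above; the 60/40 split is an arbitrary example and the two values are assumed to be relative bandwidth weights.

/* Illustration: weight COS0/COS1 bandwidth 60/40, or alternatively put
 * COS0 into strict priority if weighting is not wanted. */
elink_ets_bw_limit(params, 60, 40);

if (elink_ets_strict(params, 0) != ELINK_STATUS_OK)
	return;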
void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base,
uint8_t port);
+/* elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
+ * struct elink_params *params);
+ */
void elink_period_func(struct elink_params *params, struct elink_vars *vars);
+/*elink_status_t elink_check_half_open_conn(struct elink_params *params,
+ * struct elink_vars *vars, uint8_t notify);
+ */
+
void elink_enable_pmd_tx(struct elink_params *params);
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index db5c4eb0..f75b0ad3 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -20,7 +20,7 @@
#include "bnxt_cpr.h"
-#define BNXT_MAX_MTU 9500
+#define BNXT_MAX_MTU 9574
#define VLAN_TAG_SIZE 4
#define BNXT_VF_RSV_NUM_RSS_CTX 1
#define BNXT_VF_RSV_NUM_L2_CTX 4
@@ -33,6 +33,13 @@
#define BNXT_MAX_RX_RING_DESC 8192
#define BNXT_DB_SIZE 0x80
+/* Chimp Communication Channel */
+#define GRCPF_REG_CHIMP_CHANNEL_OFFSET 0x0
+#define GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+/* Kong Communication Channel */
+#define GRCPF_REG_KONG_CHANNEL_OFFSET 0xA00
+#define GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+
#define BNXT_INT_LAT_TMR_MIN 75
#define BNXT_INT_LAT_TMR_MAX 150
#define BNXT_NUM_CMPL_AGGR_INT 36
@@ -250,6 +257,11 @@ struct bnxt {
#define BNXT_FLAG_UPDATE_HASH (1 << 5)
#define BNXT_FLAG_PTP_SUPPORTED (1 << 6)
#define BNXT_FLAG_MULTI_HOST (1 << 7)
+#define BNXT_FLAG_EXT_RX_PORT_STATS (1 << 8)
+#define BNXT_FLAG_EXT_TX_PORT_STATS (1 << 9)
+#define BNXT_FLAG_KONG_MB_EN (1 << 10)
+#define BNXT_FLAG_TRUSTED_VF_EN (1 << 11)
+#define BNXT_FLAG_DFLT_VNIC_SET (1 << 12)
#define BNXT_FLAG_NEW_RM (1 << 30)
#define BNXT_FLAG_INIT_DONE (1 << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
@@ -257,6 +269,9 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+#define BNXT_USE_CHIMP_MB	0 /* For non-CFA commands, everything uses Chimp. */
+#define BNXT_USE_KONG(bp) ((bp)->flags & BNXT_FLAG_KONG_MB_EN)
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
@@ -264,6 +279,9 @@ struct bnxt {
const void *rx_mem_zone;
struct rx_port_stats *hw_rx_port_stats;
rte_iova_t hw_rx_port_stats_map;
+ struct rx_port_stats_ext *hw_rx_port_stats_ext;
+ rte_iova_t hw_rx_port_stats_ext_map;
+ uint16_t fw_rx_port_stats_ext_size;
unsigned int tx_nr_rings;
unsigned int tx_cp_nr_rings;
@@ -271,6 +289,9 @@ struct bnxt {
const void *tx_mem_zone;
struct tx_port_stats *hw_tx_port_stats;
rte_iova_t hw_tx_port_stats_map;
+ struct tx_port_stats_ext *hw_tx_port_stats_ext;
+ rte_iova_t hw_tx_port_stats_ext_map;
+ uint16_t fw_tx_port_stats_ext_size;
/* Default completion ring */
struct bnxt_cp_ring_info *def_cp_ring;
@@ -285,16 +306,13 @@ struct bnxt {
struct bnxt_filter_info *filter_info;
STAILQ_HEAD(, bnxt_filter_info) free_filter_list;
- /* VNIC pointer for flow filter (VMDq) pools */
-#define MAX_FF_POOLS 256
- STAILQ_HEAD(, bnxt_vnic_info) ff_pool[MAX_FF_POOLS];
-
struct bnxt_irq *irq_tbl;
#define MAX_NUM_MAC_ADDR 32
uint8_t mac_addr[ETHER_ADDR_LEN];
uint16_t hwrm_cmd_seq;
+ uint16_t kong_cmd_seq;
void *hwrm_cmd_resp_addr;
rte_iova_t hwrm_cmd_resp_dma_addr;
void *hwrm_short_cmd_req_addr;
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index ff20b6fd..0fd6e51e 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -35,6 +35,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+ bnxt_hwrm_func_qcfg(bp);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
PMD_DRV_LOG(INFO, "Port conn async event\n");
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index cc7e4391..801c6ffa 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -149,7 +149,6 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_CRC_STRIP | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_TCP_LRO)
@@ -203,7 +202,9 @@ static int bnxt_init_chip(struct bnxt *bp)
struct bnxt_rx_queue *rxq;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint64_t rx_offloads = dev_conf->rxmode.offloads;
uint32_t intr_vector = 0;
uint32_t queue_id, base = BNXT_MISC_VEC_ID;
uint32_t vec = BNXT_MISC_VEC_ID;
@@ -263,6 +264,9 @@ static int bnxt_init_chip(struct bnxt *bp)
}
memset(vnic->fw_grp_ids, -1, size);
+ PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
+ i, vnic, vnic->fw_grp_ids);
+
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
@@ -281,6 +285,16 @@ static int bnxt_init_chip(struct bnxt *bp)
}
}
+ /*
+ * Firmware sets pf pair in default vnic cfg. If the VLAN strip
+ * setting is not available at this time, it will not be
+ * configured correctly in the CFA.
+ */
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ vnic->vlan_strip = true;
+ else
+ vnic->vlan_strip = false;
+
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
@@ -299,6 +313,10 @@ static int bnxt_init_chip(struct bnxt *bp)
for (j = 0; j < bp->rx_nr_rings; j++) {
rxq = bp->eth_dev->data->rx_queues[j];
+ PMD_DRV_LOG(DEBUG,
+ "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
+ j, rxq->vnic, rxq->vnic->fw_grp_ids);
+
if (rxq->rx_deferred_start)
rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
}
@@ -445,7 +463,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
- + VLAN_TAG_SIZE;
+ + VLAN_TAG_SIZE * 2;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
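[Editor's note] For reference, the resulting receive limit works out as follows, using the usual header sizes in this tree (ETHER_HDR_LEN 14, ETHER_CRC_LEN 4, VLAN_TAG_SIZE 4):

/* max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
 *                 + VLAN_TAG_SIZE * 2
 *               = 9574 + 14 + 4 + 8 = 9600 bytes
 */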
@@ -694,7 +712,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);
- bnxt_free_mem(bp);
if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -714,34 +731,30 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter, *temp_filter;
- uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
uint32_t i;
/*
* Loop through all VNICs from the specified filter flow pools to
* remove the corresponding MAC addr filter
*/
- for (i = 0; i < pool; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
if (!(pool_mask & (1ULL << i)))
continue;
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
- filter = STAILQ_FIRST(&vnic->filter);
- while (filter) {
- temp_filter = STAILQ_NEXT(filter, next);
- if (filter->mac_index == index) {
- STAILQ_REMOVE(&vnic->filter, filter,
- bnxt_filter_info, next);
- bnxt_hwrm_clear_l2_filter(bp, filter);
- filter->mac_index = INVALID_MAC_INDEX;
- memset(&filter->l2_addr, 0,
- ETHER_ADDR_LEN);
- STAILQ_INSERT_TAIL(
- &bp->free_filter_list,
- filter, next);
- }
- filter = temp_filter;
+ vnic = &bp->vnic_info[i];
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+ if (filter->mac_index == index) {
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ filter->mac_index = INVALID_MAC_INDEX;
+ memset(&filter->l2_addr, 0, ETHER_ADDR_LEN);
+ STAILQ_INSERT_TAIL(&bp->free_filter_list,
+ filter, next);
}
+ filter = temp_filter;
}
}
}
@@ -751,10 +764,10 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
uint32_t index, uint32_t pool)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
- struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
struct bnxt_filter_info *filter;
- if (BNXT_VF(bp)) {
+	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}
@@ -898,12 +911,10 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
/* Update the RSS VNIC(s) */
- for (i = 0; i < MAX_FF_POOLS; i++) {
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
- memcpy(vnic->rss_table, reta_conf, reta_size);
-
- bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- }
+ for (i = 0; i < bp->max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ memcpy(vnic->rss_table, reta_conf, reta_size);
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
return 0;
}
@@ -947,7 +958,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic;
uint16_t hash_type = 0;
- int i;
+ unsigned int i;
/*
* If RSS enablement were different than dev_configure,
@@ -978,21 +989,20 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
/* Update the RSS VNIC(s) */
- for (i = 0; i < MAX_FF_POOLS; i++) {
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
- vnic->hash_type = hash_type;
-
- /*
- * Use the supplied key if the key length is
- * acceptable and the rss_key is not NULL
- */
- if (rss_conf->rss_key &&
- rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
- memcpy(vnic->rss_hash_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
-
- bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- }
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->hash_type = hash_type;
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
return 0;
}
@@ -1269,53 +1279,51 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
* else
* VLAN filter doesn't exist, just skip and continue
*/
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
- filter = STAILQ_FIRST(&vnic->filter);
- while (filter) {
- temp_filter = STAILQ_NEXT(filter, next);
-
- if (filter->enables & chk &&
- filter->l2_ovlan == vlan_id) {
- /* Must delete the filter */
- STAILQ_REMOVE(&vnic->filter, filter,
- bnxt_filter_info, next);
- bnxt_hwrm_clear_l2_filter(bp, filter);
- STAILQ_INSERT_TAIL(
- &bp->free_filter_list,
- filter, next);
-
- /*
- * Need to examine to see if the MAC
- * filter already existed or not before
- * allocating a new one
- */
-
- new_filter = bnxt_alloc_filter(bp);
- if (!new_filter) {
- PMD_DRV_LOG(ERR,
+ vnic = &bp->vnic_info[i];
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk &&
+ filter->l2_ovlan == vlan_id) {
+ /* Must delete the filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ STAILQ_INSERT_TAIL(&bp->free_filter_list,
+ filter, next);
+
+ /*
+ * Need to examine to see if the MAC
+ * filter already existed or not before
+ * allocating a new one
+ */
+
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
- rc = -ENOMEM;
- goto exit;
- }
- STAILQ_INSERT_TAIL(&vnic->filter,
- new_filter, next);
- /* Inherit MAC from previous filter */
- new_filter->mac_index =
- filter->mac_index;
- memcpy(new_filter->l2_addr,
- filter->l2_addr, ETHER_ADDR_LEN);
- /* MAC only filter */
- rc = bnxt_hwrm_set_l2_filter(bp,
- vnic->fw_vnic_id,
- new_filter);
- if (rc)
- goto exit;
- PMD_DRV_LOG(INFO,
- "Del Vlan filter for %d\n",
- vlan_id);
+ rc = -ENOMEM;
+ goto exit;
}
- filter = temp_filter;
+ STAILQ_INSERT_TAIL(&vnic->filter,
+ new_filter, next);
+ /* Inherit MAC from previous filter */
+ new_filter->mac_index =
+ filter->mac_index;
+ memcpy(new_filter->l2_addr, filter->l2_addr,
+ ETHER_ADDR_LEN);
+ /* MAC only filter */
+ rc = bnxt_hwrm_set_l2_filter(bp,
+ vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ PMD_DRV_LOG(INFO,
+ "Del Vlan filter for %d\n",
+ vlan_id);
}
+ filter = temp_filter;
}
}
exit:
@@ -1345,51 +1353,48 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
* Remove the old MAC only filter
* Add a new MAC+VLAN filter
*/
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
- filter = STAILQ_FIRST(&vnic->filter);
- while (filter) {
- temp_filter = STAILQ_NEXT(filter, next);
-
- if (filter->enables & chk) {
- if (filter->l2_ovlan == vlan_id)
- goto cont;
- } else {
- /* Must delete the MAC filter */
- STAILQ_REMOVE(&vnic->filter, filter,
- bnxt_filter_info, next);
- bnxt_hwrm_clear_l2_filter(bp, filter);
- filter->l2_ovlan = 0;
- STAILQ_INSERT_TAIL(
- &bp->free_filter_list,
- filter, next);
- }
- new_filter = bnxt_alloc_filter(bp);
- if (!new_filter) {
- PMD_DRV_LOG(ERR,
+ vnic = &bp->vnic_info[i];
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+
+ if (filter->enables & chk) {
+ if (filter->l2_ivlan == vlan_id)
+ goto cont;
+ } else {
+ /* Must delete the MAC filter */
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ filter->l2_ovlan = 0;
+ STAILQ_INSERT_TAIL(&bp->free_filter_list,
+ filter, next);
+ }
+ new_filter = bnxt_alloc_filter(bp);
+ if (!new_filter) {
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
- rc = -ENOMEM;
- goto exit;
- }
- STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
- next);
- /* Inherit MAC from the previous filter */
- new_filter->mac_index = filter->mac_index;
- memcpy(new_filter->l2_addr, filter->l2_addr,
- ETHER_ADDR_LEN);
- /* MAC + VLAN ID filter */
- new_filter->l2_ivlan = vlan_id;
- new_filter->l2_ivlan_mask = 0xF000;
- new_filter->enables |= en;
- rc = bnxt_hwrm_set_l2_filter(bp,
- vnic->fw_vnic_id,
- new_filter);
- if (rc)
- goto exit;
- PMD_DRV_LOG(INFO,
- "Added Vlan filter for %d\n", vlan_id);
-cont:
- filter = temp_filter;
+ rc = -ENOMEM;
+ goto exit;
}
+ STAILQ_INSERT_TAIL(&vnic->filter, new_filter, next);
+ /* Inherit MAC from the previous filter */
+ new_filter->mac_index = filter->mac_index;
+ memcpy(new_filter->l2_addr, filter->l2_addr,
+ ETHER_ADDR_LEN);
+ /* MAC + VLAN ID filter */
+ new_filter->l2_ivlan = vlan_id;
+ new_filter->l2_ivlan_mask = 0xF000;
+ new_filter->enables |= en;
+ rc = bnxt_hwrm_set_l2_filter(bp,
+ vnic->fw_vnic_id,
+ new_filter);
+ if (rc)
+ goto exit;
+ PMD_DRV_LOG(INFO,
+ "Added Vlan filter for %d\n", vlan_id);
+cont:
+ filter = temp_filter;
}
}
exit:
@@ -1397,7 +1402,7 @@ exit:
}
static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
- uint16_t vlan_id, int on)
+ uint16_t vlan_id, int on)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
@@ -1454,7 +1459,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
struct bnxt_filter_info *filter;
int rc;
- if (BNXT_VF(bp))
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
return -EPERM;
memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
@@ -1571,21 +1576,17 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_dev_info dev_info;
- uint32_t max_dev_mtu;
uint32_t rc = 0;
uint32_t i;
bnxt_dev_info_get_op(eth_dev, &dev_info);
- max_dev_mtu = dev_info.max_rx_pktlen -
- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
- if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
+ if (new_mtu < ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
- ETHER_MIN_MTU, max_dev_mtu);
+ ETHER_MIN_MTU, BNXT_MAX_MTU);
return -EINVAL;
}
-
if (new_mtu > ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
bp->eth_dev->data->dev_conf.rxmode.offloads |=
@@ -1805,8 +1806,8 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
goto exit;
}
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+ vnic0 = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[efilter->queue];
if (vnic == NULL) {
PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
@@ -1864,8 +1865,8 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+ vnic0 = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[efilter->queue];
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
@@ -2081,8 +2082,8 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
if (ret < 0)
goto free_filter;
- vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = &bp->vnic_info[nfilter->queue];
+ vnic0 = &bp->vnic_info[0];
filter1 = STAILQ_FIRST(&vnic0->filter);
if (filter1 == NULL) {
ret = -1;
@@ -2375,8 +2376,8 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
return -EINVAL;
}
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+ vnic0 = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[fdir->action.rx_queue];
if (vnic == NULL) {
PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
return -EINVAL;
@@ -2497,9 +2498,9 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
- vnic = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = &bp->vnic_info[0];
else
- vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+ vnic = &bp->vnic_info[fdir->action.rx_queue];
match = bnxt_match_fdir(bp, filter, &mvnic);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
@@ -3226,7 +3227,9 @@ skip_init:
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
- sizeof(struct rx_port_stats) + 512);
+ sizeof(struct rx_port_stats) +
+ sizeof(struct rx_port_stats_ext) +
+ 512);
if (!mz) {
mz = rte_memzone_reserve(mz_name, total_alloc_len,
SOCKET_ID_ANY,
@@ -3262,7 +3265,9 @@ skip_init:
mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
mz = rte_memzone_lookup(mz_name);
total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
- sizeof(struct tx_port_stats) + 512);
+ sizeof(struct tx_port_stats) +
+ sizeof(struct tx_port_stats_ext) +
+ 512);
if (!mz) {
mz = rte_memzone_reserve(mz_name,
total_alloc_len,
@@ -3293,8 +3298,30 @@ skip_init:
bp->hw_tx_port_stats_map = mz_phys_addr;
bp->flags |= BNXT_FLAG_PORT_STATS;
+
+ /* Display extended statistics if FW supports it */
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
+ bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0)
+ goto skip_ext_stats;
+
+ bp->hw_rx_port_stats_ext = (void *)
+		((uint8_t *)bp->hw_rx_port_stats + sizeof(struct rx_port_stats));
+ bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
+
+
+ if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) {
+ bp->hw_tx_port_stats_ext = (void *)
+			((uint8_t *)bp->hw_tx_port_stats + sizeof(struct tx_port_stats));
+ bp->hw_tx_port_stats_ext_map =
+ bp->hw_tx_port_stats_map +
+ sizeof(struct tx_port_stats);
+ bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
+ }
}
+skip_ext_stats:
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
PMD_DRV_LOG(ERR,
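[Editor's note] The extended counters are carved out of the same memzone right behind the legacy blocks, which is what the enlarged total_alloc_len above pays for; the DMA address computation makes the layout explicit (rx shown, tx follows the same pattern when the firmware is recent enough):

/* Per-direction memzone layout after this change:
 *
 *   hw_rx_port_stats_map                      -> struct rx_port_stats
 *   hw_rx_port_stats_map
 *     + sizeof(struct rx_port_stats)          -> struct rx_port_stats_ext
 *   remaining bytes of the extra 512-byte slack stay unused
 */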
@@ -3474,10 +3501,6 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
bnxt_disable_int(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
- if (eth_dev->data->mac_addrs != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- }
if (bp->grp_info != NULL) {
rte_free(bp->grp_info);
bp->grp_info = NULL;
@@ -3515,7 +3538,11 @@ static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ bnxt_dev_uninit);
+ else
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
static struct rte_pci_driver bnxt_rte_pmd = {
@@ -3542,7 +3569,7 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
RTE_INIT(bnxt_init_log)
{
- bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
if (bnxt_logtype_driver >= 0)
rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
}
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 1038941e..f43fe0db 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -80,21 +80,21 @@ void bnxt_free_all_filters(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter, *temp_filter;
- int i;
-
- for (i = 0; i < MAX_FF_POOLS; i++) {
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
- filter = STAILQ_FIRST(&vnic->filter);
- while (filter) {
- temp_filter = STAILQ_NEXT(filter, next);
- STAILQ_REMOVE(&vnic->filter, filter,
- bnxt_filter_info, next);
- STAILQ_INSERT_TAIL(&bp->free_filter_list,
- filter, next);
- filter = temp_filter;
- }
- STAILQ_INIT(&vnic->filter);
+ unsigned int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&bp->free_filter_list,
+ filter, next);
+ filter = temp_filter;
}
+ STAILQ_INIT(&vnic->filter);
}
for (i = 0; i < bp->pf.max_vfs; i++) {
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index ac765674..1afe6740 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -678,7 +678,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
struct bnxt_vnic_info *vnic0;
int rc;
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic0 = &bp->vnic_info[0];
f0 = STAILQ_FIRST(&vnic0->filter);
/* This flow has same DST MAC as the port/l2 filter. */
@@ -763,8 +763,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
}
PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
+ vnic0 = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[act_q->index];
if (vnic == NULL) {
rte_flow_error_set(error,
EINVAL,
@@ -786,7 +786,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "VNIC found\n");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic0 = &bp->vnic_info[0];
filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
if (filter1 == NULL) {
rc = -ENOSPC;
@@ -802,7 +802,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic0 = &bp->vnic_info[0];
filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
if (filter1 == NULL) {
rc = -ENOSPC;
@@ -854,7 +854,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
filter->mirror_vnic_id = dflt_vnic;
filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic0 = &bp->vnic_info[0];
filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
if (filter1 == NULL) {
rc = -ENOSPC;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index c682488a..99997605 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -26,7 +26,7 @@
#include <rte_io.h>
-#define HWRM_CMD_TIMEOUT 10000
+#define HWRM_CMD_TIMEOUT 6000000
#define HWRM_SPEC_CODE_1_8_3 0x10803
#define HWRM_VERSION_1_9_1 0x10901
@@ -70,7 +70,7 @@ static int page_roundup(size_t size)
*/
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
- uint32_t msg_len)
+ uint32_t msg_len, bool use_kong_mb)
{
unsigned int i;
struct input *req = msg;
@@ -80,6 +80,10 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
uint8_t *valid;
uint16_t max_req_len = bp->max_req_len;
struct hwrm_short_input short_input = { 0 };
+ uint16_t bar_offset = use_kong_mb ?
+ GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
+ uint16_t mb_trigger_offset = use_kong_mb ?
+ GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
if (bp->flags & BNXT_FLAG_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
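[Editor's note] How a caller routes a command under the new signature is only sketched here; req and rc are assumed from the surrounding HWRM pattern.

	/* Sketch: non-CFA commands always pass BNXT_USE_CHIMP_MB (0); a CFA
	 * command may pass BNXT_USE_KONG(bp) so it lands at
	 * GRCPF_REG_KONG_CHANNEL_OFFSET and is kicked via
	 * GRCPF_REG_KONG_COMM_TRIGGER when the firmware enabled the Kong
	 * mailbox, falling back to the Chimp channel otherwise. */
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));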
@@ -105,19 +109,19 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
/* Write request msg to hwrm channel */
for (i = 0; i < msg_len; i += 4) {
- bar = (uint8_t *)bp->bar0 + i;
+ bar = (uint8_t *)bp->bar0 + bar_offset + i;
rte_write32(*data, bar);
data++;
}
/* Zero the rest of the request space */
for (; i < max_req_len; i += 4) {
- bar = (uint8_t *)bp->bar0 + i;
+ bar = (uint8_t *)bp->bar0 + bar_offset + i;
rte_write32(0, bar);
}
/* Ring channel doorbell */
- bar = (uint8_t *)bp->bar0 + 0x100;
+ bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
rte_write32(1, bar);
/* Poll for the valid bit */
@@ -131,7 +135,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
if (*valid == HWRM_RESP_VALID_KEY)
break;
}
- rte_delay_us(600);
+ rte_delay_us(1);
}
if (i >= HWRM_CMD_TIMEOUT) {
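[Editor's note] The two timing changes above cancel out in terms of total budget; the worst-case wait stays about six seconds, but the finer polling step lets completed commands be noticed almost immediately:

/* old: HWRM_CMD_TIMEOUT (10000) iterations * rte_delay_us(600) = 6.0 s
 * new: HWRM_CMD_TIMEOUT (6000000) iterations * rte_delay_us(1) = 6.0 s
 */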
@@ -156,12 +160,13 @@ err_ret:
*
* HWRM_UNLOCK() must be called after all response processing is completed.
*/
-#define HWRM_PREP(req, type) do { \
+#define HWRM_PREP(req, type, kong) do { \
rte_spinlock_lock(&bp->hwrm_lock); \
memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
req.req_type = rte_cpu_to_le_16(HWRM_##type); \
req.cmpl_ring = rte_cpu_to_le_16(-1); \
- req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
+ req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
+ rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
req.target_id = rte_cpu_to_le_16(0xffff); \
req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
@@ -220,11 +225,11 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.mask = 0;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -245,7 +250,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
return rc;
- HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
/* FIXME add multicast flag, when multicast adding options is supported
@@ -275,7 +280,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
}
req.mask = rte_cpu_to_le_32(mask);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -307,14 +312,14 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
return 0;
}
}
- HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
+ HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -332,11 +337,11 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
if (filter->fw_l2_filter_id == UINT64_MAX)
return 0;
- HWRM_PREP(req, CFA_L2_FILTER_FREE);
+ HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -375,9 +380,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
if (filter->fw_l2_filter_id != UINT64_MAX)
bnxt_hwrm_clear_l2_filter(bp, filter);
- HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
+ HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(filter->flags);
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
@@ -410,7 +417,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -430,7 +437,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
if (!ptp)
return 0;
- HWRM_PREP(req, PORT_MAC_CFG);
+ HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
if (ptp->rx_filter)
flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
@@ -447,7 +454,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_UNLOCK();
return rc;
@@ -464,11 +471,11 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
if (ptp)
return 0;
- HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+ HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -513,11 +520,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
uint32_t flags;
int i;
- HWRM_PREP(req, FUNC_QCAPS);
+ HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -615,11 +622,11 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
struct hwrm_func_reset_input req = {.req_type = 0 };
struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_RESET);
+ HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(0);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -636,7 +643,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_REGISTERED)
return 0;
- HWRM_PREP(req, FUNC_DRV_RGTR);
+ HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
req.ver_maj = RTE_VER_YEAR;
@@ -668,7 +675,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -694,7 +701,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_vf_cfg_input req = {0};
- HWRM_PREP(req, FUNC_VF_CFG);
+ HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32
(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
@@ -733,7 +740,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
req.flags = rte_cpu_to_le_32(flags);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (test)
HWRM_CHECK_RESULT_SILENT();
@@ -750,10 +757,10 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_resource_qcaps_input req = {0};
- HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+ HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -782,20 +789,19 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
int rc = 0;
struct hwrm_ver_get_input req = {.req_type = 0 };
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
- uint32_t my_version;
uint32_t fw_version;
uint16_t max_resp_len;
char type[RTE_MEMZONE_NAMESIZE];
uint32_t dev_caps_cfg;
bp->max_req_len = HWRM_MAX_REQ_LEN;
- HWRM_PREP(req, VER_GET);
+ HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -810,10 +816,6 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
- my_version = HWRM_VERSION_MAJOR << 16;
- my_version |= HWRM_VERSION_MINOR << 8;
- my_version |= HWRM_VERSION_UPDATE;
-
fw_version = resp->hwrm_intf_maj_8b << 16;
fw_version |= resp->hwrm_intf_min_8b << 8;
fw_version |= resp->hwrm_intf_upd_8b;
@@ -825,21 +827,6 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
goto error;
}
- if (my_version != fw_version) {
- PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
- if (my_version < fw_version) {
- PMD_DRV_LOG(INFO,
- "Firmware API version is newer than driver.\n");
- PMD_DRV_LOG(INFO,
- "The driver may be missing features.\n");
- } else {
- PMD_DRV_LOG(INFO,
- "Firmware API version is older than driver.\n");
- PMD_DRV_LOG(INFO,
- "Not all driver features may be functional.\n");
- }
- }
-
if (bp->max_req_len > resp->max_req_win_len) {
PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
@@ -899,6 +886,14 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->flags |= BNXT_FLAG_SHORT_CMD;
}
+ if (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_KONG_MB_EN;
+ PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
+ }
+ if (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
+ PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
error:
HWRM_UNLOCK();
@@ -914,10 +909,10 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
if (!(bp->flags & BNXT_FLAG_REGISTERED))
return 0;
- HWRM_PREP(req, FUNC_DRV_UNRGTR);
+ HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
req.flags = flags;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -934,7 +929,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
- HWRM_PREP(req, PORT_PHY_CFG);
+ HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
if (conf->link_up) {
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
@@ -983,7 +978,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
PMD_DRV_LOG(INFO, "Force Link Down\n");
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -998,9 +993,9 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, PORT_PHY_QCFG);
+ HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1046,14 +1041,14 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
int i;
- HWRM_PREP(req, QUEUE_QPORTCFG);
+ HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
/* HWRM Version >= 1.9.1 */
if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
req.drv_qmap_cap =
HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1099,7 +1094,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_ALLOC);
+ HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
req.fbo = rte_cpu_to_le_32(0);
@@ -1135,7 +1130,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
}
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
@@ -1175,12 +1170,12 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
struct hwrm_ring_free_input req = {.req_type = 0 };
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_FREE);
+ HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
req.ring_type = ring_type;
req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
@@ -1215,14 +1210,14 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_GRP_ALLOC);
+ HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1240,11 +1235,11 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
struct hwrm_ring_grp_free_input req = {.req_type = 0 };
struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_GRP_FREE);
+ HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1262,11 +1257,11 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
return rc;
- HWRM_PREP(req, STAT_CTX_CLR_STATS);
+ HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1281,14 +1276,14 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_ALLOC);
+ HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
req.update_period_ms = rte_cpu_to_le_32(0);
req.stats_dma_addr =
rte_cpu_to_le_64(cpr->hw_stats_map);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1306,11 +1301,11 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_FREE);
+ HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1336,12 +1331,12 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE;
- HWRM_PREP(req, VNIC_ALLOC);
+ HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
if (vnic->func_default)
req.flags =
rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1359,11 +1354,11 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_PLCMODES_QCFG);
+ HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1387,7 +1382,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_PLCMODES_CFG);
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.flags = rte_cpu_to_le_32(pmode->flags);
@@ -1400,7 +1395,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1425,7 +1420,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
if (rc)
return rc;
- HWRM_PREP(req, VNIC_CFG);
+ HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
/* Only RSS support for now TBD: COS & LB */
req.enables =
@@ -1445,9 +1440,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
req.mru = rte_cpu_to_le_16(vnic->mru);
- if (vnic->func_default)
+ /* Configure default VNIC only once. */
+ if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
req.flags |=
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
+ bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
+ }
if (vnic->vlan_strip)
req.flags |=
rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
@@ -1464,7 +1462,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req.flags |= rte_cpu_to_le_32(
HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1485,14 +1483,14 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
- HWRM_PREP(req, VNIC_QCFG);
+ HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
req.enables =
rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.vf_id = rte_cpu_to_le_16(fw_vf_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1526,9 +1524,9 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1550,11 +1548,11 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
- HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1575,16 +1573,20 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
- HWRM_PREP(req, VNIC_FREE);
+ HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ /* Configure default VNIC again if necessary. */
+ if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
+ bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
+
return rc;
}
@@ -1595,7 +1597,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_RSS_CFG);
+ HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
req.hash_mode_flags = vnic->hash_mode;
@@ -1606,7 +1608,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1627,7 +1629,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
return rc;
}
- HWRM_PREP(req, VNIC_PLCMODES_CFG);
+ HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(
HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
@@ -1641,7 +1643,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
req.jumbo_thresh = rte_cpu_to_le_16(size);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1656,7 +1658,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_TPA_CFG);
+ HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
if (enable) {
req.enables = rte_cpu_to_le_32(
@@ -1677,7 +1679,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
}
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1697,9 +1699,9 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -1715,11 +1717,11 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
struct hwrm_func_qstats_input req = {.req_type = 0};
struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_QSTATS);
+ HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1738,11 +1740,11 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
struct hwrm_func_qstats_input req = {.req_type = 0};
struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_QSTATS);
+ HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -1775,11 +1777,11 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
struct hwrm_func_clr_stats_input req = {.req_type = 0};
struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_CLR_STATS);
+ HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2435,10 +2437,10 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
uint16_t flags;
int rc = 0;
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -2448,6 +2450,11 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
+ if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+ bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
+ PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
+ }
+
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
@@ -2522,9 +2529,9 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
req.fid = rte_cpu_to_le_16(0xffff);
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2595,9 +2602,9 @@ static void reserve_resources_from_vf(struct bnxt *bp,
int rc;
/* Get the actual allocated values now */
- HWRM_PREP(req, FUNC_QCAPS);
+ HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
@@ -2631,9 +2638,9 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
int rc;
/* Check for zero MAC address */
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
@@ -2656,9 +2663,9 @@ static int update_pf_resource_max(struct bnxt *bp)
int rc;
/* And copy the allocated numbers into the pf struct */
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
/* Only TX ring value reflects actual allocation? TODO */
@@ -2758,10 +2765,13 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
for (i = 0; i < num_vfs; i++) {
add_random_mac_if_needed(bp, &req, i);
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp,
+ &req,
+ sizeof(req),
+ BNXT_USE_CHIMP_MB);
/* Clear enable flag for next pass */
req.enables &= ~rte_cpu_to_le_32(
@@ -2811,13 +2821,13 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
req.evb_mode = bp->pf.evb_mode;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2831,10 +2841,10 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
+ HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
req.tunnel_type = tunnel_type;
req.tunnel_dst_port_val = port;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
switch (tunnel_type) {
@@ -2862,11 +2872,11 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
+ HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
req.tunnel_type = tunnel_type;
req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2881,11 +2891,11 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.flags = rte_cpu_to_le_32(flags);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2911,7 +2921,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_BUF_RGTR);
+ HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
req.req_buf_num_pages = rte_cpu_to_le_16(1);
req.req_buf_page_size = rte_cpu_to_le_16(
@@ -2925,7 +2935,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return -ENOMEM;
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2939,9 +2949,9 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_BUF_UNRGTR);
+ HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2955,7 +2965,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(0xffff);
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
@@ -2963,7 +2973,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
bp->def_cp_ring->cp_ring_struct->fw_ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2977,13 +2987,13 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
struct hwrm_func_vf_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_VF_CFG);
+ HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(
- HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
bp->def_cp_ring->cp_ring_struct->fw_ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -2999,7 +3009,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
uint32_t func_cfg_flags;
int rc = 0;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
if (is_vf) {
dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
@@ -3016,7 +3026,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3031,13 +3041,13 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(enables);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
req.max_bw = rte_cpu_to_le_32(max_bw);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3051,14 +3061,14 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3088,12 +3098,12 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
if (ec_size > sizeof(req.encap_request))
return -1;
- HWRM_PREP(req, REJECT_FWD_RESP);
+ HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
memcpy(req.encap_request, encaped, ec_size);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3108,10 +3118,10 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- HWRM_PREP(req, FUNC_QCFG);
+ HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -3132,12 +3142,12 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
if (ec_size > sizeof(req.encap_request))
return -1;
- HWRM_PREP(req, EXEC_FWD_RESP);
+ HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
memcpy(req.encap_request, encaped, ec_size);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3152,11 +3162,11 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
struct hwrm_stat_ctx_query_input req = {.req_type = 0};
struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_QUERY);
+ HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_32(cid);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -3192,12 +3202,12 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
- HWRM_PREP(req, PORT_QSTATS);
+ HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
req.port_id = rte_cpu_to_le_16(pf->port_id);
req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3217,10 +3227,10 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
return 0;
- HWRM_PREP(req, PORT_CLR_STATS);
+ HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
req.port_id = rte_cpu_to_le_16(pf->port_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3237,9 +3247,9 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
if (BNXT_VF(bp))
return 0;
- HWRM_PREP(req, PORT_LED_QCAPS);
+ HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
req.port_id = bp->pf.port_id;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -3279,7 +3289,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
- HWRM_PREP(req, PORT_LED_CFG);
+ HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
if (led_on) {
led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
@@ -3297,7 +3307,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3312,9 +3322,9 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
struct hwrm_nvm_get_dir_info_input req = {0};
struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, NVM_GET_DIR_INFO);
+ HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3357,9 +3367,9 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
"unable to map response address to physical memory\n");
return -ENOMEM;
}
- HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
+ HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
@@ -3392,12 +3402,12 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
- HWRM_PREP(req, NVM_READ);
+ HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
req.dir_idx = rte_cpu_to_le_16(index);
req.offset = rte_cpu_to_le_32(offset);
req.len = rte_cpu_to_le_32(length);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc == 0)
memcpy(data, buf, length);
@@ -3414,9 +3424,9 @@ int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
struct hwrm_nvm_erase_dir_entry_input req = {0};
struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
+ HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
req.dir_idx = rte_cpu_to_le_16(index);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3448,7 +3458,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
}
memcpy(buf, data, data_len);
- HWRM_PREP(req, NVM_WRITE);
+ HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
req.dir_type = rte_cpu_to_le_16(dir_type);
req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
@@ -3457,7 +3467,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
req.dir_data_length = rte_cpu_to_le_32(data_len);
req.host_src_addr = rte_cpu_to_le_64(dma_handle);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
rte_free(buf);
HWRM_CHECK_RESULT();
@@ -3499,7 +3509,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
int rc;
/* First query all VNIC ids */
- HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
+ HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
@@ -3511,7 +3521,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
HWRM_UNLOCK();
PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
@@ -3591,7 +3601,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG);
+ HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(
@@ -3599,7 +3609,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
req.vlan_antispoof_mode = on ?
HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3668,7 +3678,7 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp,
if (filter->fw_em_filter_id != UINT64_MAX)
bnxt_hwrm_clear_em_filter(bp, filter);
- HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
+ HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
req.flags = rte_cpu_to_le_32(filter->flags);
@@ -3721,7 +3731,7 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp,
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
HWRM_CHECK_RESULT();
@@ -3741,11 +3751,11 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
return 0;
PMD_DRV_LOG(ERR, "Clear EM filter\n");
- HWRM_PREP(req, CFA_EM_FLOW_FREE);
+ HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3769,7 +3779,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
if (filter->fw_ntuple_filter_id != UINT64_MAX)
bnxt_hwrm_clear_ntuple_filter(bp, filter);
- HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(filter->flags);
@@ -3832,7 +3842,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
req.enables = rte_cpu_to_le_32(enables);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
@@ -3853,11 +3863,11 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
if (filter->fw_ntuple_filter_id == UINT64_MAX)
return 0;
- HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
@@ -3937,11 +3947,55 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
if (!bnxt_stratus_device(bp))
return 0;
- HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
bnxt_hwrm_set_coal_params(coal, &req);
req.ring_id = rte_cpu_to_le_16(ring_id);
- rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
return 0;
}
+
+int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
+{
+ struct hwrm_port_qstats_ext_input req = {0};
+ struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
+ bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
+ return 0;
+
+ HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
+
+ req.port_id = rte_cpu_to_le_16(pf->port_id);
+ if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
+ req.tx_stat_host_addr =
+ rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+ req.tx_stat_size =
+ rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
+ }
+ if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
+ req.rx_stat_host_addr =
+ rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+ req.rx_stat_size =
+ rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
+ }
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ if (rc) {
+ bp->fw_rx_port_stats_ext_size = 0;
+ bp->fw_tx_port_stats_ext_size = 0;
+ } else {
+ bp->fw_rx_port_stats_ext_size =
+ rte_le_to_cpu_16(resp->rx_stat_size);
+ bp->fw_tx_port_stats_ext_size =
+ rte_le_to_cpu_16(resp->tx_stat_size);
+ }
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
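bnxt_hwrm_ext_port_qstats() caches the extended-statistics sizes reported by firmware rather than the counters themselves. A short sketch of how a stats path turns those byte sizes into counter counts, mirroring bnxt_dev_xstats_get_op() in bnxt_stats.c below (fragment only, variables as declared there):

	/* Sketch: every extended counter is a little-endian uint64_t, so the
	 * sizes returned by firmware convert directly into counter counts. */
	bnxt_hwrm_ext_port_qstats(bp);
	rx_port_stats_ext_cnt = bp->fw_rx_port_stats_ext_size / sizeof(uint64_t);
	tx_port_stats_ext_cnt = bp->fw_tx_port_stats_ext_size / sizeof(uint64_t);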
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 379aac6e..ec9b3e00 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -32,6 +32,10 @@ struct bnxt_cp_ring_info;
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC \
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+#define HWRM_SPEC_CODE_1_8_4 0x10804
+#define HWRM_SPEC_CODE_1_9_0 0x10900
+#define HWRM_SPEC_CODE_1_9_2 0x10902
+
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -174,4 +178,5 @@ int bnxt_vnic_rss_configure(struct bnxt *bp,
int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
struct bnxt_coal *coal, uint16_t ring_id);
int bnxt_hwrm_check_vf_rings(struct bnxt *bp);
+int bnxt_hwrm_ext_port_qstats(struct bnxt *bp);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 832fc9ec..5345d393 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -43,21 +43,19 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
/* Single queue mode */
if (bp->rx_cp_nr_rings < 2) {
- vnic = bnxt_alloc_vnic(bp);
+ vnic = &bp->vnic_info[0];
if (!vnic) {
PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
vnic->flags |= BNXT_VNIC_INFO_BCAST;
- STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
bp->nr_vnics++;
rxq = bp->eth_dev->data->rx_queues[0];
rxq->vnic = vnic;
vnic->func_default = true;
- vnic->ff_pool_idx = 0;
vnic->start_grp_id = 0;
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
@@ -85,6 +83,9 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
RTE_MIN(bp->max_l2_ctx,
RTE_MIN(bp->max_rsscos_ctx,
ETH_64_POOLS)));
+ PMD_DRV_LOG(DEBUG,
+ "pools = %u max_pools = %u\n",
+ pools, max_pools);
if (pools > max_pools)
pools = max_pools;
break;
@@ -98,25 +99,27 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
goto err_out;
}
}
-
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ PMD_DRV_LOG(ERR, "pools = %u nb_q_per_grp = %u\n", pools, nb_q_per_grp);
start_grp_id = 0;
end_grp_id = nb_q_per_grp;
for (i = 0; i < pools; i++) {
- vnic = bnxt_alloc_vnic(bp);
+ vnic = &bp->vnic_info[i];
if (!vnic) {
PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
vnic->flags |= BNXT_VNIC_INFO_BCAST;
- STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
bp->nr_vnics++;
for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
rxq = bp->eth_dev->data->rx_queues[ring_idx];
rxq->vnic = vnic;
+ PMD_DRV_LOG(DEBUG,
+ "rxq[%d] = %p vnic[%d] = %p\n",
+ ring_idx, rxq, i, vnic);
}
if (i == 0) {
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
@@ -125,7 +128,6 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
vnic->func_default = true;
}
- vnic->ff_pool_idx = i;
vnic->start_grp_id = start_grp_id;
vnic->end_grp_id = end_grp_id;
@@ -176,7 +178,7 @@ out:
hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
for (i = 0; i < bp->nr_vnics; i++) {
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic = &bp->vnic_info[i];
vnic->hash_type = hash_type;
/*
@@ -187,7 +189,6 @@ out:
rss->rss_key_len <= HW_HASH_KEY_SIZE)
memcpy(vnic->rss_hash_key,
rss->rss_key, rss->rss_key_len);
- }
}
}
@@ -331,8 +332,10 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
- ETHER_CRC_LEN : 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
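With the ff_pool queues removed, per-pool VNICs are reached by direct indexing into bp->vnic_info[], so loops that previously walked STAILQ entries collapse into plain array iteration. A reduced sketch of the resulting shape, using only fields shown elsewhere in this patch:

	/* Sketch: per-pool VNIC lookup after the ff_pool removal. */
	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		vnic->hash_type = hash_type;
	}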
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index a5d3c866..c16bf99d 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -26,8 +26,8 @@ static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
rx_256b_511b_frames)},
{"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
rx_512b_1023b_frames)},
- {"rx_1024b_1518_frames", offsetof(struct rx_port_stats,
- rx_1024b_1518_frames)},
+ {"rx_1024b_1518b_frames", offsetof(struct rx_port_stats,
+ rx_1024b_1518b_frames)},
{"rx_good_vlan_frames", offsetof(struct rx_port_stats,
rx_good_vlan_frames)},
{"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
@@ -93,12 +93,12 @@ static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
tx_256b_511b_frames)},
{"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
tx_512b_1023b_frames)},
- {"tx_1024b_1518_frames", offsetof(struct tx_port_stats,
- tx_1024b_1518_frames)},
+ {"tx_1024b_1518b_frames", offsetof(struct tx_port_stats,
+ tx_1024b_1518b_frames)},
{"tx_good_vlan_frames", offsetof(struct tx_port_stats,
tx_good_vlan_frames)},
- {"tx_1519b_2047_frames", offsetof(struct tx_port_stats,
- tx_1519b_2047_frames)},
+ {"tx_1519b_2047b_frames", offsetof(struct tx_port_stats,
+ tx_1519b_2047b_frames)},
{"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
tx_2048b_4095b_frames)},
{"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
@@ -180,6 +180,150 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
rx_agg_aborts)},
};
+static const struct bnxt_xstats_name_off bnxt_rx_ext_stats_strings[] = {
+ {"link_down_events", offsetof(struct rx_port_stats_ext,
+ link_down_events)},
+ {"continuous_pause_events", offsetof(struct rx_port_stats_ext,
+ continuous_pause_events)},
+ {"resume_pause_events", offsetof(struct rx_port_stats_ext,
+ resume_pause_events)},
+ {"continuous_roce_pause_events", offsetof(struct rx_port_stats_ext,
+ continuous_roce_pause_events)},
+ {"resume_roce_pause_events", offsetof(struct rx_port_stats_ext,
+ resume_roce_pause_events)},
+ {"rx_bytes_cos0", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos0)},
+ {"rx_bytes_cos1", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos1)},
+ {"rx_bytes_cos2", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos2)},
+ {"rx_bytes_cos3", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos3)},
+ {"rx_bytes_cos4", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos4)},
+ {"rx_bytes_cos5", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos5)},
+ {"rx_bytes_cos6", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos6)},
+ {"rx_bytes_cos7", offsetof(struct rx_port_stats_ext,
+ rx_bytes_cos7)},
+ {"rx_packets_cos0", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos0)},
+ {"rx_packets_cos1", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos1)},
+ {"rx_packets_cos2", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos2)},
+ {"rx_packets_cos3", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos3)},
+ {"rx_packets_cos4", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos4)},
+ {"rx_packets_cos5", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos5)},
+ {"rx_packets_cos6", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos6)},
+ {"rx_packets_cos7", offsetof(struct rx_port_stats_ext,
+ rx_packets_cos7)},
+ {"pfc_pri0_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri0_rx_duration_us)},
+ {"pfc_pri0_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri0_rx_transitions)},
+ {"pfc_pri1_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri1_rx_duration_us)},
+ {"pfc_pri1_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri1_rx_transitions)},
+ {"pfc_pri2_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri2_rx_duration_us)},
+ {"pfc_pri2_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri2_rx_transitions)},
+ {"pfc_pri3_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri3_rx_duration_us)},
+ {"pfc_pri3_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri3_rx_transitions)},
+ {"pfc_pri4_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri4_rx_duration_us)},
+ {"pfc_pri4_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri4_rx_transitions)},
+ {"pfc_pri5_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri5_rx_duration_us)},
+ {"pfc_pri5_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri5_rx_transitions)},
+ {"pfc_pri6_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri6_rx_duration_us)},
+ {"pfc_pri6_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri6_rx_transitions)},
+ {"pfc_pri7_rx_duration_us", offsetof(struct rx_port_stats_ext,
+ pfc_pri7_rx_duration_us)},
+ {"pfc_pri7_rx_transitions", offsetof(struct rx_port_stats_ext,
+ pfc_pri7_rx_transitions)},
+};
+
+static const struct bnxt_xstats_name_off bnxt_tx_ext_stats_strings[] = {
+ {"tx_bytes_cos0", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos0)},
+ {"tx_bytes_cos1", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos1)},
+ {"tx_bytes_cos2", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos2)},
+ {"tx_bytes_cos3", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos3)},
+ {"tx_bytes_cos4", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos4)},
+ {"tx_bytes_cos5", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos5)},
+ {"tx_bytes_cos6", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos6)},
+ {"tx_bytes_cos7", offsetof(struct tx_port_stats_ext,
+ tx_bytes_cos7)},
+ {"tx_packets_cos0", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos0)},
+ {"tx_packets_cos1", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos1)},
+ {"tx_packets_cos2", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos2)},
+ {"tx_packets_cos3", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos3)},
+ {"tx_packets_cos4", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos4)},
+ {"tx_packets_cos5", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos5)},
+ {"tx_packets_cos6", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos6)},
+ {"tx_packets_cos7", offsetof(struct tx_port_stats_ext,
+ tx_packets_cos7)},
+ {"pfc_pri0_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri0_tx_duration_us)},
+ {"pfc_pri0_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri0_tx_transitions)},
+ {"pfc_pri1_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri1_tx_duration_us)},
+ {"pfc_pri1_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri1_tx_transitions)},
+ {"pfc_pri2_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri2_tx_duration_us)},
+ {"pfc_pri2_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri2_tx_transitions)},
+ {"pfc_pri3_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri3_tx_duration_us)},
+ {"pfc_pri3_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri3_tx_transitions)},
+ {"pfc_pri4_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri4_tx_duration_us)},
+ {"pfc_pri4_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri4_tx_transitions)},
+ {"pfc_pri5_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri5_tx_duration_us)},
+ {"pfc_pri5_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri5_tx_transitions)},
+ {"pfc_pri6_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri6_tx_duration_us)},
+ {"pfc_pri6_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri6_tx_transitions)},
+ {"pfc_pri7_tx_duration_us", offsetof(struct tx_port_stats_ext,
+ pfc_pri7_tx_duration_us)},
+ {"pfc_pri7_tx_transitions", offsetof(struct tx_port_stats_ext,
+ pfc_pri7_tx_transitions)},
+};
+
/*
* Statistics functions
*/
@@ -265,12 +409,22 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
unsigned int count, i;
uint64_t tx_drop_pkts;
+ unsigned int rx_port_stats_ext_cnt;
+ unsigned int tx_port_stats_ext_cnt;
+ unsigned int stat_size = sizeof(uint64_t);
+ unsigned int stat_count;
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
+ bnxt_hwrm_ext_port_qstats(bp);
+ rx_port_stats_ext_cnt = bp->fw_rx_port_stats_ext_size / stat_size;
+ tx_port_stats_ext_cnt = bp->fw_tx_port_stats_ext_size / stat_size;
count = RTE_DIM(bnxt_rx_stats_strings) +
- RTE_DIM(bnxt_tx_stats_strings) + 1; /* For tx_drop_pkts */
+ RTE_DIM(bnxt_tx_stats_strings) + 1/* For tx_drop_pkts */ +
+ RTE_DIM(bnxt_rx_ext_stats_strings) +
+ RTE_DIM(bnxt_tx_ext_stats_strings);
+ stat_count = count;
if (n < count)
return count;
@@ -299,7 +453,27 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
- return count;
+ for (i = 0; i < tx_port_stats_ext_cnt; i++) {
+ uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext;
+
+ xstats[count].value = rte_le_to_cpu_64
+ (*(uint64_t *)((char *)tx_stats_ext +
+ bnxt_tx_ext_stats_strings[i].offset));
+
+ count++;
+ }
+
+ for (i = 0; i < rx_port_stats_ext_cnt; i++) {
+ uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;
+
+ xstats[count].value = rte_le_to_cpu_64
+ (*(uint64_t *)((char *)rx_stats_ext +
+ bnxt_rx_ext_stats_strings[i].offset));
+
+ count++;
+ }
+
+ return stat_count;
}
int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
@@ -308,7 +482,9 @@ int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
{
/* Account for the Tx drop pkts aka the Anti spoof counter */
const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
- RTE_DIM(bnxt_tx_stats_strings) + 1;
+ RTE_DIM(bnxt_tx_stats_strings) + 1 +
+ RTE_DIM(bnxt_rx_ext_stats_strings) +
+ RTE_DIM(bnxt_tx_ext_stats_strings);
unsigned int i, count;
if (xstats_names != NULL) {
@@ -335,6 +511,25 @@ int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
"%s",
bnxt_func_stats_strings[4].name);
count++;
+
+ for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_rx_ext_stats_strings[i].name);
+
+ count++;
+ }
+
+ for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ bnxt_tx_ext_stats_strings[i].name);
+
+ count++;
+ }
+
}
return stat_cnt;
}
@@ -359,7 +554,9 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
{
/* Account for the Tx drop pkts aka the Anti spoof counter */
const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
- RTE_DIM(bnxt_tx_stats_strings) + 1;
+ RTE_DIM(bnxt_tx_stats_strings) + 1 +
+ RTE_DIM(bnxt_rx_ext_stats_strings) +
+ RTE_DIM(bnxt_tx_ext_stats_strings);
struct rte_eth_xstat xstats[stat_cnt];
uint64_t values_copy[stat_cnt];
uint16_t i;
@@ -384,7 +581,9 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
{
/* Account for the Tx drop pkts aka the Anti spoof counter */
const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
- RTE_DIM(bnxt_tx_stats_strings) + 1;
+ RTE_DIM(bnxt_tx_stats_strings) + 1 +
+ RTE_DIM(bnxt_rx_ext_stats_strings) +
+ RTE_DIM(bnxt_tx_ext_stats_strings);
struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
uint16_t i;
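
For reference, the byte sizes that firmware reports for the extended port statistics (bp->fw_rx_port_stats_ext_size and bp->fw_tx_port_stats_ext_size above) convert to counter counts by dividing by sizeof(uint64_t), since each extended counter is a little-endian 64-bit value. A minimal sketch of that conversion, with an illustrative helper name that is not part of the driver:

#include <stdint.h>

/* Illustrative only: turn a firmware-reported extended-stats byte size
 * into a count of 64-bit counters, as the xstats hunks above do. */
static unsigned int
bnxt_ext_stat_count(uint32_t fw_stats_ext_size)
{
	return fw_stats_ext_size / (unsigned int)sizeof(uint64_t);
}

Note that the advertised total (stat_count) is computed from the fixed string tables via RTE_DIM, while the fill loops iterate the firmware-derived counts.
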
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 67bb35e0..39be7bdf 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -120,7 +120,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
{
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct tx_bd_long *txbd;
- struct tx_bd_long_hi *txbd1;
+ struct tx_bd_long_hi *txbd1 = NULL;
uint32_t vlan_tag_flags, cfa_action;
bool long_bd = false;
uint16_t last_prod = 0;
@@ -295,7 +295,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}
txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
- txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
+ if (txbd1)
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index c0577cd7..aebfb1f1 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -57,29 +57,6 @@ void bnxt_init_vnics(struct bnxt *bp)
STAILQ_INIT(&vnic->flow_list);
STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
}
- for (i = 0; i < MAX_FF_POOLS; i++)
- STAILQ_INIT(&bp->ff_pool[i]);
-}
-
-int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
- int pool)
-{
- struct bnxt_vnic_info *temp;
-
- temp = STAILQ_FIRST(&bp->ff_pool[pool]);
- while (temp) {
- if (temp == vnic) {
- STAILQ_REMOVE(&bp->ff_pool[pool], vnic,
- bnxt_vnic_info, next);
- vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
- STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic,
- next);
- return 0;
- }
- temp = STAILQ_NEXT(temp, next);
- }
- PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
- return -EINVAL;
}
struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
@@ -98,26 +75,22 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
void bnxt_free_all_vnics(struct bnxt *bp)
{
- struct bnxt_vnic_info *temp, *next;
- int i;
+ struct bnxt_vnic_info *temp;
+ unsigned int i;
- for (i = 0; i < MAX_FF_POOLS; i++) {
- temp = STAILQ_FIRST(&bp->ff_pool[i]);
- while (temp) {
- next = STAILQ_NEXT(temp, next);
- STAILQ_REMOVE(&bp->ff_pool[i], temp, bnxt_vnic_info,
- next);
- STAILQ_INSERT_TAIL(&bp->free_vnic_list, temp, next);
- temp = next;
- }
+ for (i = 0; i < bp->nr_vnics; i++) {
+ temp = &bp->vnic_info[i];
+ STAILQ_INSERT_TAIL(&bp->free_vnic_list, temp, next);
}
}
void bnxt_free_vnic_attributes(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
+ unsigned int i;
- STAILQ_FOREACH(vnic, &bp->free_vnic_list, next) {
+ for (i = 0; i < bp->max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
if (vnic->rss_table) {
/* 'Unreserve' the rss_table */
/* N/A */
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index f5c7b422..e8005793 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -67,6 +67,10 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_HWRM_RESPONSE UINT32_C(0x2)
/* RoCE slow path command */
#define TLV_TYPE_ROCE_SP_COMMAND UINT32_C(0x3)
+/* RoCE slow path command to query CC Gen1 support. */
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 UINT32_C(0x4)
+/* RoCE slow path command to modify CC Gen1 support. */
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 UINT32_C(0x5)
/* Engine CKV - The device's serial number. */
#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER UINT32_C(0x8001)
/* Engine CKV - Per-function random nonce data. */
@@ -256,6 +260,7 @@ struct cmd_nums {
*/
uint16_t req_type;
#define HWRM_VER_GET UINT32_C(0x0)
+ #define HWRM_FUNC_DRV_IF_CHANGE UINT32_C(0xd)
#define HWRM_FUNC_BUF_UNRGTR UINT32_C(0xe)
#define HWRM_FUNC_VF_CFG UINT32_C(0xf)
/* Reserved for future use. */
@@ -328,6 +333,7 @@ struct cmd_nums {
#define HWRM_RING_FREE UINT32_C(0x51)
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS UINT32_C(0x52)
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS UINT32_C(0x53)
+ #define HWRM_RING_AGGINT_QCAPS UINT32_C(0x54)
#define HWRM_RING_RESET UINT32_C(0x5e)
#define HWRM_RING_GRP_ALLOC UINT32_C(0x60)
#define HWRM_RING_GRP_FREE UINT32_C(0x61)
@@ -367,6 +373,8 @@ struct cmd_nums {
#define HWRM_PORT_QSTATS_EXT UINT32_C(0xb4)
#define HWRM_FW_RESET UINT32_C(0xc0)
#define HWRM_FW_QSTATUS UINT32_C(0xc1)
+ #define HWRM_FW_HEALTH_CHECK UINT32_C(0xc2)
+ #define HWRM_FW_SYNC UINT32_C(0xc3)
/* Experimental */
#define HWRM_FW_SET_TIME UINT32_C(0xc8)
/* Experimental */
@@ -433,6 +441,7 @@ struct cmd_nums {
/* Experimental */
#define HWRM_FW_IPC_MSG UINT32_C(0x110)
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO UINT32_C(0x111)
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE UINT32_C(0x112)
/* Engine CKV - Ping the device and SRT firmware to get the public key. */
#define HWRM_ENGINE_CKV_HELLO UINT32_C(0x12d)
/* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */
@@ -515,6 +524,10 @@ struct cmd_nums {
#define HWRM_FUNC_BACKING_STORE_CFG UINT32_C(0x193)
/* Experimental */
#define HWRM_FUNC_BACKING_STORE_QCFG UINT32_C(0x194)
+ /* Configures the BW of any VF */
+ #define HWRM_FUNC_VF_BW_CFG UINT32_C(0x195)
+ /* Queries the BW of any VF */
+ #define HWRM_FUNC_VF_BW_QCFG UINT32_C(0x196)
/* Experimental */
#define HWRM_SELFTEST_QLIST UINT32_C(0x200)
/* Experimental */
@@ -544,8 +557,12 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_INITIATE UINT32_C(0xff18)
/* Experimental */
#define HWRM_DBG_COREDUMP_RETRIEVE UINT32_C(0xff19)
+ /* Experimental */
+ #define HWRM_DBG_FW_CLI UINT32_C(0xff1a)
/* */
#define HWRM_DBG_I2C_CMD UINT32_C(0xff1b)
+ /* */
+ #define HWRM_DBG_RING_INFO_GET UINT32_C(0xff1c)
/* Experimental */
#define HWRM_NVM_FACTORY_DEFAULTS UINT32_C(0xffee)
#define HWRM_NVM_VALIDATE_OPTION UINT32_C(0xffef)
@@ -616,6 +633,11 @@ struct ret_codes {
*/
#define HWRM_ERR_CODE_NO_BUFFER UINT32_C(0x8)
/*
+ * This error code is only reported by firmware when some
+ * sub-option of a supported HWRM command is unsupported.
+ */
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR UINT32_C(0x9)
+ /*
* Generic HWRM execution error that represents an
* internal error.
*/
@@ -686,8 +708,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MINOR 9
#define HWRM_VERSION_UPDATE 2
/* non-zero means beta version */
-#define HWRM_VERSION_RSVD 9
-#define HWRM_VERSION_STR "1.9.2.9"
+#define HWRM_VERSION_RSVD 53
+#define HWRM_VERSION_STR "1.9.2.53"
/****************
* hwrm_ver_get *
@@ -902,6 +924,42 @@ struct hwrm_ver_get_output {
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \
UINT32_C(0x8)
/*
+ * If set to 1, then the KONG host mailbox channel is supported.
+ * If set to 0, then the KONG host mailbox channel is not supported.
+ * By default, this flag should be 0 for older versions of core firmware.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED \
+ UINT32_C(0x10)
+ /*
+ * If set to 1, then the 64bit flow handle is supported in addition to the
+ * legacy 16bit flow handle. If set to 0, then the 64bit flow handle is not
+ * supported. By default, this flag should be 0 for older versions of core firmware.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED \
+ UINT32_C(0x20)
+ /*
+ * If set to 1, then a filter type can be provided in filter_alloc or
+ * filter_cfg, e.g. L2 for L2 traffic and ROCE for RoCE and L2 traffic.
+ * If set to 0, then filter types are not supported.
+ * By default, this flag should be 0 for older versions of core firmware.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED \
+ UINT32_C(0x40)
+ /*
+ * If set to 1, firmware is capable of supporting the virtio vSwitch offload model.
+ * If set to 0, firmware cannot support the virtio vSwitch offload model.
+ * By default, this flag should be 0 for older versions of core firmware.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED \
+ UINT32_C(0x80)
+ /*
+ * If set to 1, firmware is capable of supporting trusted VFs.
+ * If set to 0, firmware is not capable of supporting trusted VFs.
+ * By default, this flag should be 0 for older versions of core firmware.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED \
+ UINT32_C(0x100)
+ /*
* This field represents the major version of RoCE firmware.
* A change in major version represents a major release.
*/
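
These dev_caps_cfg additions are plain capability bits in the HWRM_VER_GET response. As a hedged illustration (assuming this header is included; the helper name is not from the driver), a client would test them with a simple mask:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: check the trusted-VF capability bit reported by
 * HWRM_VER_GET in the dev_caps_cfg word. */
static bool
fw_supports_trusted_vf(uint32_t dev_caps_cfg)
{
	return (dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) != 0;
}
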
@@ -1154,39 +1212,45 @@ struct hwrm_ver_get_output {
struct bd_base {
uint8_t type;
/* This value identifies the type of buffer descriptor. */
- #define BD_BASE_TYPE_MASK UINT32_C(0x3f)
- #define BD_BASE_TYPE_SFT 0
+ #define BD_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define BD_BASE_TYPE_SFT 0
/*
* Indicates that this BD is 16B long and is used for
* normal L2 packet transmission.
*/
- #define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ #define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0)
/*
* Indicates that this BD is 16B long and is an empty
* TX BD. Not valid for use by the driver.
*/
- #define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ #define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1)
/*
* Indicates that this BD is 16B long and is an RX Producer
* (ie. empty) buffer descriptor.
*/
- #define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ #define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4)
/*
* Indicates that this BD is 16B long and is an RX
* Producer Buffer BD.
*/
- #define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ #define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5)
/*
* Indicates that this BD is 16B long and is an
* RX Producer Assembly Buffer Descriptor.
*/
- #define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ #define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6)
/*
* Indicates that this BD is 32B long and is used for
* normal L2 packet transmission.
*/
- #define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10)
- #define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG
+ #define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10)
+ /*
+ * Indicates that this BD is 32B long and is used for
+ * L2 packet transmission for small packets that require
+ * low latency.
+ */
+ #define BD_BASE_TYPE_TX_BD_LONG_INLINE UINT32_C(0x11)
+ #define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG_INLINE
uint8_t unused_1[7];
} __attribute__((packed));
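
The realigned defines above are the usual MASK/SFT pair: the descriptor type sits in the low six bits of the first byte of every BD. A small illustrative extract (the helper name is not from the driver):

#include <stdint.h>

/* Illustrative: extract the BD type from the first byte of any descriptor. */
static uint8_t
bd_get_type(uint8_t type_byte)
{
	return (type_byte & BD_BASE_TYPE_MASK) >> BD_BASE_TYPE_SFT;
}

For the new 32B inline descriptor added below, the result equals BD_BASE_TYPE_TX_BD_LONG_INLINE (0x11).
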
@@ -1406,6 +1470,7 @@ struct tx_bd_long {
uint64_t address;
} __attribute__((packed));
+/* Last 16 bytes of tx_bd_long. */
/* tx_bd_long_hi (size:128b/16B) */
struct tx_bd_long_hi {
/*
@@ -1595,6 +1660,219 @@ struct tx_bd_long_hi {
TX_BD_LONG_CFA_META_KEY_VLAN_TAG
} __attribute__((packed));
+/*
+ * This structure is used to inform the NIC of packet data that needs to be
+ * transmitted with additional processing that requires extra data such as
+ * VLAN insertion plus attached inline data. This BD type may be used to
+ * improve latency for small packets needing the additional extended features
+ * supported by long BDs.
+ */
+/* tx_bd_long_inline (size:256b/32B) */
+struct tx_bd_long_inline {
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define TX_BD_LONG_INLINE_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_LONG_INLINE_TYPE_SFT 0
+ /*
+ * This type of BD is 32B long and is used for inline L2 packet
+ * transmission.
+ */
+ #define TX_BD_LONG_INLINE_TYPE_TX_BD_LONG_INLINE UINT32_C(0x11)
+ #define TX_BD_LONG_INLINE_TYPE_LAST \
+ TX_BD_LONG_INLINE_TYPE_TX_BD_LONG_INLINE
+ /*
+ * All bits in this field may be set on the first BD of a packet.
+ * Only the packet_end bit may be set in non-first BDs.
+ */
+ #define TX_BD_LONG_INLINE_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_LONG_INLINE_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet ends with the data in the buffer
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
+ */
+ #define TX_BD_LONG_INLINE_FLAGS_PACKET_END UINT32_C(0x40)
+ /*
+ * If set to 1, the device will not generate a completion for
+ * this transmit packet unless there is an error in its processing.
+ * If this bit is set to 0, then the packet will be completed
+ * normally.
+ *
+ * This bit may be set only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_INLINE_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed
+ * in the ring by this packet, including the BD and inline
+ * data.
+ */
+ #define TX_BD_LONG_INLINE_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_LONG_INLINE_FLAGS_BD_CNT_SFT 8
+ /* This field is deprecated. */
+ #define TX_BD_LONG_INLINE_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_LONG_INLINE_FLAGS_LHINT_SFT 13
+ /*
+ * If set to 1, the device immediately updates the Send Consumer
+ * Index after the buffer associated with this descriptor has
+ * been transferred via DMA to NIC memory from host memory. An
+ * interrupt may or may not be generated according to the state
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_INLINE_FLAGS_COAL_NOW UINT32_C(0x8000)
+ /*
+ * This is the length of the inline data, not including BD length, in
+ * bytes.
+ * The maximum value is 480.
+ *
+ * This field must be valid on all BDs of a packet.
+ */
+ uint16_t len;
+ /*
+ * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with the transmit
+ * BD.
+ *
+ * This field must be valid on the first BD of a packet.
+ */
+ uint32_t opaque;
+ uint64_t unused1;
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Their value on other BDs of the packet is ignored.
+ */
+ uint16_t lflags;
+ /*
+ * If set to 1, the controller replaces the TCP/UDP checksum
+ * field of normal TCP/UDP packets, or the inner TCP/UDP
+ * checksum field of the encapsulated TCP/UDP packets, with the
+ * hardware calculated TCP/UDP checksum for the packet associated
+ * with this descriptor. The flag is ignored if the LSO flag is set.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+ /*
+ * If set to 1, the controller replaces the IP checksum of the
+ * normal packets, or the inner IP checksum of the encapsulated
+ * packets with the hardware calculated IP checksum for the
+ * packet associated with this descriptor.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ /*
+ * If set to 1, the controller will not append an Ethernet CRC
+ * to the end of the frame.
+ *
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
+ * has a valid Ethernet CRC on the packet.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_NOCRC UINT32_C(0x4)
+ /*
+ * If set to 1, the device will record the time at which the packet
+ * was actually transmitted at the TX MAC.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_STAMP UINT32_C(0x8)
+ /*
+ * If set to 1, the controller replaces the tunnel IP checksum
+ * field with hardware calculated IP checksum for the IP header
+ * of the packet associated with this descriptor. The hardware
+ * updates an outer UDP checksum if it is non-zero.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * This bit must be 0 for BDs of this type. LSO is not supported with
+ * inline BDs.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_LSO UINT32_C(0x20)
+ /* Since LSO is not supported with inline BDs, this bit is not used. */
+ #define TX_BD_LONG_INLINE_LFLAGS_IPID_FMT UINT32_C(0x40)
+ /* Since LSO is not supported with inline BDs, this bit is not used. */
+ #define TX_BD_LONG_INLINE_LFLAGS_T_IPID UINT32_C(0x80)
+ /*
+ * If set to '1', then the RoCE ICRC will be appended to the
+ * packet. Packet must be a valid RoCE format packet.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ /*
+ * If set to '1', then the FCoE CRC will be appended to the
+ * packet. Packet must be a valid FCoE format packet.
+ */
+ #define TX_BD_LONG_INLINE_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ uint16_t unused2;
+ uint32_t unused3;
+ uint16_t unused4;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
+ /* When key = 1, this is the VLAN tag VID value. */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_VID_SFT 0
+ /* When key = 1, this is the VLAN tag DE value. */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_DE UINT32_C(0x1000)
+ /* When key = 1, this is the VLAN tag PRI value. */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_PRI_SFT 13
+ /* When key = 1, this is the VLAN tag TPID select value. */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_SFT 16
+ /* 0x88a8 */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID88A8 \
+ (UINT32_C(0x0) << 16)
+ /* 0x8100 */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID8100 \
+ (UINT32_C(0x1) << 16)
+ /* 0x9100 */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID9100 \
+ (UINT32_C(0x2) << 16)
+ /* 0x9200 */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID9200 \
+ (UINT32_C(0x3) << 16)
+ /* 0x9300 */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID9300 \
+ (UINT32_C(0x4) << 16)
+ /* Value programmed in CFA VLANTPID register. */
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPIDCFG \
+ (UINT32_C(0x5) << 16)
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_LAST \
+ TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPIDCFG
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_RESERVED_MASK \
+ UINT32_C(0xff80000)
+ #define TX_BD_LONG_INLINE_CFA_META_VLAN_RESERVED_SFT 19
+ /*
+ * This field identifies the type of edit to be performed
+ * on the packet.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_INLINE_CFA_META_KEY_MASK \
+ UINT32_C(0xf0000000)
+ #define TX_BD_LONG_INLINE_CFA_META_KEY_SFT 28
+ /* No editing */
+ #define TX_BD_LONG_INLINE_CFA_META_KEY_NONE \
+ (UINT32_C(0x0) << 28)
+ /*
+ * - meta[17:16] - TPID select value (0 = 0x8100).
+ * - meta[15:12] - PRI/DE value.
+ * - meta[11:0] - VID value.
+ */
+ #define TX_BD_LONG_INLINE_CFA_META_KEY_VLAN_TAG \
+ (UINT32_C(0x1) << 28)
+ #define TX_BD_LONG_INLINE_CFA_META_KEY_LAST \
+ TX_BD_LONG_INLINE_CFA_META_KEY_VLAN_TAG
+} __attribute__((packed));
+
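
To make the field layout above concrete, the sketch below packs the three interesting fields of a tx_bd_long_inline: type plus 16-byte slot count in flags_type, the inline payload length (at most 480 bytes) in len, and a VLAN-insert edit in cfa_meta. It only illustrates the bit packing defined above, not the PMD's transmit path; the helper name and parameters are assumptions, and byte-order conversion (rte_cpu_to_le_*) is omitted for brevity.

#include <stdint.h>

/* Illustrative packing of a tx_bd_long_inline; not the bnxt PMD's code.
 * 'inline_len' is the inline payload length in bytes (<= 480). */
static void
fill_inline_bd(struct tx_bd_long_inline *bd, uint16_t inline_len,
	       uint16_t vlan_vid, uint8_t vlan_pri)
{
	/* The 32B descriptor occupies two 16B ring slots; the inline data
	 * that follows it is padded up to the next 16B boundary. */
	uint16_t bd_cnt = 2 + (inline_len + 15) / 16;

	bd->flags_type = TX_BD_LONG_INLINE_TYPE_TX_BD_LONG_INLINE |
			 TX_BD_LONG_INLINE_FLAGS_PACKET_END |
			 ((bd_cnt << TX_BD_LONG_INLINE_FLAGS_BD_CNT_SFT) &
			  TX_BD_LONG_INLINE_FLAGS_BD_CNT_MASK);
	bd->len = inline_len;

	/* Request a VLAN tag insertion edit: key = VLAN_TAG, TPID 0x8100. */
	bd->cfa_meta = TX_BD_LONG_INLINE_CFA_META_KEY_VLAN_TAG |
		       TX_BD_LONG_INLINE_CFA_META_VLAN_TPID_TPID8100 |
		       ((uint32_t)vlan_pri <<
			TX_BD_LONG_INLINE_CFA_META_VLAN_PRI_SFT) |
		       (vlan_vid & TX_BD_LONG_INLINE_CFA_META_VLAN_VID_MASK);
}
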
/* tx_bd_empty (size:128b/16B) */
struct tx_bd_empty {
/* This value identifies the type of buffer descriptor. */
@@ -2121,6 +2399,7 @@ struct rx_pkt_cmpl {
uint32_t rss_hash;
} __attribute__((packed));
+/* Last 16 bytes of rx_pkt_cmpl. */
/* rx_pkt_cmpl_hi (size:128b/16B) */
struct rx_pkt_cmpl_hi {
uint32_t flags2;
@@ -2566,6 +2845,7 @@ struct rx_tpa_start_cmpl {
uint32_t rss_hash;
} __attribute__((packed));
+/* Last 16 bytes of rx_tpa_start_cmpl. */
/* rx_tpa_start_cmpl_hi (size:128b/16B) */
struct rx_tpa_start_cmpl_hi {
uint32_t flags2;
@@ -2830,6 +3110,7 @@ struct rx_tpa_end_cmpl {
uint32_t tsdelta;
} __attribute__((packed));
+/* Last 16 bytes of rx_tpa_end_cmpl. */
/* rx_tpa_end_cmpl_hi (size:128b/16B) */
struct rx_tpa_end_cmpl_hi {
/*
@@ -3153,6 +3434,9 @@ struct hwrm_async_event_cmpl {
/* Port PHY configuration change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE \
UINT32_C(0x7)
+ /* Reset notification to clients */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY \
+ UINT32_C(0x8)
/* Function driver unloaded */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \
UINT32_C(0x10)
@@ -3790,6 +4074,96 @@ struct hwrm_async_event_cmpl_port_phy_cfg_change {
UINT32_C(0x40000)
} __attribute__((packed));
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Notify clients of imminent reset. */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY \
+ UINT32_C(0x8)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates driver action requested */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT \
+ 0
+ /*
+ * If set to 1, it indicates that the L2 client should
+ * stop sending in-band traffic to Nitro.
+ * If set to 0, there is no change in L2 client behavior.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, it indicates that the L2 client should
+ * bring down the interface.
+ * If set to 0, then there is no change in L2 client behavior.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST \
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ /* Indicates reason for reset. */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK \
+ UINT32_C(0xff00)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT \
+ 8
+ /* A management client has requested reset. */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST \
+ (UINT32_C(0x1) << 8)
+ /* A fatal firmware exception has occurred. */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL \
+ (UINT32_C(0x2) << 8)
+ /* A non-fatal firmware exception has occurred. */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL \
+ (UINT32_C(0x3) << 8)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ /*
+ * Minimum time before driver should attempt access - units 100ms ticks.
+ * Range 0-65535
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK \
+ UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT \
+ 16
+} __attribute__((packed));
+
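
event_data1 of the new reset notification packs three fields: the requested driver action in bits 7:0, the reset reason in bits 15:8, and a minimum wait before re-accessing the device, in 100 ms ticks, in bits 31:16. A hedged decode sketch follows (the helper name and the millisecond conversion are illustrative; the masks and shifts come from the defines above):

#include <stdint.h>

/* Illustrative decode of hwrm_async_event_cmpl_reset_notify event_data1. */
static void
decode_reset_notify(uint32_t event_data1, uint32_t *action,
		    uint32_t *reason, uint32_t *min_wait_ms)
{
	*action = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT;
	*reason = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT;
	/* The delay field counts 100 ms ticks. */
	*min_wait_ms = 100 * ((event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT);
}
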
/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_func_drvr_unload {
uint16_t type;
@@ -4285,6 +4659,13 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
*/
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE \
UINT32_C(0x8)
+ /*
+ * If this bit is set to 1, then the value of trusted VF enable
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE \
+ UINT32_C(0x10)
} __attribute__((packed));
/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
@@ -5306,6 +5687,20 @@ struct hwrm_func_qcaps_output {
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \
UINT32_C(0x40000)
/*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1, then
+ * the PF will know that the firmware has the capability to track
+ * the virtual link status.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED \
+ UINT32_C(0x80000)
+ /*
+ * If 1, then this function supports the push mode that uses
+ * write combine buffers and the long inline tx buffer descriptor.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WCB_PUSH_MODE \
+ UINT32_C(0x100000)
+ /*
* This value is current MAC address configured for this
* function. A value of 00-00-00-00-00-00 indicates no
* MAC address is currently configured.
@@ -5548,6 +5943,15 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST \
UINT32_C(0x20)
/*
+ * If the function that is being queried is a PF, then the HWRM shall
+ * set this field to 0 and the HWRM client shall ignore this field.
+ * If the function that is being queried is a VF, then the HWRM shall
+ * set this field to 1 if the queried VF is trusted, otherwise the HWRM
+ * shall set this field to 0.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF \
+ UINT32_C(0x40)
+ /*
* This value is current MAC address configured for this
* function. A value of 00-00-00-00-00-00 indicates no
* MAC address is currently configured.
@@ -5755,7 +6159,7 @@ struct hwrm_func_qcfg_output {
*/
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK \
UINT32_C(0x3)
- #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0
/* Cache Line Size 64 bytes */
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
UINT32_C(0x0)
@@ -5764,10 +6168,25 @@ struct hwrm_func_qcfg_output {
UINT32_C(0x1)
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_LAST \
HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* This value is the virtual link admin state setting. */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_MASK \
+ UINT32_C(0xc)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ /* Admin link state is in forced down mode. */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN \
+ (UINT32_C(0x0) << 2)
+ /* Admin link state is in forced up mode. */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP \
+ (UINT32_C(0x1) << 2)
+ /* Admin link state is in auto mode - follows the physical link state. */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO \
+ (UINT32_C(0x2) << 2)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO
/* Reserved for future. */
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_MASK \
- UINT32_C(0xfc)
- #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 2
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 4
/*
* The number of VFs that are allocated to the function.
* This is valid only on the PF with SR-IOV enabled.
@@ -5814,217 +6233,6 @@ struct hwrm_func_qcfg_output {
uint8_t valid;
} __attribute__((packed));
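
With this change the qcfg options byte carries two 2-bit fields: cache line size in bits 1:0 and the virtual link admin state in bits 3:2, leaving bits 7:4 reserved. A small illustrative check against the shifted enum values above (the helper name is not from the driver):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: the LINK_ADMIN_STATE_* values are already shifted, so a
 * masked compare is enough to test the reported admin state. */
static bool
link_admin_state_is_auto(uint8_t options)
{
	return (options &
		HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_MASK) ==
	       HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO;
}
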
-/***********************
- * hwrm_func_vlan_qcfg *
- ***********************/
-
-
-/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
-struct hwrm_func_vlan_qcfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * Function ID of the function that is being
- * configured.
- * If set to 0xFF... (All Fs), then the configuration is
- * for the requesting function.
- */
- uint16_t fid;
- uint8_t unused_0[6];
-} __attribute__((packed));
-
-/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
-struct hwrm_func_vlan_qcfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
- /* S-TAG VLAN identifier configured for the function. */
- uint16_t stag_vid;
- /* S-TAG PCP value configured for the function. */
- uint8_t stag_pcp;
- uint8_t unused_1;
- /*
- * S-TAG TPID value configured for the function. This field is specified in
- * network byte order.
- */
- uint16_t stag_tpid;
- /* C-TAG VLAN identifier configured for the function. */
- uint16_t ctag_vid;
- /* C-TAG PCP value configured for the function. */
- uint8_t ctag_pcp;
- uint8_t unused_2;
- /*
- * C-TAG TPID value configured for the function. This field is specified in
- * network byte order.
- */
- uint16_t ctag_tpid;
- /* Future use. */
- uint32_t rsvd2;
- /* Future use. */
- uint32_t rsvd3;
- uint32_t unused_3;
-} __attribute__((packed));
-
-/**********************
- * hwrm_func_vlan_cfg *
- **********************/
-
-
-/* hwrm_func_vlan_cfg_input (size:384b/48B) */
-struct hwrm_func_vlan_cfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * Function ID of the function that is being
- * configured.
- * If set to 0xFF... (All Fs), then the configuration is
- * for the requesting function.
- */
- uint16_t fid;
- uint8_t unused_0[2];
- uint32_t enables;
- /*
- * This bit must be '1' for the stag_vid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
- /*
- * This bit must be '1' for the ctag_vid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
- /*
- * This bit must be '1' for the stag_pcp field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
- /*
- * This bit must be '1' for the ctag_pcp field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
- /*
- * This bit must be '1' for the stag_tpid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
- /*
- * This bit must be '1' for the ctag_tpid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
- /* S-TAG VLAN identifier configured for the function. */
- uint16_t stag_vid;
- /* S-TAG PCP value configured for the function. */
- uint8_t stag_pcp;
- uint8_t unused_1;
- /*
- * S-TAG TPID value configured for the function. This field is specified in
- * network byte order.
- */
- uint16_t stag_tpid;
- /* C-TAG VLAN identifier configured for the function. */
- uint16_t ctag_vid;
- /* C-TAG PCP value configured for the function. */
- uint8_t ctag_pcp;
- uint8_t unused_2;
- /*
- * C-TAG TPID value configured for the function. This field is specified in
- * network byte order.
- */
- uint16_t ctag_tpid;
- /* Future use. */
- uint32_t rsvd1;
- /* Future use. */
- uint32_t rsvd2;
- uint8_t unused_3[4];
-} __attribute__((packed));
-
-/* hwrm_func_vlan_cfg_output (size:128b/16B) */
-struct hwrm_func_vlan_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/*****************
* hwrm_func_cfg *
*****************/
@@ -6205,6 +6413,17 @@ struct hwrm_func_cfg_input {
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
UINT32_C(0x100000)
+ /*
+ * This configuration change can be initiated by a PF driver. This
+ * configuration request shall be targeted to a VF. From local host
+ * resident HWRM clients, only the parent PF driver shall be allowed
+ * to initiate this change on one of its children VFs. If this bit is
+ * set to 1, then the VF that is being configured is requested to be
+ * trusted. If this bit is set to 0, then the VF that is being configured
+ * is requested to be not trusted.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_TRUSTED_VF_ENABLE \
+ UINT32_C(0x200000)
uint32_t enables;
/*
* This bit must be '1' for the mtu field to be
@@ -6339,6 +6558,12 @@ struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX \
UINT32_C(0x200000)
/*
+ * This bit must be '1' for the link admin state field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_LINK_STATE \
+ UINT32_C(0x400000)
+ /*
* The maximum transmission unit of the function.
* The HWRM should make sure that the mtu of
* the function does not exceed the mtu of the physical
@@ -6569,7 +6794,7 @@ struct hwrm_func_cfg_input {
*/
#define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_MASK \
UINT32_C(0x3)
- #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0
/* Cache Line Size 64 bytes */
#define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
UINT32_C(0x0)
@@ -6578,10 +6803,25 @@ struct hwrm_func_cfg_input {
UINT32_C(0x1)
#define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_LAST \
HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* This value is the virtual link admin state setting. */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_MASK \
+ UINT32_C(0xc)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ /* Admin state is forced down. */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN \
+ (UINT32_C(0x0) << 2)
+ /* Admin state is forced up. */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP \
+ (UINT32_C(0x1) << 2)
+ /* Admin state is in auto mode - follows the physical link state. */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_AUTO \
+ (UINT32_C(0x2) << 2)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_LAST \
+ HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_AUTO
/* Reserved for future. */
#define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_MASK \
- UINT32_C(0xfc)
- #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 2
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 4
/*
* The number of multicast filters that should
* be reserved for this function on the RX side.
@@ -6862,79 +7102,6 @@ struct hwrm_func_vf_resc_free_output {
uint8_t valid;
} __attribute__((packed));
-/*******************************
- * hwrm_func_vf_vnic_ids_query *
- *******************************/
-
-
-/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
-struct hwrm_func_vf_vnic_ids_query_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * This value is used to identify a Virtual Function (VF).
- * The scope of VF ID is local within a PF.
- */
- uint16_t vf_id;
- uint8_t unused_0[2];
- /* Max number of vnic ids in vnic id table */
- uint32_t max_vnic_id_cnt;
- /* This is the address for VF VNIC ID table */
- uint64_t vnic_id_tbl_addr;
-} __attribute__((packed));
-
-/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
-struct hwrm_func_vf_vnic_ids_query_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /*
- * Actual number of vnic ids
- *
- * Each VNIC ID is written as a 32-bit number.
- */
- uint32_t vnic_id_cnt;
- uint8_t unused_0[3];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/**********************
* hwrm_func_drv_rgtr *
**********************/
@@ -6978,7 +7145,8 @@ struct hwrm_func_drv_rgtr_input {
* If a VF driver sets this flag, it should be ignored
* by the HWRM.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE \
+ UINT32_C(0x1)
/*
* When this bit is '1', the function is requesting none of
* the requests from its children VF drivers to be
@@ -6987,7 +7155,8 @@ struct hwrm_func_drv_rgtr_input {
* If a VF driver sets this flag, it should be ignored
* by the HWRM.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE \
+ UINT32_C(0x2)
/*
* When this bit is '1', then ver_maj_8b, ver_min_8b, ver_upd_8b
* fields shall be ignored and ver_maj, ver_min, ver_upd
@@ -6996,7 +7165,22 @@ struct hwrm_func_drv_rgtr_input {
* fields shall be used for the driver version information and
* ver_maj, ver_min, ver_upd and ver_patch shall be ignored.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE UINT32_C(0x4)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the function is indicating support of
+ * 64bit flow handle. The firmware that only supports 64bit flow
+ * handle should check this bit before allowing processing of
+ * HWRM_CFA_FLOW_XXX commands from the requesting function as firmware
+ * with 64bit flow handle support can only be compatible with drivers
+ * that support 64bit flow handle. The legacy drivers that don't support
+ * 64bit flow handle won't be able to use HWRM_CFA_FLOW_XXX commands when
+ * running with new firmware that only supports 64bit flow handle. Such
+ * firmware returns HWRM_ERR_CODE_CMD_NOT_SUPPORTED status to the legacy
+ * driver when it encounters these commands.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE \
+ UINT32_C(0x8)
uint32_t enables;
/*
* This bit must be '1' for the os_type field to be
@@ -7117,7 +7301,14 @@ struct hwrm_func_drv_rgtr_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ uint32_t flags;
+ /*
+ * When this bit is '1', it indicates that the
+ * HWRM_FUNC_DRV_IF_CHANGE call is supported.
+ */
+ #define HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED \
+ UINT32_C(0x1)
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -7441,7 +7632,7 @@ struct hwrm_func_drv_qver_input {
uint8_t unused_0[2];
} __attribute__((packed));
-/* hwrm_func_drv_qver_output (size:192b/24B) */
+/* hwrm_func_drv_qver_output (size:256b/32B) */
struct hwrm_func_drv_qver_output {
/* The specific error status for the command. */
uint16_t error_code;
@@ -7483,15 +7674,7 @@ struct hwrm_func_drv_qver_output {
uint8_t ver_min_8b;
/* This is the 8bit update version of the driver. */
uint8_t ver_upd_8b;
- uint8_t unused_0[2];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
+ uint8_t unused_0[3];
/* This is the 16bit major version of the driver. */
uint16_t ver_maj;
/* This is the 16bit minor version of the driver. */
@@ -7500,6 +7683,15 @@ struct hwrm_func_drv_qver_output {
uint16_t ver_upd;
/* This is the 16bit patch version of the driver. */
uint16_t ver_patch;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
/****************************
@@ -7612,117 +7804,15 @@ struct hwrm_func_resource_qcaps_output {
* The number of TX rings assigned to the function cannot exceed this value.
*/
uint16_t max_tx_scheduler_inputs;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/*****************************
- * hwrm_func_vf_resource_cfg *
- *****************************/
-
-
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
-struct hwrm_func_vf_resource_cfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
+ uint16_t flags;
/*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
+ * When this bit is '1', it indicates that VF_RESOURCE_CFG supports
+ * the feature of reserving all minimum resources when minimum >= 1;
+ * otherwise it returns an error.
*/
- uint64_t resp_addr;
- /* VF ID that is being configured by PF */
- uint16_t vf_id;
- /* Maximum guaranteed number of MSI-X vectors for the function */
- uint16_t max_msix;
- /* Minimum guaranteed number of RSS/COS contexts */
- uint16_t min_rsscos_ctx;
- /* Maximum non-guaranteed number of RSS/COS contexts */
- uint16_t max_rsscos_ctx;
- /* Minimum guaranteed number of completion rings */
- uint16_t min_cmpl_rings;
- /* Maximum non-guaranteed number of completion rings */
- uint16_t max_cmpl_rings;
- /* Minimum guaranteed number of transmit rings */
- uint16_t min_tx_rings;
- /* Maximum non-guaranteed number of transmit rings */
- uint16_t max_tx_rings;
- /* Minimum guaranteed number of receive rings */
- uint16_t min_rx_rings;
- /* Maximum non-guaranteed number of receive rings */
- uint16_t max_rx_rings;
- /* Minimum guaranteed number of L2 contexts */
- uint16_t min_l2_ctxs;
- /* Maximum non-guaranteed number of L2 contexts */
- uint16_t max_l2_ctxs;
- /* Minimum guaranteed number of VNICs */
- uint16_t min_vnics;
- /* Maximum non-guaranteed number of VNICs */
- uint16_t max_vnics;
- /* Minimum guaranteed number of statistic contexts */
- uint16_t min_stat_ctx;
- /* Maximum non-guaranteed number of statistic contexts */
- uint16_t max_stat_ctx;
- /* Minimum guaranteed number of ring groups */
- uint16_t min_hw_ring_grps;
- /* Maximum non-guaranteed number of ring groups */
- uint16_t max_hw_ring_grps;
- uint8_t unused_0[4];
-} __attribute__((packed));
-
-/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
-struct hwrm_func_vf_resource_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Reserved number of RSS/COS contexts */
- uint16_t reserved_rsscos_ctx;
- /* Reserved number of completion rings */
- uint16_t reserved_cmpl_rings;
- /* Reserved number of transmit rings */
- uint16_t reserved_tx_rings;
- /* Reserved number of receive rings */
- uint16_t reserved_rx_rings;
- /* Reserved number of L2 contexts */
- uint16_t reserved_l2_ctxs;
- /* Reserved number of VNICs */
- uint16_t reserved_vnics;
- /* Reserved number of statistic contexts */
- uint16_t reserved_stat_ctx;
- /* Reserved number of ring groups */
- uint16_t reserved_hw_ring_grps;
- uint8_t unused_0[7];
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_FLAGS_MIN_GUARANTEED \
+ UINT32_C(0x1)
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -7769,7 +7859,7 @@ struct hwrm_func_backing_store_qcaps_input {
uint64_t resp_addr;
} __attribute__((packed));
-/* hwrm_func_backing_store_qcaps_output (size:512b/64B) */
+/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
struct hwrm_func_backing_store_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
@@ -7813,19 +7903,51 @@ struct hwrm_func_backing_store_qcaps_output {
uint32_t stat_max_entries;
/* Number of bytes that must be allocated for each context entry. */
uint16_t stat_entry_size;
- /* Maximum number of TQM context entries supported per ring. */
- uint16_t tqm_max_entries_per_ring;
/* Number of bytes that must be allocated for each context entry. */
uint16_t tqm_entry_size;
- /* Number of bytes that must be allocated for each context entry. */
- uint16_t mrav_entry_size;
+ /* Minimum number of TQM context entries required per ring. */
+ uint32_t tqm_min_entries_per_ring;
+ /*
+ * Maximum number of TQM context entries supported per ring. This is
+ * actually a recommended TQM queue size based on worst case usage of
+ * the TQM queue.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * TQM slowpath rings should be sized as follows:
+ *
+ * num_entries = num_vnics + num_l2_tx_rings + num_roce_qps + tqm_min_size
+ *
+ * Where:
+ * num_vnics is the number of VNICs allocated in the VNIC backing store
+ * num_l2_tx_rings is the number of L2 rings in the QP backing store
+ * num_roce_qps is the number of RoCE QPs in the QP backing store
+ * tqm_min_size is tqm_min_entries_per_ring reported by
+ * HWRM_FUNC_BACKING_STORE_QCAPS
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
+ uint32_t tqm_max_entries_per_ring;
/* Maximum number of MR/AV context entries supported for this function. */
uint32_t mrav_max_entries;
- /* Maximum number of Timer context entries supported for this function. */
- uint32_t tim_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t mrav_entry_size;
/* Number of bytes that must be allocated for each context entry. */
uint16_t tim_entry_size;
- uint8_t unused_0;
+ /* Maximum number of Timer context entries supported for this function. */
+ uint32_t tim_max_entries;
+ uint8_t unused_0[2];
+ /*
+ * The number of entries specified for any TQM ring must be a
+ * multiple of this value to prevent any resource allocation
+ * limitations.
+ */
+ uint8_t tqm_entries_multiple;
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
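
The slowpath sizing rule quoted above is an addition of four terms; rounding the result up to a multiple of tqm_entries_multiple and capping it at tqm_max_entries_per_ring are reasonable extra steps but are assumptions of this sketch, not requirements stated by the comment. Illustrative only:

#include <stdint.h>

/* Illustrative TQM slowpath ring sizing:
 *   entries = num_vnics + num_l2_tx_rings + num_roce_qps + tqm_min
 * rounded up to a multiple of 'multiple' (assumption) and capped at
 * 'tqm_max' (assumption). */
static uint32_t
tqm_sp_ring_entries(uint32_t num_vnics, uint32_t num_l2_tx_rings,
		    uint32_t num_roce_qps, uint32_t tqm_min,
		    uint32_t tqm_max, uint32_t multiple)
{
	uint32_t entries = num_vnics + num_l2_tx_rings +
			   num_roce_qps + tqm_min;

	if (multiple > 1)
		entries = ((entries + multiple - 1) / multiple) * multiple;
	if (entries > tqm_max)
		entries = tqm_max;
	return entries;
}
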
@@ -8672,23 +8794,129 @@ struct hwrm_func_backing_store_cfg_input {
uint32_t cq_num_entries;
/* Number of Stats. */
uint32_t stat_num_entries;
- /* Number of TQM slowpath entries. */
+ /*
+ * Number of TQM slowpath entries.
+ *
+ * TQM slowpath rings should be sized as follows:
+ *
+ * num_entries = num_vnics + num_l2_tx_rings + num_roce_qps + tqm_min_size
+ *
+ * Where:
+ * num_vnics is the number of VNICs allocated in the VNIC backing store
+ * num_l2_tx_rings is the number of L2 rings in the QP backing store
+ * num_roce_qps is the number of RoCE QPs in the QP backing store
+ * tqm_min_size is tqm_min_entries_per_ring reported by
+ * HWRM_FUNC_BACKING_STORE_QCAPS
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_sp_num_entries;
- /* Number of TQM ring 0 entries. */
+ /*
+ * Number of TQM ring 0 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring0_num_entries;
- /* Number of TQM ring 1 entries. */
+ /*
+ * Number of TQM ring 1 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring1_num_entries;
- /* Number of TQM ring 2 entries. */
+ /*
+ * Number of TQM ring 2 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring2_num_entries;
- /* Number of TQM ring 3 entries. */
+ /*
+ * Number of TQM ring 3 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring3_num_entries;
- /* Number of TQM ring 4 entries. */
+ /*
+ * Number of TQM ring 4 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring4_num_entries;
- /* Number of TQM ring 5 entries. */
+ /*
+ * Number of TQM ring 5 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring5_num_entries;
- /* Number of TQM ring 6 entries. */
+ /*
+ * Number of TQM ring 6 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring6_num_entries;
- /* Number of TQM ring 7 entries. */
+ /*
+ * Number of TQM ring 7 entries.
+ *
+ * TQM fastpath rings should be sized large enough to accommodate the
+ * maximum number of QPs (either L2 or RoCE, or both if shared)
+ * that can be enqueued to the TQM ring.
+ *
+ * Note that TQM ring sizes cannot be extended while the system is
+ * operational. If a PF driver needs to extend a TQM ring, it needs
+ * to reset the function (e.g. HWRM_FUNC_RESET) and then reallocate
+ * the backing store.
+ */
uint32_t tqm_ring7_num_entries;
/* Number of MR/AV entries. */
uint32_t mrav_num_entries;
@@ -9638,6 +9866,633 @@ struct hwrm_func_backing_store_qcfg_output {
uint8_t valid;
} __attribute__((packed));
+/***********************
+ * hwrm_func_vlan_qcfg *
+ ***********************/
+
+
+/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
+struct hwrm_func_vlan_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
+struct hwrm_func_vlan_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint64_t unused_0;
+ /* S-TAG VLAN identifier configured for the function. */
+ uint16_t stag_vid;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t stag_pcp;
+ uint8_t unused_1;
+ /*
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t stag_tpid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint16_t ctag_vid;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
+ /*
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t ctag_tpid;
+ /* Future use. */
+ uint32_t rsvd2;
+ /* Future use. */
+ uint32_t rsvd3;
+ uint8_t unused_3[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_vlan_cfg *
+ **********************/
+
+
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
+struct hwrm_func_vlan_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the stag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the ctag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the stag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the ctag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ctag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
+ /* S-TAG VLAN identifier configured for the function. */
+ uint16_t stag_vid;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t stag_pcp;
+ uint8_t unused_1;
+ /*
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t stag_tpid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint16_t ctag_vid;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
+ /*
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
+ */
+ uint16_t ctag_tpid;
+ /* Future use. */
+ uint32_t rsvd1;
+ /* Future use. */
+ uint32_t rsvd2;
+ uint8_t unused_3[4];
+} __attribute__((packed));
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
+struct hwrm_func_vlan_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
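+/*
+ * Illustrative sketch (not part of the generated interface): one way a driver
+ * could fill hwrm_func_vlan_cfg_input to assign an S-TAG VLAN to a function.
+ * The helper name and the rte_cpu_to_le_*() conversions are assumptions about
+ * the caller's environment; only fields whose enable bit is set are applied,
+ * and the request must still go through the driver's normal HWRM send path.
+ */
+static inline void
+bnxt_example_fill_func_vlan_cfg(struct hwrm_func_vlan_cfg_input *req,
+				uint16_t fid, uint16_t stag_vid)
+{
+	/* Target function; 0xffff would mean the requesting function itself. */
+	req->fid = rte_cpu_to_le_16(fid);
+	/* Enable only the S-TAG VLAN id field for this request. */
+	req->enables = rte_cpu_to_le_32(HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID);
+	req->stag_vid = rte_cpu_to_le_16(stag_vid);
+}
+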
+/*******************************
+ * hwrm_func_vf_vnic_ids_query *
+ *******************************/
+
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ uint8_t unused_0[2];
+ /* Max number of vnic ids in vnic id table */
+ uint32_t max_vnic_id_cnt;
+ /* This is the address for VF VNIC ID table */
+ uint64_t vnic_id_tbl_addr;
+} __attribute__((packed));
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Actual number of vnic ids
+ *
+ * Each VNIC ID is written as a 32-bit number.
+ */
+ uint32_t vnic_id_cnt;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
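+/*
+ * Illustrative sketch: vnic_id_tbl_addr points at a DMA-able array of 32-bit
+ * VNIC IDs, so the backing buffer must hold at least max_vnic_id_cnt entries;
+ * on completion only the first vnic_id_cnt entries are valid. The helper name
+ * is an assumption, not part of the HWRM interface.
+ */
+static inline size_t
+bnxt_example_vf_vnic_id_tbl_size(uint32_t max_vnic_id_cnt)
+{
+	/* Each VNIC ID is written by firmware as a 32-bit value. */
+	return (size_t)max_vnic_id_cnt * sizeof(uint32_t);
+}
+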
+/***********************
+ * hwrm_func_vf_bw_cfg *
+ ***********************/
+
+
+/* hwrm_func_vf_bw_cfg_input (size:960b/120B) */
+struct hwrm_func_vf_bw_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The number of VF functions that are being configured.
+ * The cmd space allows up to 50 VFs' BW to be configured with one cmd.
+ */
+ uint16_t num_vfs;
+ uint16_t unused[3];
+ /* These 16-bit fields contain the VF fid and the rate scale percentage. */
+ uint16_t vfn[48];
+ /* The physical VF id the adjustment will be made to. */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_VFID_MASK UINT32_C(0xfff)
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_VFID_SFT 0
+ /*
+ * This field configures the rate scale percentage of the VF as specified
+ * by the physical VF id.
+ */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_MASK UINT32_C(0xf000)
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_SFT 12
+ /* 0% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_0 \
+ (UINT32_C(0x0) << 12)
+ /* 6.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_6_66 \
+ (UINT32_C(0x1) << 12)
+ /* 13.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_13_33 \
+ (UINT32_C(0x2) << 12)
+ /* 20% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_20 \
+ (UINT32_C(0x3) << 12)
+ /* 26.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_26_66 \
+ (UINT32_C(0x4) << 12)
+	/* 33.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_33_33 \
+ (UINT32_C(0x5) << 12)
+ /* 40% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_40 \
+ (UINT32_C(0x6) << 12)
+ /* 46.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_46_66 \
+ (UINT32_C(0x7) << 12)
+ /* 53.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_53_33 \
+ (UINT32_C(0x8) << 12)
+ /* 60% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_60 \
+ (UINT32_C(0x9) << 12)
+ /* 66.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_66_66 \
+ (UINT32_C(0xa) << 12)
+	/* 73.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_73_33 \
+ (UINT32_C(0xb) << 12)
+ /* 80% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_80 \
+ (UINT32_C(0xc) << 12)
+ /* 86.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_86_66 \
+ (UINT32_C(0xd) << 12)
+ /* 93.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_93_33 \
+ (UINT32_C(0xe) << 12)
+ /* 100% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_100 \
+ (UINT32_C(0xf) << 12)
+ #define HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_LAST \
+ HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_100
+} __attribute__((packed));
+
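+/*
+ * Illustrative sketch: each vfn[] element carries the physical VF id in bits
+ * 11:0 and the rate-scale selector in bits 15:12, so an entry can be built by
+ * masking the id and OR-ing in one of the already-shifted RATE_PCT_* values,
+ * e.g. bnxt_example_vf_bw_entry(3, HWRM_FUNC_VF_BW_CFG_INPUT_VFN_RATE_PCT_40).
+ * The helper name is an assumption; a real driver would also convert the
+ * result to little endian before sending the request.
+ */
+static inline uint16_t
+bnxt_example_vf_bw_entry(uint16_t vf_fid, uint32_t rate_pct)
+{
+	return (uint16_t)((vf_fid & HWRM_FUNC_VF_BW_CFG_INPUT_VFN_VFID_MASK) |
+			  rate_pct);
+}
+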
+/* hwrm_func_vf_bw_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_bw_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_func_vf_bw_qcfg *
+ ************************/
+
+
+/* hwrm_func_vf_bw_qcfg_input (size:960b/120B) */
+struct hwrm_func_vf_bw_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The number of VF functions that are being queried.
+ * The inline response space allows the host to query up to 50 VFs'
+ * rate scale percentage
+ */
+ uint16_t num_vfs;
+ uint16_t unused[3];
+ /* These 16-bit fields contain the VF fid */
+ uint16_t vfn[48];
+ /* The physical VF id of interest */
+ #define HWRM_FUNC_VF_BW_QCFG_INPUT_VFN_VFID_MASK UINT32_C(0xfff)
+ #define HWRM_FUNC_VF_BW_QCFG_INPUT_VFN_VFID_SFT 0
+} __attribute__((packed));
+
+/* hwrm_func_vf_bw_qcfg_output (size:960b/120B) */
+struct hwrm_func_vf_bw_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The number of VF functions that are being queried.
+ * The inline response space allows the host to query up to 50 VFs' rate
+ * scale percentage
+ */
+ uint16_t num_vfs;
+ uint16_t unused[3];
+ /* These 16-bit fields contain the VF fid and the rate scale percentage. */
+ uint16_t vfn[48];
+ /* The physical VF id the adjustment will be made to. */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_VFID_MASK UINT32_C(0xfff)
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_VFID_SFT 0
+ /*
+ * This field configures the rate scale percentage of the VF as specified
+ * by the physical VF id.
+ */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_MASK UINT32_C(0xf000)
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_SFT 12
+ /* 0% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_0 \
+ (UINT32_C(0x0) << 12)
+ /* 6.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_6_66 \
+ (UINT32_C(0x1) << 12)
+ /* 13.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_13_33 \
+ (UINT32_C(0x2) << 12)
+ /* 20% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_20 \
+ (UINT32_C(0x3) << 12)
+ /* 26.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_26_66 \
+ (UINT32_C(0x4) << 12)
+	/* 33.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_33_33 \
+ (UINT32_C(0x5) << 12)
+ /* 40% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_40 \
+ (UINT32_C(0x6) << 12)
+ /* 46.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_46_66 \
+ (UINT32_C(0x7) << 12)
+ /* 53.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_53_33 \
+ (UINT32_C(0x8) << 12)
+ /* 60% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_60 \
+ (UINT32_C(0x9) << 12)
+ /* 66.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_66_66 \
+ (UINT32_C(0xa) << 12)
+	/* 73.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_73_33 \
+ (UINT32_C(0xb) << 12)
+ /* 80% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_80 \
+ (UINT32_C(0xc) << 12)
+ /* 86.66% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_86_66 \
+ (UINT32_C(0xd) << 12)
+ /* 93.33% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_93_33 \
+ (UINT32_C(0xe) << 12)
+ /* 100% of the max tx rate */
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_100 \
+ (UINT32_C(0xf) << 12)
+ #define HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_LAST \
+ HWRM_FUNC_VF_BW_QCFG_OUTPUT_VFN_RATE_PCT_100
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_func_drv_if_change *
+ ***************************/
+
+
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function driver is indicating
+ * that the IF state is changing to UP state. The call should
+ * be made at the beginning of the driver's open call before
+ * resources are allocated. After making the call, the driver
+ * should check the response to see if any resources may have
+ * changed (see the response below). If the driver fails
+ * the open call, the driver should make this call again with
+ * this bit cleared to indicate that the IF state is not UP.
+ * During the driver's close call when the IF state is changing
+ * to DOWN, the driver should make this call with the bit cleared
+ * after all resources have been freed.
+ */
+ #define HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP UINT32_C(0x1)
+ uint32_t unused;
+} __attribute__((packed));
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When this bit is '1', it indicates that the resources reserved
+ * for this function may have changed. The driver should check
+ * resource capabilities and reserve resources again before
+ * allocating resources.
+ */
+ #define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE \
+ UINT32_C(0x1)
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
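+/*
+ * Illustrative sketch: after a successful hwrm_func_drv_if_change with the UP
+ * flag set, the driver should inspect the response flags before allocating
+ * resources; if the bit below is set, resource capabilities must be queried
+ * and reserved again first. The helper name and the rte_le_to_cpu_32() call
+ * are assumptions about the caller's environment.
+ */
+static inline int
+bnxt_example_if_change_needs_rereserve(
+	const struct hwrm_func_drv_if_change_output *resp)
+{
+	return (rte_le_to_cpu_32(resp->flags) &
+		HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE) != 0;
+}
+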
/*********************
* hwrm_port_phy_cfg *
*********************/
@@ -11916,6 +12771,362 @@ struct hwrm_port_mac_ptp_qcfg_output {
uint8_t valid;
} __attribute__((packed));
+/* Port Tx Statistics Formats */
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ /* Total Number of 64 Bytes frames transmitted */
+ uint64_t tx_64b_frames;
+ /* Total Number of 65-127 Bytes frames transmitted */
+ uint64_t tx_65b_127b_frames;
+ /* Total Number of 128-255 Bytes frames transmitted */
+ uint64_t tx_128b_255b_frames;
+ /* Total Number of 256-511 Bytes frames transmitted */
+ uint64_t tx_256b_511b_frames;
+ /* Total Number of 512-1023 Bytes frames transmitted */
+ uint64_t tx_512b_1023b_frames;
+ /* Total Number of 1024-1518 Bytes frames transmitted */
+ uint64_t tx_1024b_1518b_frames;
+ /*
+ * Total Number of each good VLAN (exludes FCS errors)
+	 * Total Number of each good VLAN (excludes FCS errors)
+ * inclusive (excluding framing bits but including FCS bytes).
+ */
+ uint64_t tx_good_vlan_frames;
+ /* Total Number of 1519-2047 Bytes frames transmitted */
+ uint64_t tx_1519b_2047b_frames;
+ /* Total Number of 2048-4095 Bytes frames transmitted */
+ uint64_t tx_2048b_4095b_frames;
+ /* Total Number of 4096-9216 Bytes frames transmitted */
+ uint64_t tx_4096b_9216b_frames;
+ /* Total Number of 9217-16383 Bytes frames transmitted */
+ uint64_t tx_9217b_16383b_frames;
+ /* Total Number of good frames transmitted */
+ uint64_t tx_good_frames;
+ /* Total Number of frames transmitted */
+ uint64_t tx_total_frames;
+ /* Total number of unicast frames transmitted */
+ uint64_t tx_ucast_frames;
+ /* Total number of multicast frames transmitted */
+ uint64_t tx_mcast_frames;
+ /* Total number of broadcast frames transmitted */
+ uint64_t tx_bcast_frames;
+ /* Total number of PAUSE control frames transmitted */
+ uint64_t tx_pause_frames;
+ /*
+ * Total number of PFC/per-priority PAUSE
+ * control frames transmitted
+ */
+ uint64_t tx_pfc_frames;
+ /* Total number of jabber frames transmitted */
+ uint64_t tx_jabber_frames;
+ /* Total number of frames transmitted with FCS error */
+ uint64_t tx_fcs_err_frames;
+ /* Total number of control frames transmitted */
+ uint64_t tx_control_frames;
+ /* Total number of over-sized frames transmitted */
+ uint64_t tx_oversz_frames;
+ /* Total number of frames with single deferral */
+ uint64_t tx_single_dfrl_frames;
+ /* Total number of frames with multiple deferrals */
+ uint64_t tx_multi_dfrl_frames;
+ /* Total number of frames with single collision */
+ uint64_t tx_single_coll_frames;
+ /* Total number of frames with multiple collisions */
+ uint64_t tx_multi_coll_frames;
+ /* Total number of frames with late collisions */
+ uint64_t tx_late_coll_frames;
+ /* Total number of frames with excessive collisions */
+ uint64_t tx_excessive_coll_frames;
+ /* Total number of fragmented frames transmitted */
+ uint64_t tx_frag_frames;
+ /* Total number of transmit errors */
+ uint64_t tx_err;
+ /* Total number of single VLAN tagged frames transmitted */
+ uint64_t tx_tagged_frames;
+ /* Total number of double VLAN tagged frames transmitted */
+ uint64_t tx_dbl_tagged_frames;
+ /* Total number of runt frames transmitted */
+ uint64_t tx_runt_frames;
+ /* Total number of TX FIFO under runs */
+ uint64_t tx_fifo_underruns;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 0 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri0;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 1 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri1;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 2 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri2;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 3 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri3;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 4 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri4;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 5 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri5;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 6 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri6;
+ /*
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 7 transmitted
+ */
+ uint64_t tx_pfc_ena_frames_pri7;
+ /* Total number of EEE LPI Events on TX */
+ uint64_t tx_eee_lpi_events;
+ /* EEE LPI Duration Counter on TX */
+ uint64_t tx_eee_lpi_duration;
+ /*
+ * Total number of Link Level Flow Control (LLFC) messages
+ * transmitted
+ */
+ uint64_t tx_llfc_logical_msgs;
+ /* Total number of HCFC messages transmitted */
+ uint64_t tx_hcfc_msgs;
+ /* Total number of TX collisions */
+ uint64_t tx_total_collisions;
+ /* Total number of transmitted bytes */
+ uint64_t tx_bytes;
+ /* Total number of end-to-end HOL frames */
+ uint64_t tx_xthol_frames;
+ /* Total Tx Drops per Port reported by STATS block */
+ uint64_t tx_stat_discard;
+ /* Total Tx Error Drops per Port reported by STATS block */
+ uint64_t tx_stat_error;
+} __attribute__((packed));
+
+/* Port Rx Statistics Formats */
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ /* Total Number of 64 Bytes frames received */
+ uint64_t rx_64b_frames;
+ /* Total Number of 65-127 Bytes frames received */
+ uint64_t rx_65b_127b_frames;
+ /* Total Number of 128-255 Bytes frames received */
+ uint64_t rx_128b_255b_frames;
+ /* Total Number of 256-511 Bytes frames received */
+ uint64_t rx_256b_511b_frames;
+ /* Total Number of 512-1023 Bytes frames received */
+ uint64_t rx_512b_1023b_frames;
+ /* Total Number of 1024-1518 Bytes frames received */
+ uint64_t rx_1024b_1518b_frames;
+ /*
+	 * Total Number of each good VLAN (excludes FCS errors)
+ * frame received which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
+ */
+ uint64_t rx_good_vlan_frames;
+ /* Total Number of 1519-2047 Bytes frames received */
+ uint64_t rx_1519b_2047b_frames;
+ /* Total Number of 2048-4095 Bytes frames received */
+ uint64_t rx_2048b_4095b_frames;
+ /* Total Number of 4096-9216 Bytes frames received */
+ uint64_t rx_4096b_9216b_frames;
+ /* Total Number of 9217-16383 Bytes frames received */
+ uint64_t rx_9217b_16383b_frames;
+ /* Total number of frames received */
+ uint64_t rx_total_frames;
+ /* Total number of unicast frames received */
+ uint64_t rx_ucast_frames;
+ /* Total number of multicast frames received */
+ uint64_t rx_mcast_frames;
+ /* Total number of broadcast frames received */
+ uint64_t rx_bcast_frames;
+ /* Total number of received frames with FCS error */
+ uint64_t rx_fcs_err_frames;
+ /* Total number of control frames received */
+ uint64_t rx_ctrl_frames;
+ /* Total number of PAUSE frames received */
+ uint64_t rx_pause_frames;
+ /* Total number of PFC frames received */
+ uint64_t rx_pfc_frames;
+ /*
+ * Total number of frames received with an unsupported
+ * opcode
+ */
+ uint64_t rx_unsupported_opcode_frames;
+ /*
+ * Total number of frames received with an unsupported
+ * DA for pause and PFC
+ */
+ uint64_t rx_unsupported_da_pausepfc_frames;
+ /* Total number of frames received with an unsupported SA */
+ uint64_t rx_wrong_sa_frames;
+ /* Total number of received packets with alignment error */
+ uint64_t rx_align_err_frames;
+ /* Total number of received frames with out-of-range length */
+ uint64_t rx_oor_len_frames;
+ /* Total number of received frames with error termination */
+ uint64_t rx_code_err_frames;
+ /*
+ * Total number of received frames with a false carrier is
+ * detected during idle, as defined by RX_ER samples active
+ * and RXD is 0xE. The event is reported along with the
+ * statistics generated on the next received frame. Only
+ * one false carrier condition can be detected and logged
+ * between frames.
+ *
+ * Carrier event, valid for 10M/100M speed modes only.
+ */
+ uint64_t rx_false_carrier_frames;
+ /* Total number of over-sized frames received */
+ uint64_t rx_ovrsz_frames;
+ /* Total number of jabber packets received */
+ uint64_t rx_jbr_frames;
+ /* Total number of received frames with MTU error */
+ uint64_t rx_mtu_err_frames;
+ /* Total number of received frames with CRC match */
+ uint64_t rx_match_crc_frames;
+ /* Total number of frames received promiscuously */
+ uint64_t rx_promiscuous_frames;
+ /*
+ * Total number of received frames with one or two VLAN
+ * tags
+ */
+ uint64_t rx_tagged_frames;
+ /* Total number of received frames with two VLAN tags */
+ uint64_t rx_double_tagged_frames;
+ /* Total number of truncated frames received */
+ uint64_t rx_trunc_frames;
+ /* Total number of good frames (without errors) received */
+ uint64_t rx_good_frames;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 0
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri0;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 1
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri1;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 2
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri2;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 3
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri3;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 4
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri4;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 5
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri5;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 6
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri6;
+ /*
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 7
+ */
+ uint64_t rx_pfc_xon2xoff_frames_pri7;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 0
+ */
+ uint64_t rx_pfc_ena_frames_pri0;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 1
+ */
+ uint64_t rx_pfc_ena_frames_pri1;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 2
+ */
+ uint64_t rx_pfc_ena_frames_pri2;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 3
+ */
+ uint64_t rx_pfc_ena_frames_pri3;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 4
+ */
+ uint64_t rx_pfc_ena_frames_pri4;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 5
+ */
+ uint64_t rx_pfc_ena_frames_pri5;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 6
+ */
+ uint64_t rx_pfc_ena_frames_pri6;
+ /*
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 7
+ */
+ uint64_t rx_pfc_ena_frames_pri7;
+ /* Total Number of frames received with SCH CRC error */
+ uint64_t rx_sch_crc_err_frames;
+ /* Total Number of under-sized frames received */
+ uint64_t rx_undrsz_frames;
+ /* Total Number of fragmented frames received */
+ uint64_t rx_frag_frames;
+ /* Total number of RX EEE LPI Events */
+ uint64_t rx_eee_lpi_events;
+ /* EEE LPI Duration Counter on RX */
+ uint64_t rx_eee_lpi_duration;
+ /*
+ * Total number of physical type Link Level Flow Control
+ * (LLFC) messages received
+ */
+ uint64_t rx_llfc_physical_msgs;
+ /*
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received
+ */
+ uint64_t rx_llfc_logical_msgs;
+ /*
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received with CRC error
+ */
+ uint64_t rx_llfc_msgs_with_crc_err;
+ /* Total number of HCFC messages received */
+ uint64_t rx_hcfc_msgs;
+ /* Total number of HCFC messages received with CRC error */
+ uint64_t rx_hcfc_msgs_with_crc_err;
+ /* Total number of received bytes */
+ uint64_t rx_bytes;
+ /* Total number of bytes received in runt frames */
+ uint64_t rx_runt_bytes;
+ /* Total number of runt frames received */
+ uint64_t rx_runt_frames;
+ /* Total Rx Discards per Port reported by STATS block */
+ uint64_t rx_stat_discard;
+ uint64_t rx_stat_err;
+} __attribute__((packed));
+
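+/*
+ * Illustrative sketch: the size comments above (408B for tx_port_stats, 528B
+ * for rx_port_stats) describe the DMA layout that firmware writes, so a
+ * compile-time check can catch accidental layout changes. The use of C11
+ * _Static_assert here is an assumption about the build environment.
+ */
+_Static_assert(sizeof(struct tx_port_stats) == 408,
+	       "tx_port_stats must remain 408 bytes");
+_Static_assert(sizeof(struct rx_port_stats) == 528,
+	       "rx_port_stats must remain 528 bytes");
+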
/********************
* hwrm_port_qstats *
********************/
@@ -11990,6 +13201,154 @@ struct hwrm_port_qstats_output {
uint8_t valid;
} __attribute__((packed));
+/* Port Tx Statistics extended Formats */
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ /* Total number of tx bytes count on cos queue 0 */
+ uint64_t tx_bytes_cos0;
+ /* Total number of tx bytes count on cos queue 1 */
+ uint64_t tx_bytes_cos1;
+ /* Total number of tx bytes count on cos queue 2 */
+ uint64_t tx_bytes_cos2;
+ /* Total number of tx bytes count on cos queue 3 */
+ uint64_t tx_bytes_cos3;
+ /* Total number of tx bytes count on cos queue 4 */
+ uint64_t tx_bytes_cos4;
+ /* Total number of tx bytes count on cos queue 5 */
+ uint64_t tx_bytes_cos5;
+ /* Total number of tx bytes count on cos queue 6 */
+ uint64_t tx_bytes_cos6;
+ /* Total number of tx bytes count on cos queue 7 */
+ uint64_t tx_bytes_cos7;
+ /* Total number of tx packets count on cos queue 0 */
+ uint64_t tx_packets_cos0;
+ /* Total number of tx packets count on cos queue 1 */
+ uint64_t tx_packets_cos1;
+ /* Total number of tx packets count on cos queue 2 */
+ uint64_t tx_packets_cos2;
+ /* Total number of tx packets count on cos queue 3 */
+ uint64_t tx_packets_cos3;
+ /* Total number of tx packets count on cos queue 4 */
+ uint64_t tx_packets_cos4;
+ /* Total number of tx packets count on cos queue 5 */
+ uint64_t tx_packets_cos5;
+ /* Total number of tx packets count on cos queue 6 */
+ uint64_t tx_packets_cos6;
+ /* Total number of tx packets count on cos queue 7 */
+ uint64_t tx_packets_cos7;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */
+ uint64_t pfc_pri0_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */
+ uint64_t pfc_pri0_tx_transitions;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */
+ uint64_t pfc_pri1_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */
+ uint64_t pfc_pri1_tx_transitions;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */
+ uint64_t pfc_pri2_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */
+ uint64_t pfc_pri2_tx_transitions;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */
+ uint64_t pfc_pri3_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */
+ uint64_t pfc_pri3_tx_transitions;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */
+ uint64_t pfc_pri4_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */
+ uint64_t pfc_pri4_tx_transitions;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */
+ uint64_t pfc_pri5_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */
+ uint64_t pfc_pri5_tx_transitions;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */
+ uint64_t pfc_pri6_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */
+ uint64_t pfc_pri6_tx_transitions;
+ /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */
+ uint64_t pfc_pri7_tx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */
+ uint64_t pfc_pri7_tx_transitions;
+} __attribute__((packed));
+
+/* Port Rx Statistics extended Formats */
+/* rx_port_stats_ext (size:2368b/296B) */
+struct rx_port_stats_ext {
+ /* Number of times link state changed to down */
+ uint64_t link_down_events;
+ /* Number of times the idle rings with pause bit are found */
+ uint64_t continuous_pause_events;
+ /* Number of times the active rings pause bit resumed back */
+ uint64_t resume_pause_events;
+ /* Number of times, the ROCE cos queue PFC is disabled to avoid pause flood/burst */
+ uint64_t continuous_roce_pause_events;
+ /* Number of times, the ROCE cos queue PFC is enabled back */
+ uint64_t resume_roce_pause_events;
+ /* Total number of rx bytes count on cos queue 0 */
+ uint64_t rx_bytes_cos0;
+ /* Total number of rx bytes count on cos queue 1 */
+ uint64_t rx_bytes_cos1;
+ /* Total number of rx bytes count on cos queue 2 */
+ uint64_t rx_bytes_cos2;
+ /* Total number of rx bytes count on cos queue 3 */
+ uint64_t rx_bytes_cos3;
+ /* Total number of rx bytes count on cos queue 4 */
+ uint64_t rx_bytes_cos4;
+ /* Total number of rx bytes count on cos queue 5 */
+ uint64_t rx_bytes_cos5;
+ /* Total number of rx bytes count on cos queue 6 */
+ uint64_t rx_bytes_cos6;
+ /* Total number of rx bytes count on cos queue 7 */
+ uint64_t rx_bytes_cos7;
+ /* Total number of rx packets count on cos queue 0 */
+ uint64_t rx_packets_cos0;
+ /* Total number of rx packets count on cos queue 1 */
+ uint64_t rx_packets_cos1;
+ /* Total number of rx packets count on cos queue 2 */
+ uint64_t rx_packets_cos2;
+ /* Total number of rx packets count on cos queue 3 */
+ uint64_t rx_packets_cos3;
+ /* Total number of rx packets count on cos queue 4 */
+ uint64_t rx_packets_cos4;
+ /* Total number of rx packets count on cos queue 5 */
+ uint64_t rx_packets_cos5;
+ /* Total number of rx packets count on cos queue 6 */
+ uint64_t rx_packets_cos6;
+ /* Total number of rx packets count on cos queue 7 */
+ uint64_t rx_packets_cos7;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */
+ uint64_t pfc_pri0_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */
+ uint64_t pfc_pri0_rx_transitions;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */
+ uint64_t pfc_pri1_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */
+ uint64_t pfc_pri1_rx_transitions;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */
+ uint64_t pfc_pri2_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */
+ uint64_t pfc_pri2_rx_transitions;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */
+ uint64_t pfc_pri3_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */
+ uint64_t pfc_pri3_rx_transitions;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */
+ uint64_t pfc_pri4_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */
+ uint64_t pfc_pri4_rx_transitions;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */
+ uint64_t pfc_pri5_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */
+ uint64_t pfc_pri5_rx_transitions;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */
+ uint64_t pfc_pri6_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */
+ uint64_t pfc_pri6_rx_transitions;
+ /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */
+ uint64_t pfc_pri7_rx_duration_us;
+ /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */
+ uint64_t pfc_pri7_rx_transitions;
+} __attribute__((packed));
+
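+/*
+ * Illustrative sketch: each pfc_priN_*_duration_us counter accumulates time
+ * spent between an XON -> XOFF event and the following XOFF -> XON event,
+ * while the matching *_transitions counter counts those events, so a rough
+ * mean pause length per transition can be derived as below. The helper name
+ * is an assumption.
+ */
+static inline uint64_t
+bnxt_example_avg_pfc_pause_us(uint64_t duration_us, uint64_t transitions)
+{
+	return transitions != 0 ? duration_us / transitions : 0;
+}
+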
/************************
* hwrm_port_qstats_ext *
************************/
@@ -12063,7 +13422,15 @@ struct hwrm_port_qstats_ext_output {
uint16_t tx_stat_size;
/* The size of RX port statistics block in bytes. */
uint16_t rx_stat_size;
- uint8_t unused_0[3];
+ /* Total number of active cos queues available. */
+ uint16_t total_active_cos_queues;
+ uint8_t flags;
+ /*
+	 * If set to 1, this bit indicates that clearing RoCE
+	 * specific counters is supported.
+ */
+ #define HWRM_PORT_QSTATS_EXT_OUTPUT_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED \
+ UINT32_C(0x1)
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -12187,68 +13554,22 @@ struct hwrm_port_clr_stats_input {
uint64_t resp_addr;
/* Port ID of port that is being queried. */
uint16_t port_id;
- uint8_t unused_0[6];
-} __attribute__((packed));
-
-/* hwrm_port_clr_stats_output (size:128b/16B) */
-struct hwrm_port_clr_stats_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/****************************
- * hwrm_port_lpbk_clr_stats *
- ****************************/
-
-
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
+ uint8_t flags;
/*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
+	 * If set to 1, this bit requests that the following RoCE
+	 * specific counters be cleared:
+ * RoCE associated TX/RX cos counters
+ * CNP associated TX/RX cos counters
+ * RoCE/CNP specific TX/RX flow counters
+ * Firmware will determine the RoCE/CNP cos queue based on qos profile.
+ * This flag is honored only when RoCE is enabled on that port.
*/
- uint64_t resp_addr;
+ #define HWRM_PORT_CLR_STATS_INPUT_FLAGS_ROCE_COUNTERS UINT32_C(0x1)
+ uint8_t unused_0[5];
} __attribute__((packed));
-/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_output {
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -12268,83 +13589,6 @@ struct hwrm_port_lpbk_clr_stats_output {
uint8_t valid;
} __attribute__((packed));
-/**********************
- * hwrm_port_ts_query *
- **********************/
-
-
-/* hwrm_port_ts_query_input (size:192b/24B) */
-struct hwrm_port_ts_query_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint32_t flags;
- /*
- * Enumeration denoting the RX, TX type of the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
- */
- #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
- /* rx path */
- #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_LAST \
- HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX
- /* Port ID of port that is being queried. */
- uint16_t port_id;
- uint8_t unused_0[2];
-} __attribute__((packed));
-
-/* hwrm_port_ts_query_output (size:192b/24B) */
-struct hwrm_port_ts_query_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Timestamp value of PTP message captured. */
- uint64_t ptp_msg_ts;
- /* Sequence ID of the PTP message captured. */
- uint16_t ptp_msg_seqid;
- uint8_t unused_0[5];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/***********************
* hwrm_port_phy_qcaps *
***********************/
@@ -12593,172 +13837,6 @@ struct hwrm_port_phy_qcaps_output {
#define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24
} __attribute__((packed));
-/***************************
- * hwrm_port_phy_i2c_write *
- ***************************/
-
-
-/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
-struct hwrm_port_phy_i2c_write_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint32_t flags;
- uint32_t enables;
- /*
- * This bit must be '1' for the page_offset field to be
- * configured.
- */
- #define HWRM_PORT_PHY_I2C_WRITE_INPUT_ENABLES_PAGE_OFFSET \
- UINT32_C(0x1)
- /* Port ID of port. */
- uint16_t port_id;
- /* 8-bit I2C slave address. */
- uint8_t i2c_slave_addr;
- uint8_t unused_0;
- /* The page number that is being accessed over I2C. */
- uint16_t page_number;
- /* Offset within the page that is being accessed over I2C. */
- uint16_t page_offset;
- /*
- * Length of data to write, in bytes starting at the offset
- * specified above. If the offset is not specified, then
- * the data shall be written from the beginning of the page.
- */
- uint8_t data_length;
- uint8_t unused_1[7];
- /* Up to 64B of data. */
- uint32_t data[16];
-} __attribute__((packed));
-
-/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
-struct hwrm_port_phy_i2c_write_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/**************************
- * hwrm_port_phy_i2c_read *
- **************************/
-
-
-/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
-struct hwrm_port_phy_i2c_read_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint32_t flags;
- uint32_t enables;
- /*
- * This bit must be '1' for the page_offset field to be
- * configured.
- */
- #define HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET \
- UINT32_C(0x1)
- /* Port ID of port. */
- uint16_t port_id;
- /* 8-bit I2C slave address. */
- uint8_t i2c_slave_addr;
- uint8_t unused_0;
- /* The page number that is being accessed over I2C. */
- uint16_t page_number;
- /* Offset within the page that is being accessed over I2C. */
- uint16_t page_offset;
- /*
- * Length of data to read, in bytes starting at the offset
- * specified above. If the offset is not specified, then
- * the data shall be read from the beginning of the page.
- */
- uint8_t data_length;
- uint8_t unused_1[7];
-} __attribute__((packed));
-
-/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
-struct hwrm_port_phy_i2c_read_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Up to 64B of data. */
- uint32_t data[16];
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/*********************
* hwrm_port_led_cfg *
*********************/
@@ -17127,247 +18205,6 @@ struct hwrm_queue_cos2bw_cfg_output {
uint8_t valid;
} __attribute__((packed));
-/*************************
- * hwrm_queue_dscp_qcaps *
- *************************/
-
-
-/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
-struct hwrm_queue_dscp_qcaps_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * Port ID of port for which the table is being configured.
- * The HWRM needs to check whether this function is allowed
- * to configure pri2cos mapping on this port.
- */
- uint8_t port_id;
- uint8_t unused_0[7];
-} __attribute__((packed));
-
-/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
-struct hwrm_queue_dscp_qcaps_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* The number of bits provided by the hardware for the DSCP value. */
- uint8_t num_dscp_bits;
- uint8_t unused_0;
- /* Max number of DSCP-MASK-PRI entries supported. */
- uint16_t max_entries;
- uint8_t unused_1[3];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/****************************
- * hwrm_queue_dscp2pri_qcfg *
- ****************************/
-
-
-/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
-struct hwrm_queue_dscp2pri_qcfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * This is the host address where the 24-bits DSCP-MASK-PRI
- * tuple(s) will be copied to.
- */
- uint64_t dest_data_addr;
- /*
- * Port ID of port for which the table is being configured.
- * The HWRM needs to check whether this function is allowed
- * to configure pri2cos mapping on this port.
- */
- uint8_t port_id;
- uint8_t unused_0;
- /* Size of the buffer pointed to by dest_data_addr. */
- uint16_t dest_data_buffer_size;
- uint8_t unused_1[4];
-} __attribute__((packed));
-
-/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_qcfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /*
- * A count of the number of DSCP-MASK-PRI tuple(s) pointed to
- * by the dest_data_addr.
- */
- uint16_t entry_cnt;
- /*
- * This is the default PRI which un-initialized DSCP values are
- * mapped to.
- */
- uint8_t default_pri;
- uint8_t unused_0[4];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/***************************
- * hwrm_queue_dscp2pri_cfg *
- ***************************/
-
-
-/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
-struct hwrm_queue_dscp2pri_cfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * This is the host address where the 24-bits DSCP-MASK-PRI tuple
- * will be copied from.
- */
- uint64_t src_data_addr;
- uint32_t flags;
- /* use_hw_default_pri is 1 b */
- #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_FLAGS_USE_HW_DEFAULT_PRI \
- UINT32_C(0x1)
- uint32_t enables;
- /*
- * This bit must be '1' for the default_pri field to be
- * configured.
- */
- #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI \
- UINT32_C(0x1)
- /*
- * Port ID of port for which the table is being configured.
- * The HWRM needs to check whether this function is allowed
- * to configure pri2cos mapping on this port.
- */
- uint8_t port_id;
- /*
- * This is the default PRI which un-initialized DSCP values will be
- * mapped to.
- */
- uint8_t default_pri;
- /*
- * A count of the number of DSCP-MASK-PRI tuple(s) in the data pointed
- * to by src_data_addr.
- */
- uint16_t entry_cnt;
- uint8_t unused_0[4];
-} __attribute__((packed));
-
-/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/*******************
* hwrm_vnic_alloc *
*******************/
@@ -18182,177 +19019,6 @@ struct hwrm_vnic_tpa_cfg_output {
uint8_t valid;
} __attribute__((packed));
-/**********************
- * hwrm_vnic_tpa_qcfg *
- **********************/
-
-
-/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_tpa_qcfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Logical vnic ID */
- uint16_t vnic_id;
- uint8_t unused_0[6];
-} __attribute__((packed));
-
-/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
-struct hwrm_vnic_tpa_qcfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint32_t flags;
- /*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) of
- * non-tunneled TCP packets.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \
- UINT32_C(0x1)
- /*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) of
- * tunneled TCP packets.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \
- UINT32_C(0x2)
- /*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) according
- * to Windows Receive Segment Coalescing (RSC) rules.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \
- UINT32_C(0x4)
- /*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) according
- * to Linux Generic Receive Offload (GRO) rules.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \
- UINT32_C(0x8)
- /*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) for TCP
- * packets with IP ECN set to non-zero.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \
- UINT32_C(0x10)
- /*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) for
- * GRE tunneled TCP packets only if all packets have the
- * same GRE sequence.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
- UINT32_C(0x20)
- /*
- * When this bit is '1' and the GRO mode is enabled,
- * the VNIC is configured to
- * perform transparent packet aggregation (TPA) for
- * TCP/IPv4 packets with consecutively increasing IPIDs.
- * In other words, the last packet that is being
- * aggregated to an already existing aggregation context
- * shall have IPID 1 more than the IPID of the last packet
- * that was aggregated in that aggregation context.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \
- UINT32_C(0x40)
- /*
- * When this bit is '1' and the GRO mode is enabled,
- * the VNIC is configured to
- * perform transparent packet aggregation (TPA) for
- * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
- * value.
- */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \
- UINT32_C(0x80)
- /*
- * This is the maximum number of TCP segments that can
- * be aggregated (unit is Log2). Max value is 31.
- */
- uint16_t max_agg_segs;
- /* 1 segment */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
- /* 2 segments */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
- /* 4 segments */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
- /* 8 segments */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
- /* Any segment size larger than this is not valid */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \
- HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX
- /*
- * This is the maximum number of aggregations this VNIC is
- * allowed (unit is Log2). Max value is 7
- */
- uint16_t max_aggs;
- /* 1 aggregation */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0)
- /* 2 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1)
- /* 4 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2)
- /* 8 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3)
- /* 16 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4)
- /* Any aggregation size larger than this is not valid */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7)
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \
- HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX
- /*
- * This is the maximum amount of time allowed for
- * an aggregation context to complete after it was initiated.
- */
- uint32_t max_agg_timer;
- /*
- * This is the minimum amount of payload length required to
- * start an aggregation context.
- */
- uint32_t min_agg_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
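
The max_agg_segs/max_aggs comments above describe a log2 encoding (for example MAX_AGG_SEGS_8 is 0x3, i.e. 8 segments, and the maximum encoded value is 31). A minimal sketch of decoding such a field is below; the helper name is made up for illustration and assumes the HWRM definitions above plus <rte_byteorder.h> are in scope.

#include <stdint.h>
#include <rte_byteorder.h>

/* Hypothetical helper: decode a log2-encoded TPA limit into a count. */
static inline uint32_t
tpa_log2_limit(uint16_t encoded_le)
{
	/* e.g. HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 (0x3) -> 8 segments */
	return UINT32_C(1) << rte_le_to_cpu_16(encoded_le);
}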
/*********************
* hwrm_vnic_rss_cfg *
*********************/
@@ -19060,7 +19726,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
*******************/
-/* hwrm_ring_alloc_input (size:640b/80B) */
+/* hwrm_ring_alloc_input (size:704b/88B) */
struct hwrm_ring_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -19142,7 +19808,18 @@ struct hwrm_ring_alloc_input {
#define HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ UINT32_C(0x5)
#define HWRM_RING_ALLOC_INPUT_RING_TYPE_LAST \
HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ
- uint8_t unused_0[3];
+ uint8_t unused_0;
+ /* Ring allocation flags. */
+ uint16_t flags;
+ /*
+ * For Rx rings, the incoming packet data can be placed at either
+ * a 0B or 2B offset from the start of the Rx packet buffer. When
+ * '1', the received packet will be padded with 2B of zeros at the
+ * front of the packet. Note that this flag is only used for
+ * Rx rings and is ignored for all other rings included Rx
+ * Aggregation rings.
+ */
+ #define HWRM_RING_ALLOC_INPUT_FLAGS_RX_SOP_PAD UINT32_C(0x1)
/*
* This value is a pointer to the page table for the
* Ring.
@@ -19347,6 +20024,13 @@ struct hwrm_ring_alloc_input {
#define HWRM_RING_ALLOC_INPUT_INT_MODE_LAST \
HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
uint8_t unused_4[3];
+ /*
+ * The cq_handle is specified when allocating a completion ring. For
+ * devices that support NQs, this cq_handle will be included in the
+ * NQE to specify which CQ should be read to retrieve the completion
+ * record.
+ */
+ uint64_t cq_handle;
} __attribute__((packed));
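
The hunk above grows hwrm_ring_alloc_input from 640b/80B to 704b/88B by carving a 16-bit flags word (RX_SOP_PAD) out of the old padding and appending a 64-bit cq_handle. A minimal sketch of filling just the new fields follows; the function name is illustrative and it assumes the structures and macros from this header are in scope.

#include <stdint.h>
#include <rte_byteorder.h>

/* Illustrative only: populate the fields added by this patch. */
static void
ring_alloc_set_new_fields(struct hwrm_ring_alloc_input *req, uint64_t cq_handle)
{
	/* Ask the NIC to place Rx data at a 2B offset (2B of zero padding). */
	req->flags = rte_cpu_to_le_16(HWRM_RING_ALLOC_INPUT_FLAGS_RX_SOP_PAD);
	/* Opaque CQ handle echoed back in NQEs on NQ-capable devices. */
	req->cq_handle = rte_cpu_to_le_64(cq_handle);
}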
/* hwrm_ring_alloc_output (size:128b/16B) */
@@ -19454,6 +20138,228 @@ struct hwrm_ring_free_output {
uint8_t valid;
} __attribute__((packed));
+/*******************
+ * hwrm_ring_reset *
+ *******************/
+
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Ring Type. */
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \
+ HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL
+ uint8_t unused_0;
+ /* Physical number of the ring. */
+ uint16_t ring_id;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
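hwrm_ring_reset is added here at its new location (and removed from its old one later in this patch). A sketch of building a reset request for an Rx ring follows, assuming the definitions above are in scope; how the request is actually posted to firmware is out of scope.

#include <stdint.h>
#include <rte_byteorder.h>

/* Illustrative only: reset one Rx ring by its physical ring number. */
static void
ring_reset_fill(struct hwrm_ring_reset_input *req, uint16_t ring_id)
{
	req->ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX;
	req->ring_id = rte_cpu_to_le_16(ring_id);
}
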
+/**************************
+ * hwrm_ring_aggint_qcaps *
+ **************************/
+
+
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t cmpl_params;
+ /*
+ * When this bit is set to '1', int_lat_tmr_min can be configured
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_INT_LAT_TMR_MIN \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', int_lat_tmr_max can be configured
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_INT_LAT_TMR_MAX \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', timer_reset can be enabled
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_TIMER_RESET \
+ UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', ring_idle can be enabled
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_RING_IDLE \
+ UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', num_cmpl_dma_aggr can be configured
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR \
+ UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', num_cmpl_dma_aggr_during_int can be configured
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT \
+ UINT32_C(0x20)
+ /*
+ * When this bit is set to '1', cmpl_aggr_dma_tmr can be configured
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR \
+ UINT32_C(0x40)
+ /*
+ * When this bit is set to '1', cmpl_aggr_dma_tmr_during_int can be configured
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT \
+ UINT32_C(0x80)
+ /*
+ * When this bit is set to '1', num_cmpl_aggr_int can be configured
+ * on completion rings.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_AGGR_INT \
+ UINT32_C(0x100)
+ uint32_t nq_params;
+ /*
+ * When this bit is set to '1', int_lat_tmr_min can be configured
+ * on notification queues.
+ */
+ #define HWRM_RING_AGGINT_QCAPS_OUTPUT_NQ_PARAMS_INT_LAT_TMR_MIN \
+ UINT32_C(0x1)
+ /* Minimum value for num_cmpl_dma_aggr */
+ uint16_t num_cmpl_dma_aggr_min;
+ /* Maximum value for num_cmpl_dma_aggr */
+ uint16_t num_cmpl_dma_aggr_max;
+ /* Minimum value for num_cmpl_dma_aggr_during_int */
+ uint16_t num_cmpl_dma_aggr_during_int_min;
+ /* Maximum value for num_cmpl_dma_aggr_during_int */
+ uint16_t num_cmpl_dma_aggr_during_int_max;
+ /* Minimum value for cmpl_aggr_dma_tmr */
+ uint16_t cmpl_aggr_dma_tmr_min;
+ /* Maximum value for cmpl_aggr_dma_tmr */
+ uint16_t cmpl_aggr_dma_tmr_max;
+ /* Minimum value for cmpl_aggr_dma_tmr_during_int */
+ uint16_t cmpl_aggr_dma_tmr_during_int_min;
+ /* Maximum value for cmpl_aggr_dma_tmr_during_int */
+ uint16_t cmpl_aggr_dma_tmr_during_int_max;
+ /* Minimum value for int_lat_tmr_min */
+ uint16_t int_lat_tmr_min_min;
+ /* Maximum value for int_lat_tmr_min */
+ uint16_t int_lat_tmr_min_max;
+ /* Minimum value for int_lat_tmr_max */
+ uint16_t int_lat_tmr_max_min;
+ /* Maximum value for int_lat_tmr_max */
+ uint16_t int_lat_tmr_max_max;
+ /* Minimum value for num_cmpl_aggr_int */
+ uint16_t num_cmpl_aggr_int_min;
+ /* Maximum value for num_cmpl_aggr_int */
+ uint16_t num_cmpl_aggr_int_max;
+ /* The units for timer parameters, in nanoseconds. */
+ uint16_t timer_units;
+ uint8_t unused_0[1];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
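hwrm_ring_aggint_qcaps reports which interrupt-aggregation knobs are configurable and the valid range for each, with timer_units giving the tick size in nanoseconds. A sketch of validating a requested int_lat_tmr_min against the response follows; the helper name is illustrative and the structure above is assumed to be in scope.

#include <stdint.h>
#include <rte_byteorder.h>

/* Illustrative only: clamp a requested int_lat_tmr_min to the advertised range. */
static uint16_t
clamp_int_lat_tmr_min(const struct hwrm_ring_aggint_qcaps_output *caps,
		      uint16_t requested)
{
	uint32_t params = rte_le_to_cpu_32(caps->cmpl_params);

	if (!(params & HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_INT_LAT_TMR_MIN))
		return 0; /* timer not configurable on completion rings */
	if (requested < rte_le_to_cpu_16(caps->int_lat_tmr_min_min))
		return rte_le_to_cpu_16(caps->int_lat_tmr_min_min);
	if (requested > rte_le_to_cpu_16(caps->int_lat_tmr_min_max))
		return rte_le_to_cpu_16(caps->int_lat_tmr_min_max);
	return requested;
}
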
/**************************************
* hwrm_ring_cmpl_ring_qaggint_params *
**************************************/
@@ -19716,79 +20622,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
uint8_t valid;
} __attribute__((packed));
-/*******************
- * hwrm_ring_reset *
- *******************/
-
-
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Ring Type. */
- uint8_t ring_type;
- /* L2 Completion Ring (CR) */
- #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
- #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
- #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2)
- /* RoCE Notification Completion Ring (ROCE_CR) */
- #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
- #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \
- HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL
- uint8_t unused_0;
- /* Physical number of the ring. */
- uint16_t ring_id;
- uint8_t unused_1[4];
-} __attribute__((packed));
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/***********************
* hwrm_ring_grp_alloc *
***********************/
@@ -20000,6 +20833,24 @@ struct hwrm_cfa_l2_filter_alloc_input {
*/
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \
UINT32_C(0x8)
+ /*
+ * Enumeration denoting NO_ROCE_L2 to support old drivers.
+	 * New drivers: L2 for L2-only traffic, ROCE for RoCE and L2 traffic.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_MASK \
+ UINT32_C(0x30)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_SFT 4
+ /* To support old drivers */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_NO_ROCE_L2 \
+ (UINT32_C(0x0) << 4)
+ /* Only L2 traffic */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2 \
+ (UINT32_C(0x1) << 4)
+ /* Roce & L2 traffic */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_ROCE \
+ (UINT32_C(0x2) << 4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_ROCE
uint32_t enables;
/*
* This bit must be '1' for the l2_addr field to be
@@ -20419,18 +21270,40 @@ struct hwrm_cfa_l2_filter_cfg_input {
* This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
/*
* Setting of this flag indicates drop action. If this flag is not set,
* then it should be considered accept action.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP \
+ UINT32_C(0x2)
+ /*
+ * Enumeration denoting NO_ROCE_L2 to support old drivers.
+	 * New drivers: L2 for L2-only traffic, ROCE for RoCE and L2 traffic.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_MASK \
+ UINT32_C(0xc)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_SFT 2
+ /* To support old drivers */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_NO_ROCE_L2 \
+ (UINT32_C(0x0) << 2)
+ /* Only L2 traffic */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_L2 \
+ (UINT32_C(0x1) << 2)
+ /* Roce & L2 traffic */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE \
+ (UINT32_C(0x2) << 2)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_LAST \
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE
uint32_t enables;
/*
* This bit must be '1' for the dst_id field to be
@@ -21532,7 +22405,7 @@ struct hwrm_vxlan_ipv6_hdr {
uint32_t dest_ip_addr[4];
} __attribute__((packed));
-/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
struct hwrm_cfa_encap_data_vxlan {
/* Source MAC address. */
uint8_t src_mac_addr[6];
@@ -21568,6 +22441,13 @@ struct hwrm_cfa_encap_data_vxlan {
uint16_t dst_port;
/* VXLAN Network Identifier. */
uint32_t vni;
+	/* 3 reserved bytes from the 1st dword of the VXLAN header. */
+ uint8_t hdr_rsvd0[3];
+	/* 1 reserved byte from the 2nd dword of the VXLAN header. */
+ uint8_t hdr_rsvd1;
+ /* VXLAN header flags field. */
+ uint8_t hdr_flags;
+ uint8_t unused[3];
} __attribute__((packed));
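
hwrm_cfa_encap_data_vxlan grows from 576b/72B to 640b/80B so the raw VXLAN header reserved bytes and flags can be passed through. A sketch of filling the new fields follows, assuming the structure above is in scope; 0x08 is the RFC 7348 'I' (VNI valid) flag and is used here purely as an example value.

#include <stdint.h>

/* Illustrative only: set the VXLAN header fields added by this patch. */
static void
encap_vxlan_set_hdr(struct hwrm_cfa_encap_data_vxlan *encap)
{
	encap->hdr_rsvd0[0] = 0;
	encap->hdr_rsvd0[1] = 0;
	encap->hdr_rsvd0[2] = 0;
	encap->hdr_rsvd1 = 0;
	encap->hdr_flags = 0x08; /* e.g. RFC 7348 'I' flag: VNI is valid */
}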
/*******************************
@@ -21634,8 +22514,11 @@ struct hwrm_cfa_encap_record_alloc_input {
/* Generic Routing Encapsulation (GRE) inside IP datagram payload */
#define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE \
UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
#define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_LAST \
- HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE
+ HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN_V4
uint8_t unused_0[3];
/* This value is encap data used for the given encap type. */
uint32_t encap_data[20];
@@ -22703,922 +23586,6 @@ struct hwrm_cfa_em_flow_free_output {
uint8_t valid;
} __attribute__((packed));
-/************************
- * hwrm_cfa_em_flow_cfg *
- ************************/
-
-
-/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */
-struct hwrm_cfa_em_flow_cfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint32_t enables;
- /*
- * This bit must be '1' for the new_dst_id field to be
- * configured.
- */
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID \
- UINT32_C(0x1)
- /*
- * This bit must be '1' for the new_mirror_vnic_id field to be
- * configured.
- */
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
- UINT32_C(0x2)
- /*
- * This bit must be '1' for the new_meter_instance_id field to be
- * configured.
- */
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
- UINT32_C(0x4)
- uint8_t unused_0[4];
- /* This value is an opaque id into CFA data structures. */
- uint64_t em_filter_id;
- /*
- * If set, this value shall represent the new
- * Logical VNIC ID of the destination VNIC for the RX
- * path and network port id of the destination port for
- * the TX path.
- */
- uint32_t new_dst_id;
- /*
- * New Logical VNIC ID of the VNIC where traffic is
- * mirrored.
- */
- uint32_t new_mirror_vnic_id;
- /*
- * New meter to attach to the flow. Specifying the
- * invalid instance ID is used to remove any existing
- * meter from the flow.
- */
- uint16_t new_meter_instance_id;
- /*
- * A value of 0xfff is considered invalid and implies the
- * instance is not configured.
- */
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
- UINT32_C(0xffff)
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
- HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
- uint8_t unused_1[6];
-} __attribute__((packed));
-
-/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */
-struct hwrm_cfa_em_flow_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
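
The removed hwrm_cfa_em_flow_cfg comments note that writing the invalid meter instance ID detaches any existing meter from a flow. As a reading aid only (these definitions no longer exist after this patch), a sketch of such a request against the deleted structure:

#include <stdint.h>
#include <rte_byteorder.h>

/* Reading aid only: detach the meter from an exact-match flow. */
static void
em_flow_detach_meter(struct hwrm_cfa_em_flow_cfg_input *req, uint64_t filter_id)
{
	req->em_filter_id = rte_cpu_to_le_64(filter_id);
	req->enables =
		rte_cpu_to_le_32(HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID);
	req->new_meter_instance_id =
		rte_cpu_to_le_16(HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID);
}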
-/********************************
- * hwrm_cfa_meter_profile_alloc *
- ********************************/
-
-
-/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */
-struct hwrm_cfa_meter_profile_alloc_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint8_t flags;
- /*
- * Enumeration denoting the RX, TX type of the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
- */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_TX \
- UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX \
- UINT32_C(0x1)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX
- /* The meter algorithm type. */
- uint8_t meter_type;
- /* RFC 2697 (srTCM) */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697 \
- UINT32_C(0x0)
- /* RFC 2698 (trTCM) */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2698 \
- UINT32_C(0x1)
- /* RFC 4115 (trTCM) */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 \
- UINT32_C(0x2)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115
- /*
- * This field is reserved for the future use.
- * It shall be set to 0.
- */
- uint16_t reserved1;
- /*
- * This field is reserved for the future use.
- * It shall be set to 0.
- */
- uint32_t reserved2;
- /* A meter rate specified in bytes-per-second. */
- uint32_t commit_rate;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
- /* A meter burst size specified in bytes. */
- uint32_t commit_burst;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
- /* A meter rate specified in bytes-per-second. */
- uint32_t excess_peak_rate;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
- /* A meter burst size specified in bytes. */
- uint32_t excess_peak_burst;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
-} __attribute__((packed));
-
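
The commit/peak rate and burst fields above all share one packed encoding: a 28-bit value, a scale bit (bits vs. bytes) and a 3-bit unit. These macros are deleted by this patch, so the following is only a reading aid showing how 100 Mbit/s could be encoded (100000 in the KILO unit with the bits scale):

#include <stdint.h>

/* Reading aid only: 100 Mbit/s expressed with the removed rate encoding. */
static const uint32_t example_commit_rate =
	(UINT32_C(100000) &
	 HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK) |
	HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS |
	HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO;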
-/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */
-struct hwrm_cfa_meter_profile_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* This value identifies a meter profile in CFA. */
- uint16_t meter_profile_id;
- /*
- * A value of 0xfff is considered invalid and implies the
- * profile is not configured.
- */
- #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID \
- UINT32_C(0xffff)
- #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_LAST \
- HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID
- uint8_t unused_0[5];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/*******************************
- * hwrm_cfa_meter_profile_free *
- *******************************/
-
-
-/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */
-struct hwrm_cfa_meter_profile_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint8_t flags;
- /*
- * Enumeration denoting the RX, TX type of the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
- */
- #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_TX \
- UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX \
- UINT32_C(0x1)
- #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX
- uint8_t unused_0;
- /* This value identifies a meter profile in CFA. */
- uint16_t meter_profile_id;
- /*
- * A value of 0xfff is considered invalid and implies the
- * profile is not configured.
- */
- #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID \
- UINT32_C(0xffff)
- #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \
- HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID
- uint8_t unused_1[4];
-} __attribute__((packed));
-
-/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */
-struct hwrm_cfa_meter_profile_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/******************************
- * hwrm_cfa_meter_profile_cfg *
- ******************************/
-
-
-/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */
-struct hwrm_cfa_meter_profile_cfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint8_t flags;
- /*
- * Enumeration denoting the RX, TX type of the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
- */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX
- /* The meter algorithm type. */
- uint8_t meter_type;
- /* RFC 2697 (srTCM) */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697 \
- UINT32_C(0x0)
- /* RFC 2698 (trTCM) */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2698 \
- UINT32_C(0x1)
- /* RFC 4115 (trTCM) */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 \
- UINT32_C(0x2)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115
- /* This value identifies a meter profile in CFA. */
- uint16_t meter_profile_id;
- /*
- * A value of 0xfff is considered invalid and implies the
- * profile is not configured.
- */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID \
- UINT32_C(0xffff)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID
- /*
- * This field is reserved for the future use.
- * It shall be set to 0.
- */
- uint32_t reserved;
- /* A meter rate specified in bytes-per-second. */
- uint32_t commit_rate;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
- /* A meter burst size specified in bytes. */
- uint32_t commit_burst;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
- /* A meter rate specified in bytes-per-second. */
- uint32_t excess_peak_rate;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
- /* A meter burst size specified in bytes. */
- uint32_t excess_peak_burst;
- /* The bandwidth value. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
- UINT32_C(0xfffffff)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
- 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE \
- UINT32_C(0x10000000)
- /* Value is in bits. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
- (UINT32_C(0x0) << 28)
- /* Value is in bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
- (UINT32_C(0x1) << 28)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
- /* bw_value_unit is 3 b */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
- UINT32_C(0xe0000000)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
- 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
- (UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
- (UINT32_C(0x2) << 29)
- /* Value is in bits or bytes. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
- (UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
- (UINT32_C(0x6) << 29)
- /* Value is in 1/100th of a percentage of total bandwidth. */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
- (UINT32_C(0x1) << 29)
- /* Invalid unit */
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
- (UINT32_C(0x7) << 29)
- #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
- HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
-} __attribute__((packed));
-
-/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */
-struct hwrm_cfa_meter_profile_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/*********************************
- * hwrm_cfa_meter_instance_alloc *
- *********************************/
-
-
-/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */
-struct hwrm_cfa_meter_instance_alloc_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint8_t flags;
- /*
- * Enumeration denoting the RX, TX type of the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
- */
- #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH \
- UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_TX \
- UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX \
- UINT32_C(0x1)
- #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX
- uint8_t unused_0;
- /* This value identifies a meter profile in CFA. */
- uint16_t meter_profile_id;
- /*
-	 * A value of 0xffff is considered invalid and implies the
- * profile is not configured.
- */
- #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID \
- UINT32_C(0xffff)
- #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \
- HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID
- uint8_t unused_1[4];
-} __attribute__((packed));
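Every *_input structure in this file begins with the same five header fields documented above (req_type, cmpl_ring, seq_id, target_id, resp_addr). A hedged sketch of filling them before a command is issued; the helper and its local struct are illustrative only, with 0xffff used as "no completion ring" and "target is the HWRM" per the field descriptions:

    #include <stdint.h>
    #include <rte_byteorder.h>

    /* Hypothetical request-header prep, not part of this header file. */
    static void hwrm_prep_header(void *buf, uint16_t req_type,
                                 uint16_t seq_id, uint64_t resp_dma)
    {
            struct input {                  /* common prefix of every *_input */
                    uint16_t req_type;
                    uint16_t cmpl_ring;
                    uint16_t seq_id;
                    uint16_t target_id;
                    uint64_t resp_addr;
            } *hdr = buf;

            hdr->req_type  = rte_cpu_to_le_16(req_type);
            hdr->cmpl_ring = rte_cpu_to_le_16(0xffff);  /* no completion ring */
            hdr->seq_id    = rte_cpu_to_le_16(seq_id);
            hdr->target_id = rte_cpu_to_le_16(0xffff);  /* 0xFFFF = HWRM */
            hdr->resp_addr = rte_cpu_to_le_64(resp_dma);
    }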
-
-/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */
-struct hwrm_cfa_meter_instance_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* This value identifies a meter instance in CFA. */
- uint16_t meter_instance_id;
- /*
-	 * A value of 0xffff is considered invalid and implies the
- * instance is not configured.
- */
- #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID \
- UINT32_C(0xffff)
- #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_LAST \
- HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID
- uint8_t unused_0[5];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/********************************
- * hwrm_cfa_meter_instance_free *
- ********************************/
-
-
-/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */
-struct hwrm_cfa_meter_instance_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint8_t flags;
- /*
- * Enumeration denoting the RX, TX type of the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
- */
- #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_TX \
- UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX \
- UINT32_C(0x1)
- #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX
- uint8_t unused_0;
- /* This value identifies a meter instance in CFA. */
- uint16_t meter_instance_id;
- /*
-	 * A value of 0xffff is considered invalid and implies the
- * instance is not configured.
- */
- #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID \
- UINT32_C(0xffff)
- #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \
- HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID
- uint8_t unused_1[4];
-} __attribute__((packed));
-
-/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */
-struct hwrm_cfa_meter_instance_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
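Every *_output structure ends with the `valid` byte that firmware writes last, so the host must poll it and only then read the rest of the response. A hedged sketch of that polling (helper name and timeout handling are illustrative, not taken from the driver):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_cycles.h>

    /* Hypothetical helper: spin until the trailing 'valid' byte of an HWRM
     * response becomes non-zero, then order payload reads after it. */
    static bool hwrm_resp_wait_valid(const volatile uint8_t *valid,
                                     uint32_t timeout_us)
    {
            while (timeout_us--) {
                    if (*valid) {
                            rte_rmb();  /* read the payload only after 'valid' */
                            return true;
                    }
                    rte_delay_us(1);
            }
            return false;
    }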
-
/*******************************
* hwrm_cfa_decap_filter_alloc *
*******************************/
@@ -24030,10 +23997,12 @@ struct hwrm_cfa_flow_alloc_input {
uint64_t resp_addr;
uint16_t flags;
/* tunnel is 1 b */
- #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL UINT32_C(0x1)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL \
+ UINT32_C(0x1)
/* num_vlan is 2 b */
- #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK UINT32_C(0x6)
- #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK \
+ UINT32_C(0x6)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1
/* no tags */
#define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_NONE \
(UINT32_C(0x0) << 1)
@@ -24046,8 +24015,9 @@ struct hwrm_cfa_flow_alloc_input {
#define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_LAST \
HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO
/* Enumeration denoting the Flow Type. */
- #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK UINT32_C(0x38)
- #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK \
+ UINT32_C(0x38)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3
/* L2 flow */
#define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_L2 \
(UINT32_C(0x0) << 3)
@@ -24060,6 +24030,29 @@ struct hwrm_cfa_flow_alloc_input {
#define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_LAST \
HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6
/*
+	 * When set to 1, indicates TX flow offload for the function specified in
+	 * src_fid; dst_fid should be set to an invalid value. To indicate a VM to VM
+	 * flow, both the path_tx and path_rx flags need to be set. For the virtio
+	 * vSwitch offload case, src_fid and dst_fid are set to the same fid value.
+	 * For the SRIOV vSwitch offload case, src_fid and dst_fid must be set to the
+	 * same VF FID belonging to children VFs of the same PF to indicate a VM to VM flow.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x40)
+ /*
+	 * When set to 1, indicates RX flow offload for the function specified in
+	 * dst_fid; src_fid should be set to an invalid value.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x80)
+ /*
+	 * Set to 1 to indicate that matching of the VXLAN VNI from the custom VXLAN
+	 * header is required; the VXLAN VNI value is stored in the first 24 bits of
+	 * the dmac field. This flag is only valid when the flow direction is RX.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_MATCH_VXLAN_IP_VNI \
+ UINT32_C(0x100)
+ /*
* Tx Flow: vf fid.
* Rx Flow: pf fid.
*/
@@ -24104,6 +24097,14 @@ struct hwrm_cfa_flow_alloc_input {
#define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TTL_DECREMENT \
UINT32_C(0x200)
/*
+	 * If set to 1 and the flow direction is TX, it indicates decap of the L2
+	 * header and encap of the tunnel header. If set to 1 and the flow direction
+	 * is RX, it indicates decap of the tunnel header and encap of the L2 header.
+	 * The type of tunnel is specified in the tunnel_type field.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL_IP \
+ UINT32_C(0x400)
+ /*
* Tx Flow: pf or vf fid.
* Rx Flow: vf fid.
*/
@@ -24171,10 +24172,35 @@ struct hwrm_cfa_flow_alloc_input {
uint16_t l2_rewrite_smac[3];
/* The value of ip protocol. */
uint8_t ip_proto;
- uint8_t unused_0;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL UINT32_C(0xff)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
} __attribute__((packed));
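The new PATH_TX/PATH_RX flags, the TUNNEL_IP action flag and the tunnel_type enum added above are meant to be combined when a bidirectional, tunnel-terminating flow is allocated. A minimal sketch, assuming this header is already included and using a hypothetical helper name:

    #include <rte_byteorder.h>

    /* Hypothetical helper: mark a flow-alloc request as a VM-to-VM VXLAN flow
     * with tunnel decap/encap, using the flags and tunnel_type added above. */
    static void
    bnxt_flow_alloc_mark_vxlan_vm2vm(struct hwrm_cfa_flow_alloc_input *req)
    {
            req->flags |= rte_cpu_to_le_16(
                    HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_PATH_TX |
                    HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_PATH_RX);
            req->action_flags |= rte_cpu_to_le_16(
                    HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL_IP);
            req->tunnel_type = HWRM_CFA_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
    }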
-/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
struct hwrm_cfa_flow_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
@@ -24186,7 +24212,18 @@ struct hwrm_cfa_flow_alloc_output {
uint16_t resp_len;
/* Flow record index. */
uint16_t flow_handle;
- uint8_t unused_0[5];
+ uint8_t unused_0[2];
+ /*
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint32_t flow_id;
+ /* This value identifies a set of CFA data structures used for a flow. */
+ uint64_t ext_flow_handle;
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -24202,7 +24239,7 @@ struct hwrm_cfa_flow_alloc_output {
**********************/
-/* hwrm_cfa_flow_free_input (size:192b/24B) */
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -24234,6 +24271,8 @@ struct hwrm_cfa_flow_free_input {
/* Flow record index. */
uint16_t flow_handle;
uint8_t unused_0[6];
+ /* This value identifies a set of CFA data structures used for a flow. */
+ uint64_t ext_flow_handle;
} __attribute__((packed));
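hwrm_cfa_flow_free_input now carries the same 64-bit ext_flow_handle that hwrm_cfa_flow_alloc returns, so a driver hands back whatever it cached at allocation time alongside the legacy 16-bit handle. A minimal, hypothetical fill helper (this header assumed included):

    #include <rte_byteorder.h>

    /* Hypothetical helper: free a flow by both identifiers. */
    static void
    bnxt_fill_flow_free_req(struct hwrm_cfa_flow_free_input *req,
                            uint16_t handle, uint64_t ext_handle)
    {
            req->flow_handle     = rte_cpu_to_le_16(handle);
            req->ext_flow_handle = rte_cpu_to_le_64(ext_handle);
    }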
/* hwrm_cfa_flow_free_output (size:256b/32B) */
@@ -24261,98 +24300,6 @@ struct hwrm_cfa_flow_free_output {
uint8_t valid;
} __attribute__((packed));
-/**********************
- * hwrm_cfa_flow_info *
- **********************/
-
-
-/* hwrm_cfa_flow_info_input (size:192b/24B) */
-struct hwrm_cfa_flow_info_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Flow record index. */
- uint16_t flow_handle;
- /* Max flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \
- UINT32_C(0xfff)
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_SFT 0
- /* CNP flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \
- UINT32_C(0x1000)
- /* Direction rx = 1 */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \
- UINT32_C(0x8000)
- uint8_t unused_0[6];
-} __attribute__((packed));
-
-/* hwrm_cfa_flow_info_output (size:448b/56B) */
-struct hwrm_cfa_flow_info_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* flags is 8 b */
- uint8_t flags;
- /* profile is 8 b */
- uint8_t profile;
- /* src_fid is 16 b */
- uint16_t src_fid;
- /* dst_fid is 16 b */
- uint16_t dst_fid;
- /* l2_ctxt_id is 16 b */
- uint16_t l2_ctxt_id;
- /* em_info is 64 b */
- uint64_t em_info;
- /* tcam_info is 64 b */
- uint64_t tcam_info;
- /* vfp_tcam_info is 64 b */
- uint64_t vfp_tcam_info;
- /* ar_id is 16 b */
- uint16_t ar_id;
- /* flow_handle is 16 b */
- uint16_t flow_handle;
- /* tunnel_handle is 32 b */
- uint32_t tunnel_handle;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/***********************
* hwrm_cfa_flow_flush *
***********************/
@@ -24417,7 +24364,7 @@ struct hwrm_cfa_flow_flush_output {
***********************/
-/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
struct hwrm_cfa_flow_stats_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -24469,6 +24416,26 @@ struct hwrm_cfa_flow_stats_input {
/* Flow handle. */
uint16_t flow_handle_9;
uint8_t unused_0[2];
+ /* Flow ID of a flow. */
+ uint32_t flow_id_0;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_1;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_2;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_3;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_4;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_5;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_6;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_7;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_8;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_9;
} __attribute__((packed));
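The stats request keeps its ten 16-bit flow_handle slots and now pairs each with a 32-bit flow_id slot. A sketch of filling slot 0 for a single-flow query; the num_flows count field is assumed to exist in this structure (it sits outside the hunk shown above), and the helper name is hypothetical:

    #include <rte_byteorder.h>

    /* Hypothetical fill helper for a one-flow stats query (slot 0 only). */
    static void
    bnxt_fill_flow_stats_req(struct hwrm_cfa_flow_stats_input *req,
                             uint16_t handle, uint32_t flow_id)
    {
            req->num_flows     = rte_cpu_to_le_16(1);  /* assumed count field */
            req->flow_handle_0 = rte_cpu_to_le_16(handle);
            req->flow_id_0     = rte_cpu_to_le_32(flow_id);
            /* slots 1-9 stay zero for a single-flow query */
    }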
/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
@@ -24532,421 +24499,6 @@ struct hwrm_cfa_flow_stats_output {
uint8_t valid;
} __attribute__((packed));
-/**************************
- * hwrm_cfa_vf_pair_alloc *
- **************************/
-
-
-/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vf_pair_alloc_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_a_id;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_b_id;
- uint8_t unused_0[4];
- /* VF Pair name (32 byte string). */
- char pair_name[32];
-} __attribute__((packed));
-
-/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vf_pair_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/*************************
- * hwrm_cfa_vf_pair_free *
- *************************/
-
-
-/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */
-struct hwrm_cfa_vf_pair_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* VF Pair name (32 byte string). */
- char pair_name[32];
-} __attribute__((packed));
-
-/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */
-struct hwrm_cfa_vf_pair_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/*************************
- * hwrm_cfa_vf_pair_info *
- *************************/
-
-
-/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */
-struct hwrm_cfa_vf_pair_info_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- uint32_t flags;
- /* If this flag is set, lookup by name else lookup by index. */
- #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
- /* vf pair table index. */
- uint16_t vf_pair_index;
- uint8_t unused_0[2];
- /* VF Pair name (32 byte string). */
- char vf_pair_name[32];
-} __attribute__((packed));
-
-/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
-struct hwrm_cfa_vf_pair_info_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* vf pair table index. */
- uint16_t next_vf_pair_index;
- /* vf pair member a's vf_fid. */
- uint16_t vf_a_fid;
- /* vf pair member a's Linux logical VF number. */
- uint16_t vf_a_index;
- /* vf pair member b's vf_fid. */
- uint16_t vf_b_fid;
-	/* vf pair member b's Linux logical VF number. */
- uint16_t vf_b_index;
- /* vf pair state. */
- uint8_t pair_state;
- /* Pair has been allocated */
- #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
- /* Both pair members are active */
- #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
- #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
- HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
- uint8_t unused_0[5];
- /* VF Pair name (32 byte string). */
- char pair_name[32];
- uint8_t unused_1[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/***********************
- * hwrm_cfa_pair_alloc *
- ***********************/
-
-
-/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
-struct hwrm_cfa_pair_alloc_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair, 5-rep2fn_mod). */
- uint8_t pair_mode;
- /* Pair between VF on local host with PF or VF on specified host. */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
- /* Pair between REP on local host with PF or VF on specified host. */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
- /* Pair between REP on local host with REP on specified host. */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
- /* Pair for the proxy interface. */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
- /* Pair for the PF interface. */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
-	/* Modify existing rep2fn pair and move pair to new PF. */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
-	/* Modify existing rep2fn pairs paired with same PF and move pairs to new PF. */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
- HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL
- uint8_t unused_0;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_a_id;
- /* Logical Host (0xff-local host). */
- uint8_t host_b_id;
- /* Logical PF (0xff-PF for command channel). */
- uint8_t pf_b_id;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_b_id;
- /* Loopback port (0xff-internal loopback), valid for mode-3. */
- uint8_t port_id;
- /* Priority used for encap of loopback packets valid for mode-3. */
- uint8_t pri;
- /* New PF for rep2fn modify, valid for mode 5. */
- uint16_t new_pf_fid;
- uint32_t enables;
- /*
- * This bit must be '1' for the q_ab field to be
- * configured.
- */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1)
- /*
- * This bit must be '1' for the q_ba field to be
- * configured.
- */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2)
- /*
- * This bit must be '1' for the fc_ab field to be
- * configured.
- */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4)
- /*
- * This bit must be '1' for the fc_ba field to be
- * configured.
- */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8)
- /* VF Pair name (32 byte string). */
- char pair_name[32];
- /*
- * The q_ab value specifies the logical index of the TX/RX CoS
- * queue to be assigned for traffic in the A to B direction of
- * the interface pair. The default value is 0.
- */
- uint8_t q_ab;
- /*
- * The q_ba value specifies the logical index of the TX/RX CoS
- * queue to be assigned for traffic in the B to A direction of
- * the interface pair. The default value is 1.
- */
- uint8_t q_ba;
- /*
- * Specifies whether RX ring flow control is disabled (0) or enabled
- * (1) in the A to B direction. The default value is 0, meaning that
- * packets will be dropped when the B-side RX rings are full.
- */
- uint8_t fc_ab;
- /*
- * Specifies whether RX ring flow control is disabled (0) or enabled
- * (1) in the B to A direction. The default value is 1, meaning that
- * the RX CoS queue will be flow controlled when the A-side RX rings
- * are full.
- */
- uint8_t fc_ba;
- uint8_t unused_1[4];
-} __attribute__((packed));
-
-/* hwrm_cfa_pair_alloc_output (size:192b/24B) */
-struct hwrm_cfa_pair_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Only valid for modes 1 and 2. */
- uint16_t rx_cfa_code_a;
- /* Only valid for modes 1 and 2. */
- uint16_t tx_cfa_action_a;
- /* Only valid for mode 2. */
- uint16_t rx_cfa_code_b;
- /* Only valid for mode 2. */
- uint16_t tx_cfa_action_b;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/**********************
- * hwrm_cfa_pair_free *
- **********************/
-
-
-/* hwrm_cfa_pair_free_input (size:384b/48B) */
-struct hwrm_cfa_pair_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* VF Pair name (32 byte string). */
- char pair_name[32];
-} __attribute__((packed));
-
-/* hwrm_cfa_pair_free_output (size:128b/16B) */
-struct hwrm_cfa_pair_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/**********************
* hwrm_cfa_pair_info *
**********************/
@@ -25067,13 +24619,13 @@ struct hwrm_cfa_pair_info_output {
uint8_t valid;
} __attribute__((packed));
-/**********************
- * hwrm_cfa_vfr_alloc *
- **********************/
+/***************************************
+ * hwrm_cfa_redirect_query_tunnel_type *
+ ***************************************/
-/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vfr_alloc_input {
+/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */
+struct hwrm_cfa_redirect_query_tunnel_type_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -25101,20 +24653,13 @@ struct hwrm_cfa_vfr_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_id;
- /*
- * This field is reserved for the future use.
- * It shall be set to 0.
- */
- uint16_t reserved;
- uint8_t unused_0[4];
- /* VF Representor name (32 byte string). */
- char vfr_name[32];
+ /* The source function id. */
+ uint16_t src_fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vfr_alloc_output {
+/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */
+struct hwrm_cfa_redirect_query_tunnel_type_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -25123,10 +24668,44 @@ struct hwrm_cfa_vfr_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Rx CFA code. */
- uint16_t rx_cfa_code;
- /* Tx CFA action. */
- uint16_t tx_cfa_action;
+ /* Tunnel Mask. */
+ uint32_t tunnel_mask;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NONTUNNEL \
+ UINT32_C(0x1)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN \
+ UINT32_C(0x2)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NVGRE \
+ UINT32_C(0x4)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_L2GRE \
+ UINT32_C(0x8)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPIP \
+ UINT32_C(0x10)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_GENEVE \
+ UINT32_C(0x20)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_MPLS \
+ UINT32_C(0x40)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_STT \
+ UINT32_C(0x80)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE \
+ UINT32_C(0x100)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_V4 \
+ UINT32_C(0x200)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE_V1 \
+ UINT32_C(0x400)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_ANYTUNNEL \
+ UINT32_C(0x800)
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
@@ -25138,65 +24717,6 @@ struct hwrm_cfa_vfr_alloc_output {
uint8_t valid;
} __attribute__((packed));
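The response's tunnel_mask is a plain bit-set, so a caller tests the bit for the tunnel type it cares about. A small sketch using the VXLAN bit defined above (helper name hypothetical, this header assumed included):

    #include <stdbool.h>
    #include <rte_byteorder.h>

    /* Hypothetical helper: does the queried source function support
     * redirection of VXLAN-encapsulated traffic? */
    static bool
    bnxt_redirect_supports_vxlan(
            const struct hwrm_cfa_redirect_query_tunnel_type_output *resp)
    {
            return rte_le_to_cpu_32(resp->tunnel_mask) &
                   HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN;
    }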
-/*********************
- * hwrm_cfa_vfr_free *
- *********************/
-
-
-/* hwrm_cfa_vfr_free_input (size:384b/48B) */
-struct hwrm_cfa_vfr_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* VF Representor name (32 byte string). */
- char vfr_name[32];
-} __attribute__((packed));
-
-/* hwrm_cfa_vfr_free_output (size:128b/16B) */
-struct hwrm_cfa_vfr_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
/******************************
* hwrm_tunnel_dst_port_query *
******************************/
@@ -25460,6 +24980,7 @@ struct hwrm_tunnel_dst_port_free_output {
uint8_t valid;
} __attribute__((packed));
+/* Periodic statistics context DMA to host. */
/* ctx_hw_stats (size:1280b/160B) */
struct ctx_hw_stats {
/* Number of received unicast packets */
@@ -25882,377 +25403,6 @@ struct hwrm_pcie_qstats_output {
uint8_t valid;
} __attribute__((packed));
-/* Port Tx Statistics Formats */
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- /* Total Number of 64 Bytes frames transmitted */
- uint64_t tx_64b_frames;
- /* Total Number of 65-127 Bytes frames transmitted */
- uint64_t tx_65b_127b_frames;
- /* Total Number of 128-255 Bytes frames transmitted */
- uint64_t tx_128b_255b_frames;
- /* Total Number of 256-511 Bytes frames transmitted */
- uint64_t tx_256b_511b_frames;
- /* Total Number of 512-1023 Bytes frames transmitted */
- uint64_t tx_512b_1023b_frames;
- /* Total Number of 1024-1518 Bytes frames transmitted */
- uint64_t tx_1024b_1518_frames;
- /*
-	 * Total Number of each good VLAN (excludes FCS errors)
- * frame transmitted which is 1519 to 1522 bytes in length
- * inclusive (excluding framing bits but including FCS bytes).
- */
- uint64_t tx_good_vlan_frames;
- /* Total Number of 1519-2047 Bytes frames transmitted */
- uint64_t tx_1519b_2047_frames;
- /* Total Number of 2048-4095 Bytes frames transmitted */
- uint64_t tx_2048b_4095b_frames;
- /* Total Number of 4096-9216 Bytes frames transmitted */
- uint64_t tx_4096b_9216b_frames;
- /* Total Number of 9217-16383 Bytes frames transmitted */
- uint64_t tx_9217b_16383b_frames;
- /* Total Number of good frames transmitted */
- uint64_t tx_good_frames;
- /* Total Number of frames transmitted */
- uint64_t tx_total_frames;
- /* Total number of unicast frames transmitted */
- uint64_t tx_ucast_frames;
- /* Total number of multicast frames transmitted */
- uint64_t tx_mcast_frames;
- /* Total number of broadcast frames transmitted */
- uint64_t tx_bcast_frames;
- /* Total number of PAUSE control frames transmitted */
- uint64_t tx_pause_frames;
- /*
- * Total number of PFC/per-priority PAUSE
- * control frames transmitted
- */
- uint64_t tx_pfc_frames;
- /* Total number of jabber frames transmitted */
- uint64_t tx_jabber_frames;
- /* Total number of frames transmitted with FCS error */
- uint64_t tx_fcs_err_frames;
- /* Total number of control frames transmitted */
- uint64_t tx_control_frames;
- /* Total number of over-sized frames transmitted */
- uint64_t tx_oversz_frames;
- /* Total number of frames with single deferral */
- uint64_t tx_single_dfrl_frames;
- /* Total number of frames with multiple deferrals */
- uint64_t tx_multi_dfrl_frames;
- /* Total number of frames with single collision */
- uint64_t tx_single_coll_frames;
- /* Total number of frames with multiple collisions */
- uint64_t tx_multi_coll_frames;
- /* Total number of frames with late collisions */
- uint64_t tx_late_coll_frames;
- /* Total number of frames with excessive collisions */
- uint64_t tx_excessive_coll_frames;
- /* Total number of fragmented frames transmitted */
- uint64_t tx_frag_frames;
- /* Total number of transmit errors */
- uint64_t tx_err;
- /* Total number of single VLAN tagged frames transmitted */
- uint64_t tx_tagged_frames;
- /* Total number of double VLAN tagged frames transmitted */
- uint64_t tx_dbl_tagged_frames;
- /* Total number of runt frames transmitted */
- uint64_t tx_runt_frames;
- /* Total number of TX FIFO under runs */
- uint64_t tx_fifo_underruns;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 0 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri0;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 1 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri1;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 2 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri2;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 3 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri3;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 4 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri4;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 5 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri5;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 6 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri6;
- /*
- * Total number of PFC frames with PFC enabled bit for
- * Pri 7 transmitted
- */
- uint64_t tx_pfc_ena_frames_pri7;
- /* Total number of EEE LPI Events on TX */
- uint64_t tx_eee_lpi_events;
- /* EEE LPI Duration Counter on TX */
- uint64_t tx_eee_lpi_duration;
- /*
- * Total number of Link Level Flow Control (LLFC) messages
- * transmitted
- */
- uint64_t tx_llfc_logical_msgs;
- /* Total number of HCFC messages transmitted */
- uint64_t tx_hcfc_msgs;
- /* Total number of TX collisions */
- uint64_t tx_total_collisions;
- /* Total number of transmitted bytes */
- uint64_t tx_bytes;
- /* Total number of end-to-end HOL frames */
- uint64_t tx_xthol_frames;
- /* Total Tx Drops per Port reported by STATS block */
- uint64_t tx_stat_discard;
- /* Total Tx Error Drops per Port reported by STATS block */
- uint64_t tx_stat_error;
-} __attribute__((packed));
-
-/* Port Rx Statistics Formats */
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- /* Total Number of 64 Bytes frames received */
- uint64_t rx_64b_frames;
- /* Total Number of 65-127 Bytes frames received */
- uint64_t rx_65b_127b_frames;
- /* Total Number of 128-255 Bytes frames received */
- uint64_t rx_128b_255b_frames;
- /* Total Number of 256-511 Bytes frames received */
- uint64_t rx_256b_511b_frames;
- /* Total Number of 512-1023 Bytes frames received */
- uint64_t rx_512b_1023b_frames;
- /* Total Number of 1024-1518 Bytes frames received */
- uint64_t rx_1024b_1518_frames;
- /*
-	 * Total Number of each good VLAN (excludes FCS errors)
- * frame received which is 1519 to 1522 bytes in length
- * inclusive (excluding framing bits but including FCS bytes).
- */
- uint64_t rx_good_vlan_frames;
- /* Total Number of 1519-2047 Bytes frames received */
- uint64_t rx_1519b_2047b_frames;
- /* Total Number of 2048-4095 Bytes frames received */
- uint64_t rx_2048b_4095b_frames;
- /* Total Number of 4096-9216 Bytes frames received */
- uint64_t rx_4096b_9216b_frames;
- /* Total Number of 9217-16383 Bytes frames received */
- uint64_t rx_9217b_16383b_frames;
- /* Total number of frames received */
- uint64_t rx_total_frames;
- /* Total number of unicast frames received */
- uint64_t rx_ucast_frames;
- /* Total number of multicast frames received */
- uint64_t rx_mcast_frames;
- /* Total number of broadcast frames received */
- uint64_t rx_bcast_frames;
- /* Total number of received frames with FCS error */
- uint64_t rx_fcs_err_frames;
- /* Total number of control frames received */
- uint64_t rx_ctrl_frames;
- /* Total number of PAUSE frames received */
- uint64_t rx_pause_frames;
- /* Total number of PFC frames received */
- uint64_t rx_pfc_frames;
- /*
- * Total number of frames received with an unsupported
- * opcode
- */
- uint64_t rx_unsupported_opcode_frames;
- /*
- * Total number of frames received with an unsupported
- * DA for pause and PFC
- */
- uint64_t rx_unsupported_da_pausepfc_frames;
- /* Total number of frames received with an unsupported SA */
- uint64_t rx_wrong_sa_frames;
- /* Total number of received packets with alignment error */
- uint64_t rx_align_err_frames;
- /* Total number of received frames with out-of-range length */
- uint64_t rx_oor_len_frames;
- /* Total number of received frames with error termination */
- uint64_t rx_code_err_frames;
- /*
- * Total number of received frames with a false carrier is
- * detected during idle, as defined by RX_ER samples active
- * and RXD is 0xE. The event is reported along with the
- * statistics generated on the next received frame. Only
- * one false carrier condition can be detected and logged
- * between frames.
- *
- * Carrier event, valid for 10M/100M speed modes only.
- */
- uint64_t rx_false_carrier_frames;
- /* Total number of over-sized frames received */
- uint64_t rx_ovrsz_frames;
- /* Total number of jabber packets received */
- uint64_t rx_jbr_frames;
- /* Total number of received frames with MTU error */
- uint64_t rx_mtu_err_frames;
- /* Total number of received frames with CRC match */
- uint64_t rx_match_crc_frames;
- /* Total number of frames received promiscuously */
- uint64_t rx_promiscuous_frames;
- /*
- * Total number of received frames with one or two VLAN
- * tags
- */
- uint64_t rx_tagged_frames;
- /* Total number of received frames with two VLAN tags */
- uint64_t rx_double_tagged_frames;
- /* Total number of truncated frames received */
- uint64_t rx_trunc_frames;
- /* Total number of good frames (without errors) received */
- uint64_t rx_good_frames;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 0
- */
- uint64_t rx_pfc_xon2xoff_frames_pri0;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 1
- */
- uint64_t rx_pfc_xon2xoff_frames_pri1;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 2
- */
- uint64_t rx_pfc_xon2xoff_frames_pri2;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 3
- */
- uint64_t rx_pfc_xon2xoff_frames_pri3;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 4
- */
- uint64_t rx_pfc_xon2xoff_frames_pri4;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 5
- */
- uint64_t rx_pfc_xon2xoff_frames_pri5;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 6
- */
- uint64_t rx_pfc_xon2xoff_frames_pri6;
- /*
- * Total number of received PFC frames with transition from
- * XON to XOFF on Pri 7
- */
- uint64_t rx_pfc_xon2xoff_frames_pri7;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 0
- */
- uint64_t rx_pfc_ena_frames_pri0;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 1
- */
- uint64_t rx_pfc_ena_frames_pri1;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 2
- */
- uint64_t rx_pfc_ena_frames_pri2;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 3
- */
- uint64_t rx_pfc_ena_frames_pri3;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 4
- */
- uint64_t rx_pfc_ena_frames_pri4;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 5
- */
- uint64_t rx_pfc_ena_frames_pri5;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 6
- */
- uint64_t rx_pfc_ena_frames_pri6;
- /*
- * Total number of received PFC frames with PFC enabled
- * bit for Pri 7
- */
- uint64_t rx_pfc_ena_frames_pri7;
- /* Total Number of frames received with SCH CRC error */
- uint64_t rx_sch_crc_err_frames;
- /* Total Number of under-sized frames received */
- uint64_t rx_undrsz_frames;
- /* Total Number of fragmented frames received */
- uint64_t rx_frag_frames;
- /* Total number of RX EEE LPI Events */
- uint64_t rx_eee_lpi_events;
- /* EEE LPI Duration Counter on RX */
- uint64_t rx_eee_lpi_duration;
- /*
- * Total number of physical type Link Level Flow Control
- * (LLFC) messages received
- */
- uint64_t rx_llfc_physical_msgs;
- /*
- * Total number of logical type Link Level Flow Control
- * (LLFC) messages received
- */
- uint64_t rx_llfc_logical_msgs;
- /*
- * Total number of logical type Link Level Flow Control
- * (LLFC) messages received with CRC error
- */
- uint64_t rx_llfc_msgs_with_crc_err;
- /* Total number of HCFC messages received */
- uint64_t rx_hcfc_msgs;
- /* Total number of HCFC messages received with CRC error */
- uint64_t rx_hcfc_msgs_with_crc_err;
- /* Total number of received bytes */
- uint64_t rx_bytes;
- /* Total number of bytes received in runt frames */
- uint64_t rx_runt_bytes;
- /* Total number of runt frames received */
- uint64_t rx_runt_frames;
- /* Total Rx Discards per Port reported by STATS block */
- uint64_t rx_stat_discard;
- uint64_t rx_stat_err;
-} __attribute__((packed));
-
-/* Port Rx Statistics extended Formats */
-/* rx_port_stats_ext (size:320b/40B) */
-struct rx_port_stats_ext {
- /* Number of times link state changed to down */
- uint64_t link_down_events;
- /* Number of times the idle rings with pause bit are found */
- uint64_t continuous_pause_events;
- /* Number of times the active rings pause bit resumed back */
- uint64_t resume_pause_events;
- /* Number of times, the ROCE cos queue PFC is disabled to avoid pause flood/burst */
- uint64_t continuous_roce_pause_events;
- /* Number of times, the ROCE cos queue PFC is enabled back */
- uint64_t resume_roce_pause_events;
-} __attribute__((packed));
-
/* PCIe Statistics Formats */
/* pcie_ctx_hw_stats (size:768b/96B) */
struct pcie_ctx_hw_stats {
@@ -28109,103 +27259,4 @@ struct hwrm_nvm_validate_option_cmd_err {
uint8_t unused_0[7];
} __attribute__((packed));
-/*****************************
- * hwrm_nvm_factory_defaults *
- *****************************/
-
-
-/* hwrm_nvm_factory_defaults_input (size:192b/24B) */
-struct hwrm_nvm_factory_defaults_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFE - Reserved for internal processors
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* mode is 8 b */
- uint8_t mode;
-	/* Restore factory default settings (mode value 0). */
- #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_RESTORE UINT32_C(0x0)
-	/* Create factory default settings (mode value 1). */
- #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE UINT32_C(0x1)
- #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_LAST \
- HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE
- uint8_t unused_0[7];
-} __attribute__((packed));
-
-/* hwrm_nvm_factory_defaults_output (size:128b/16B) */
-struct hwrm_nvm_factory_defaults_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t result;
- /* factory defaults created successfully. */
- #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_OK \
- UINT32_C(0x0)
- /* factory defaults restored successfully. */
- #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_RESTORE_OK \
- UINT32_C(0x1)
- /* factory defaults already created. */
- #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY \
- UINT32_C(0x2)
- #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_LAST \
- HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY
- uint8_t unused_0[6];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} __attribute__((packed));
-
-/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */
-struct hwrm_nvm_factory_defaults_cmd_err {
- /*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
- */
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN \
- UINT32_C(0x0)
- /* valid configuration not present to create defaults */
- #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG \
- UINT32_C(0x1)
- /* No saved configuration present to restore, restore failed */
- #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG \
- UINT32_C(0x2)
- #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST \
- HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG
- uint8_t unused_0[7];
-} __attribute__((packed));
-
#endif /* _HSI_STRUCT_DEF_DPDK_H_ */
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index acad16a1..1893e3ca 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -8,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_bond.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
diff --git a/drivers/net/bonding/meson.build b/drivers/net/bonding/meson.build
index 602d2880..00374edb 100644
--- a/drivers/net/bonding/meson.build
+++ b/drivers/net/bonding/meson.build
@@ -3,6 +3,7 @@
name = 'bond' #, james bond :-)
version = 2
+allow_experimental_apis = true
sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c',
'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c')
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index f8cea4b6..dd847c6f 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -130,7 +130,7 @@ static const struct ether_addr lacp_mac_addr = {
.addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
};
-struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
+struct port bond_mode_8023ad_ports[RTE_MAX_ETHPORTS];
static void
timer_cancel(uint64_t *timer)
@@ -187,7 +187,7 @@ set_warning_flags(struct port *port, uint16_t flags)
static void
show_warnings(uint16_t slave_id)
{
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
uint8_t warnings;
do {
@@ -260,7 +260,7 @@ static void
rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
struct lacpdu *lacp)
{
- struct port *agg, *port = &mode_8023ad_ports[slave_id];
+ struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];
uint64_t timeout;
if (SM_FLAG(port, BEGIN)) {
@@ -319,7 +319,7 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
ACTOR_STATE_CLR(port, DEFAULTED);
/* If LACP partner params match this port actor params */
- agg = &mode_8023ad_ports[port->aggregator_port_id];
+ agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
bool match = port->actor.system_priority ==
lacp->partner.port_params.system_priority &&
is_same_ether_addr(&agg->actor.system,
@@ -380,7 +380,7 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
static void
periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
/* Calculate if either side is LACP enabled */
uint64_t timeout;
uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
@@ -442,7 +442,7 @@ periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
static void
mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
/* Save current state for later use */
const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
@@ -545,7 +545,7 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
static void
tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
- struct port *agg, *port = &mode_8023ad_ports[slave_id];
+ struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];
struct rte_mbuf *lacp_pkt = NULL;
struct lacpdu_header *hdr;
@@ -591,7 +591,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
sizeof(port->actor));
- agg = &mode_8023ad_ports[port->aggregator_port_id];
+ agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
lacpdu->actor.state = port->actor_state;
@@ -677,11 +677,11 @@ selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
slaves = internals->active_slaves;
slaves_count = internals->active_slave_count;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
/* Search for aggregator suitable for this port */
for (i = 0; i < slaves_count; ++i) {
- agg = &mode_8023ad_ports[slaves[i]];
+ agg = &bond_mode_8023ad_ports[slaves[i]];
/* Skip ports that are not aggregators */
if (agg->aggregator_port_id != slaves[i])
continue;
@@ -824,7 +824,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
} else
key = 0;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
key = rte_cpu_to_be_16(key);
if (key != port->actor.key) {
@@ -844,7 +844,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
for (i = 0; i < internals->active_slave_count; i++) {
slave_id = internals->active_slaves[i];
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if ((port->actor.key &
rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
@@ -907,7 +907,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
{
struct bond_dev_private *internals = bond_dev->data->dev_private;
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
struct port_params initial = {
.system = { { 0 } },
.system_priority = rte_cpu_to_be_16(0xFFFF),
@@ -1008,7 +1008,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
struct port *port = NULL;
uint8_t old_partner_state;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
ACTOR_STATE_CLR(port, AGGREGATION);
port->selected = UNSELECTED;
@@ -1045,7 +1045,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
for (i = 0; i < internals->active_slave_count; i++) {
slave_id = internals->active_slaves[i];
- slave = &mode_8023ad_ports[slave_id];
+ slave = &bond_mode_8023ad_ports[slave_id];
rte_eth_macaddr_get(slave_id, &slave_addr);
if (is_same_ether_addr(&slave_addr, &slave->actor.system))
@@ -1058,7 +1058,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
continue;
for (j = 0; j < internals->active_slave_count; j++) {
- agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
+ agg_slave = &bond_mode_8023ad_ports[internals->active_slaves[j]];
if (agg_slave->aggregator_port_id == slave_id)
SM_FLAG_SET(agg_slave, NTT);
}
@@ -1191,7 +1191,7 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
uint16_t slave_id, struct rte_mbuf *pkt)
{
struct mode8023ad_private *mode4 = &internals->mode4;
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
struct marker_header *m_hdr;
uint64_t marker_timer, old_marker_timer;
int retval;
@@ -1395,7 +1395,7 @@ rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
internals->active_slave_count)
return -EINVAL;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
info->selected = port->selected;
info->actor_state = port->actor_state;
@@ -1447,7 +1447,7 @@ rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
if (res != 0)
return res;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (enabled)
ACTOR_STATE_SET(port, COLLECTING);
@@ -1468,7 +1468,7 @@ rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
if (res != 0)
return res;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (enabled)
ACTOR_STATE_SET(port, DISTRIBUTING);
@@ -1488,7 +1488,7 @@ rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
if (err != 0)
return err;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
return ACTOR_STATE(port, DISTRIBUTING);
}
@@ -1502,7 +1502,7 @@ rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
if (err != 0)
return err;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
return ACTOR_STATE(port, COLLECTING);
}
@@ -1517,7 +1517,7 @@ rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
if (res != 0)
return res;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
return -EINVAL;
@@ -1546,7 +1546,7 @@ bond_mode_8023ad_ext_periodic_cb(void *arg)
for (i = 0; i < internals->active_slave_count; i++) {
slave_id = internals->active_slaves[i];
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
struct rte_mbuf *lacp_pkt = pkt;
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index 0f490a51..c51426b8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -174,7 +174,7 @@ struct mode8023ad_private {
* The pool of *port* structures. The size of the pool
* is configured at compile-time in the <rte_eth_bond_8023ad.c> file.
*/
-extern struct port mode_8023ad_ports[];
+extern struct port bond_mode_8023ad_ports[];
/* Forward declaration */
struct bond_dev_private;
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 8bc04cfd..21bcd504 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -245,9 +245,9 @@ slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
}
TAILQ_FOREACH(flow, &internals->flow_list, next) {
flow->flows[slave_id] = rte_flow_create(slave_port_id,
- &flow->fd->attr,
- flow->fd->items,
- flow->fd->actions,
+ flow->rule.attr,
+ flow->rule.pattern,
+ flow->rule.actions,
&ferror);
if (flow->flows[slave_id] == NULL) {
RTE_BOND_LOG(ERR, "Cannot create flow for slave"
@@ -269,6 +269,173 @@ slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
return 0;
}
+static void
+eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals,
+ const struct rte_eth_dev_info *di)
+{
+ struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
+
+ internals->reta_size = di->reta_size;
+
+ /* Inherit Rx offload capabilities from the first slave device */
+ internals->rx_offload_capa = di->rx_offload_capa;
+ internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
+ internals->flow_type_rss_offloads = di->flow_type_rss_offloads;
+
+ /* Inherit maximum Rx packet size from the first slave device */
+ internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
+
+ /* Inherit default Rx queue settings from the first slave device */
+ memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));
+
+ /*
+ * Turn off descriptor prefetch and writeback by default for all
+ * slave devices. Applications may tweak this setting if need be.
+ */
+ rxconf_i->rx_thresh.pthresh = 0;
+ rxconf_i->rx_thresh.hthresh = 0;
+ rxconf_i->rx_thresh.wthresh = 0;
+
+ /* Setting this to zero should effectively enable default values */
+ rxconf_i->rx_free_thresh = 0;
+
+ /* Disable deferred start by default for all slave devices */
+ rxconf_i->rx_deferred_start = 0;
+}
+
+static void
+eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals,
+ const struct rte_eth_dev_info *di)
+{
+ struct rte_eth_txconf *txconf_i = &internals->default_txconf;
+
+ /* Inherit Tx offload capabilities from the first slave device */
+ internals->tx_offload_capa = di->tx_offload_capa;
+ internals->tx_queue_offload_capa = di->tx_queue_offload_capa;
+
+ /* Inherit default Tx queue settings from the first slave device */
+ memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));
+
+ /*
+ * Turn off descriptor prefetch and writeback by default for all
+ * slave devices. Applications may tweak this setting if need be.
+ */
+ txconf_i->tx_thresh.pthresh = 0;
+ txconf_i->tx_thresh.hthresh = 0;
+ txconf_i->tx_thresh.wthresh = 0;
+
+ /*
+ * Setting these parameters to zero assumes that default
+ * values will be configured implicitly by slave devices.
+ */
+ txconf_i->tx_free_thresh = 0;
+ txconf_i->tx_rs_thresh = 0;
+
+ /* Disable deferred start by default for all slave devices */
+ txconf_i->tx_deferred_start = 0;
+}
+
+static void
+eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
+ const struct rte_eth_dev_info *di)
+{
+ struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
+ const struct rte_eth_rxconf *rxconf = &di->default_rxconf;
+
+ internals->rx_offload_capa &= di->rx_offload_capa;
+ internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
+ internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;
+
+ /*
+ * If at least one slave device suggests enabling this
+ * setting by default, enable it for all slave devices
+ * since disabling it may not necessarily be supported.
+ */
+ if (rxconf->rx_drop_en == 1)
+ rxconf_i->rx_drop_en = 1;
+
+ /*
+ * Adding a new slave device may cause some of previously inherited
+ * offloads to be withdrawn from the internal rx_queue_offload_capa
+ * value. Thus, the new internal value of default Rx queue offloads
+ * has to be masked by rx_queue_offload_capa to make sure that only
+ * commonly supported offloads are preserved from both the previous
+ * value and the value being inherited from the new slave device.
+ */
+ rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
+ internals->rx_queue_offload_capa;
+
+ /*
+ * RETA size is the GCD of all slave RETA sizes, so, if all sizes are
+ * powers of 2, the lower one is the GCD
+ */
+ if (internals->reta_size > di->reta_size)
+ internals->reta_size = di->reta_size;
+
+ if (!internals->max_rx_pktlen &&
+ di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
+ internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
+}
+
+static void
+eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
+ const struct rte_eth_dev_info *di)
+{
+ struct rte_eth_txconf *txconf_i = &internals->default_txconf;
+ const struct rte_eth_txconf *txconf = &di->default_txconf;
+
+ internals->tx_offload_capa &= di->tx_offload_capa;
+ internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;
+
+ /*
+ * Adding a new slave device may cause some of previously inherited
+ * offloads to be withdrawn from the internal tx_queue_offload_capa
+ * value. Thus, the new internal value of default Tx queue offloads
+ * has to be masked by tx_queue_offload_capa to make sure that only
+ * commonly supported offloads are preserved from both the previous
+ * value and the value being inherited from the new slave device.
+ */
+ txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
+ internals->tx_queue_offload_capa;
+}
+
+static void
+eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
+ const struct rte_eth_desc_lim *slave_desc_lim)
+{
+ memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim));
+}
+
+static int
+eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
+ const struct rte_eth_desc_lim *slave_desc_lim)
+{
+ bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
+ slave_desc_lim->nb_max);
+ bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
+ slave_desc_lim->nb_min);
+ bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
+ slave_desc_lim->nb_align);
+
+ if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
+ bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
+ RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
+ return -EINVAL;
+ }
+
+ /* Treat maximum number of segments equal to 0 as unspecified */
+ if (slave_desc_lim->nb_seg_max != 0 &&
+ (bond_desc_lim->nb_seg_max == 0 ||
+ slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
+ bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max;
+ if (slave_desc_lim->nb_mtu_seg_max != 0 &&
+ (bond_desc_lim->nb_mtu_seg_max == 0 ||
+ slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
+ bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max;
+
+ return 0;
+}
+
static int
__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
{
@@ -326,34 +493,28 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
- internals->reta_size = dev_info.reta_size;
+ eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info);
+ eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info);
- /* Take the first dev's offload capabilities */
- internals->rx_offload_capa = dev_info.rx_offload_capa;
- internals->tx_offload_capa = dev_info.tx_offload_capa;
- internals->rx_queue_offload_capa = dev_info.rx_queue_offload_capa;
- internals->tx_queue_offload_capa = dev_info.tx_queue_offload_capa;
- internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
+ eth_bond_slave_inherit_desc_lim_first(&internals->rx_desc_lim,
+ &dev_info.rx_desc_lim);
+ eth_bond_slave_inherit_desc_lim_first(&internals->tx_desc_lim,
+ &dev_info.tx_desc_lim);
+ } else {
+ int ret;
- /* Inherit first slave's max rx packet size */
- internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
+ eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info);
+ eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info);
- } else {
- internals->rx_offload_capa &= dev_info.rx_offload_capa;
- internals->tx_offload_capa &= dev_info.tx_offload_capa;
- internals->rx_queue_offload_capa &= dev_info.rx_queue_offload_capa;
- internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa;
- internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
-
- /* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
- * the power of 2, the lower one is GCD
- */
- if (internals->reta_size > dev_info.reta_size)
- internals->reta_size = dev_info.reta_size;
+ ret = eth_bond_slave_inherit_desc_lim_next(
+ &internals->rx_desc_lim, &dev_info.rx_desc_lim);
+ if (ret != 0)
+ return ret;
- if (!internals->max_rx_pktlen &&
- dev_info.max_rx_pktlen < internals->candidate_max_rx_pktlen)
- internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
+ ret = eth_bond_slave_inherit_desc_lim_next(
+ &internals->tx_desc_lim, &dev_info.tx_desc_lim);
+ if (ret != 0)
+ return ret;
}
bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
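
The helpers added above centralise how the bond inherits per-slave capabilities: the first slave seeds the bond's Rx/Tx defaults and descriptor limits, and each further slave narrows them (offload masks are AND-ed, limits clamped with RTE_MIN/RTE_MAX, and a slave whose range cannot intersect the current one is rejected with -EINVAL). A minimal standalone sketch (not part of the patch) of the descriptor-limit merge, using made-up slave limits:

/*
 * Illustrative sketch only: merge two slaves' rte_eth_desc_lim the same
 * way eth_bond_slave_inherit_desc_lim_first() and _next() do.
 * The limits below are made-up numbers, not taken from a real device.
 */
#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

static int
merge_desc_lim(struct rte_eth_desc_lim *bond,
               const struct rte_eth_desc_lim *slave)
{
        bond->nb_max = RTE_MIN(bond->nb_max, slave->nb_max);
        bond->nb_min = RTE_MAX(bond->nb_min, slave->nb_min);
        bond->nb_align = RTE_MAX(bond->nb_align, slave->nb_align);

        /* Slaves with disjoint descriptor ranges cannot be bonded */
        if (bond->nb_min > bond->nb_max || bond->nb_align > bond->nb_max)
                return -EINVAL;
        return 0;
}

int
main(void)
{
        struct rte_eth_desc_lim a = { .nb_max = 4096, .nb_min = 64, .nb_align = 32 };
        struct rte_eth_desc_lim b = { .nb_max = 1024, .nb_min = 128, .nb_align = 64 };
        struct rte_eth_desc_lim bond = a;       /* first slave seeds the limits */

        if (merge_desc_lim(&bond, &b) == 0)
                printf("bonded limits: max=%u min=%u align=%u\n",
                       bond.nb_max, bond.nb_min, bond.nb_align);
        return 0;
}

With the numbers above the merged limits become max=1024, min=128, align=64; a slave advertising nb_min above the current nb_max would make the check fail.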
diff --git a/drivers/net/bonding/rte_eth_bond_flow.c b/drivers/net/bonding/rte_eth_bond_flow.c
index 31e4bcae..f94d46ca 100644
--- a/drivers/net/bonding/rte_eth_bond_flow.c
+++ b/drivers/net/bonding/rte_eth_bond_flow.c
@@ -2,8 +2,11 @@
* Copyright 2018 Mellanox Technologies, Ltd
*/
+#include <stddef.h>
+#include <string.h>
#include <sys/queue.h>
+#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
@@ -16,19 +19,33 @@ bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr,
const struct rte_flow_action *actions)
{
struct rte_flow *flow;
- size_t fdsz;
+ const struct rte_flow_conv_rule rule = {
+ .attr_ro = attr,
+ .pattern_ro = items,
+ .actions_ro = actions,
+ };
+ struct rte_flow_error error;
+ int ret;
- fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
- flow = rte_zmalloc_socket(NULL, sizeof(struct rte_flow) + fdsz,
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR, "Unable to process flow rule (%s): %s",
+ error.message ? error.message : "unspecified",
+ strerror(rte_errno));
+ return NULL;
+ }
+ flow = rte_zmalloc_socket(NULL, offsetof(struct rte_flow, rule) + ret,
RTE_CACHE_LINE_SIZE, numa_node);
if (unlikely(flow == NULL)) {
RTE_BOND_LOG(ERR, "Could not allocate new flow");
return NULL;
}
- flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
- if (unlikely(rte_flow_copy(flow->fd, fdsz, attr, items, actions) !=
- fdsz)) {
- RTE_BOND_LOG(ERR, "Failed to copy flow description");
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
+ &error);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR, "Failed to copy flow rule (%s): %s",
+ error.message ? error.message : "unspecified",
+ strerror(rte_errno));
rte_free(flow);
return NULL;
}
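
rte_flow_copy() is replaced with the two-pass rte_flow_conv() idiom: a first call with a NULL destination returns the number of bytes the converted rule needs, and a second call deep-copies the attr/pattern/actions into a buffer of at least that size (the API is still experimental, which is why the Makefile and meson build above now set ALLOW_EXPERIMENTAL_API). A minimal sketch of the same idiom outside the bonding PMD (not part of the patch), assuming caller-provided attr, pattern and actions:

/*
 * Sketch of the size-query / copy pattern used above; attr, pattern and
 * actions stand for a caller-provided flow rule.
 */
#include <stdlib.h>
#include <rte_flow.h>

static struct rte_flow_conv_rule *
copy_rule(const struct rte_flow_attr *attr,
          const struct rte_flow_item *pattern,
          const struct rte_flow_action *actions)
{
        const struct rte_flow_conv_rule rule = {
                .attr_ro = attr,
                .pattern_ro = pattern,
                .actions_ro = actions,
        };
        struct rte_flow_error error;
        void *buf;
        int size;

        /* First pass: NULL destination, returns the required size in bytes */
        size = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
        if (size < 0)
                return NULL;
        buf = malloc(size);
        if (buf == NULL)
                return NULL;
        /* Second pass: deep-copy attr/pattern/actions; the converted
         * struct rte_flow_conv_rule sits at the start of the buffer. */
        if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, buf, size, &rule, &error) < 0) {
                free(buf);
                return NULL;
        }
        return buf;
}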
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 58f7377c..156f31c6 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -37,7 +37,8 @@ get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
size_t vlan_offset = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
+ rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
vlan_offset = sizeof(struct vlan_hdr);
@@ -57,28 +58,34 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
struct bond_dev_private *internals;
- uint16_t num_rx_slave = 0;
uint16_t num_rx_total = 0;
-
+ uint16_t slave_count;
+ uint16_t active_slave;
int i;
/* Cast to structure, containing bonded device's port id and queue id */
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-
internals = bd_rx_q->dev_private;
+ slave_count = internals->active_slave_count;
+ active_slave = internals->active_slave;
+ for (i = 0; i < slave_count && nb_pkts; i++) {
+ uint16_t num_rx_slave;
- for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
/* Offset of pointer to *bufs increases as packets are received
* from other slaves */
- num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
- bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
- if (num_rx_slave) {
- num_rx_total += num_rx_slave;
- nb_pkts -= num_rx_slave;
- }
+ num_rx_slave =
+ rte_eth_rx_burst(internals->active_slaves[active_slave],
+ bd_rx_q->queue_id,
+ bufs + num_rx_total, nb_pkts);
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+ if (++active_slave == slave_count)
+ active_slave = 0;
}
+ if (++internals->active_slave == slave_count)
+ internals->active_slave = 0;
return num_rx_total;
}
@@ -257,25 +264,32 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
uint16_t num_rx_total = 0; /* Total number of received packets */
uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slave_count;
-
- uint16_t i, idx;
+ uint16_t active_slave;
+ uint16_t i;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
+ active_slave = internals->active_slave;
memcpy(slaves, internals->active_slaves,
sizeof(internals->active_slaves[0]) * slave_count);
- for (i = 0, idx = internals->active_slave;
- i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
- idx = idx % slave_count;
+ for (i = 0; i < slave_count && nb_pkts; i++) {
+ uint16_t num_rx_slave;
/* Read packets from this slave */
- num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
- &bufs[num_rx_total], nb_pkts - num_rx_total);
+ num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
+ bd_rx_q->queue_id,
+ bufs + num_rx_total, nb_pkts);
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+
+ if (++active_slave == slave_count)
+ active_slave = 0;
}
- internals->active_slave = idx;
+ if (++internals->active_slave == slave_count)
+ internals->active_slave = 0;
return num_rx_total;
}
@@ -300,10 +314,10 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
@@ -320,7 +334,7 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
dist_slave_port_ids[dist_slave_count++] =
@@ -358,34 +372,12 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow reordering
- * later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++) {
- slave_bufs[i][j] =
- slave_bufs[i][(slave_tx_count - 1) + j];
- }
- }
- }
-
- /*
- * If there are tx burst failures we move packets to end of bufs to
- * preserve expected PMD behaviour of all failed transmitted being
- * at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- bufs[bufs_idx++] = slave_bufs[i][j];
- }
+ total_tx_fail_count += slave_tx_fail_count;
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
@@ -400,8 +392,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* Cast to structure, containing bonded device's port id and queue id */
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
struct bond_dev_private *internals = bd_rx_q->dev_private;
- struct ether_addr bond_mac;
-
+ struct rte_eth_dev *bonded_eth_dev =
+ &rte_eth_devices[internals->port_id];
+ struct ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
struct ether_hdr *hdr;
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
@@ -414,7 +407,6 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint8_t i, j, k;
uint8_t subtype;
- rte_eth_macaddr_get(internals->port_id, &bond_mac);
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
@@ -428,7 +420,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
}
for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
j = num_rx_total;
- collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+ collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
COLLECTING);
/* Read packets from this slave */
@@ -457,9 +449,11 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
* in collecting state or bonding interface is not in promiscuous
* mode and packet address does not match. */
if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
- !collecting || (!promisc &&
- !is_multicast_ether_addr(&hdr->d_addr) &&
- !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
+ !collecting ||
+ (!promisc &&
+ !is_multicast_ether_addr(&hdr->d_addr) &&
+ !is_same_ether_addr(bond_mac,
+ &hdr->d_addr)))) {
if (hdr->ether_type == ether_type_slow_be) {
bond_mode_8023ad_handle_slow_pkt(
@@ -480,7 +474,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
idx = 0;
}
- internals->active_slave = idx;
+ if (++internals->active_slave == slave_count)
+ internals->active_slave = 0;
+
return num_rx_total;
}
@@ -715,8 +711,8 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
tx_fail_total += tx_fail_slave;
memcpy(&bufs[nb_pkts - tx_fail_total],
- &slave_bufs[i][num_tx_slave],
- tx_fail_slave * sizeof(bufs[0]));
+ &slave_bufs[i][num_tx_slave],
+ tx_fail_slave * sizeof(bufs[0]));
}
num_tx_total += num_tx_slave;
}
@@ -1221,10 +1217,10 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[nb_bufs];
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
@@ -1265,34 +1261,12 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow reordering
- * later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++) {
- slave_bufs[i][j] =
- slave_bufs[i][(slave_tx_count - 1) + j];
- }
- }
- }
-
- /*
- * If there are tx burst failures we move packets to end of bufs to
- * preserve expected PMD behaviour of all failed transmitted being
- * at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- bufs[bufs_idx++] = slave_bufs[i][j];
- }
+ total_tx_fail_count += slave_tx_fail_count;
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
@@ -1319,10 +1293,10 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
@@ -1338,7 +1312,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
dist_slave_port_ids[dist_slave_count++] =
@@ -1380,46 +1354,20 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow
- * reordering later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- slave_bufs[i][j] =
- slave_bufs[i]
- [(slave_tx_count - 1)
- + j];
- }
- }
+ total_tx_fail_count += slave_tx_fail_count;
- /*
- * If there are tx burst failures we move packets to end of
- * bufs to preserve expected PMD behaviour of all failed
- * transmitted being at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0;
- j < slave_tx_fail_count[i];
- j++) {
- bufs[bufs_idx++] =
- slave_bufs[i][j];
- }
- }
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
}
/* Check for LACP control packets and send if available */
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
struct rte_mbuf *ctrl_pkt = NULL;
if (likely(rte_ring_empty(port->tx_ring)))
@@ -1770,7 +1718,7 @@ slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
int errval = 0;
struct bond_dev_private *internals = (struct bond_dev_private *)
bonded_eth_dev->data->dev_private;
- struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
if (port->slow_pool == NULL) {
char mem_name[256];
@@ -1847,12 +1795,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
/* If RSS is enabled for bonding, try to enable it for slaves */
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
- if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
- != 0) {
+ if (internals->rss_key_len != 0) {
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
- bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ internals->rss_key_len;
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
- bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ internals->rss_key;
} else {
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
}
@@ -2210,7 +2157,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
/* Discard all messages to/from mode 4 state machines */
for (i = 0; i < internals->active_slave_count; i++) {
- port = &mode_8023ad_ports[internals->active_slaves[i]];
+ port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
RTE_ASSERT(port->rx_ring != NULL);
while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
@@ -2229,12 +2176,15 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- internals->link_status_polling_enabled = 0;
- for (i = 0; i < internals->slave_count; i++)
- internals->slaves[i].last_link_status = 0;
-
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
+
+ internals->link_status_polling_enabled = 0;
+ for (i = 0; i < internals->slave_count; i++) {
+ internals->slaves[i].last_link_status = 0;
+ rte_eth_dev_stop(internals->slaves[i].port_id);
+ deactivate_slave(eth_dev, internals->slaves[i].port_id);
+ }
}
void
@@ -2303,6 +2253,16 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = max_nb_rx_queues;
dev_info->max_tx_queues = max_nb_tx_queues;
+ memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
+ sizeof(dev_info->default_rxconf));
+ memcpy(&dev_info->default_txconf, &internals->default_txconf,
+ sizeof(dev_info->default_txconf));
+
+ memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim,
+ sizeof(dev_info->rx_desc_lim));
+ memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim,
+ sizeof(dev_info->tx_desc_lim));
+
/**
* If dedicated hw queues enabled for link bonding device in LACP mode
* then we need to reduce the maximum number of data path queues by 1.
@@ -3123,6 +3083,14 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
/* Initially allow to choose any offload type */
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ memset(&internals->default_rxconf, 0,
+ sizeof(internals->default_rxconf));
+ memset(&internals->default_txconf, 0,
+ sizeof(internals->default_txconf));
+
+ memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
+ memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
+
memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
memset(internals->slaves, 0, sizeof(internals->slaves));
@@ -3162,10 +3130,9 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
err:
rte_free(internals);
- if (eth_dev != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- rte_eth_dev_release_port(eth_dev);
- }
+ if (eth_dev != NULL)
+ eth_dev->data->dev_private = NULL;
+ rte_eth_dev_release_port(eth_dev);
return -1;
}
@@ -3186,8 +3153,7 @@ bond_probe(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
RTE_BOND_LOG(ERR, "Failed to probe %s", name);
@@ -3302,6 +3268,9 @@ bond_remove(struct rte_vdev_device *dev)
if (eth_dev == NULL)
return -ENODEV;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
+
RTE_ASSERT(eth_dev->device == &dev->device);
internals = eth_dev->data->dev_private;
@@ -3324,8 +3293,6 @@ bond_remove(struct rte_vdev_device *dev)
rte_mempool_free(internals->mode6.mempool);
rte_bitmap_free(internals->vlan_filter_bmp);
rte_free(internals->vlan_filter_bmpmem);
- rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data->mac_addrs);
rte_eth_dev_release_port(eth_dev);
@@ -3353,16 +3320,30 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
unsigned i, j;
- /* If RSS is enabled, fill table and key with default values */
+ /*
+ * If RSS is enabled, fill table with default values and
+ * set key to the value specified in the port RSS configuration.
+ * Fall back to default RSS key if the key is not specified
+ */
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
- memcpy(internals->rss_key, default_rss_key, 40);
+ if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
+ internals->rss_key_len =
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ memcpy(internals->rss_key,
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
+ internals->rss_key_len);
+ } else {
+ internals->rss_key_len = sizeof(default_rss_key);
+ memcpy(internals->rss_key, default_rss_key,
+ internals->rss_key_len);
+ }
for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
internals->reta_conf[i].mask = ~0LL;
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
- internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
+ internals->reta_conf[i].reta[j] =
+ (i * RTE_RETA_GROUP_SIZE + j) %
+ dev->data->nb_rx_queues;
}
}
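
With this change the bond keeps its own copy of the RSS key (internals->rss_key/rss_key_len), either the one supplied in the port configuration or the built-in 40-byte default_rss_key, and slave_configure() now pushes that copy to every slave. A hedged application-side sketch (not part of the patch) of supplying a custom key when configuring the bonded port; the port id, queue counts and key bytes are placeholders:

/*
 * Illustrative application snippet: pass a 40-byte RSS key to the bonded
 * port so every slave gets programmed with the same key.
 */
#include <rte_ethdev.h>

static uint8_t app_rss_key[40] = { 0x6d, 0x5a /* remaining bytes chosen by the application */ };

static int
configure_bond_rss(uint16_t bond_port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf = {
                .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
                .rx_adv_conf = {
                        .rss_conf = {
                                .rss_key = app_rss_key,
                                .rss_key_len = sizeof(app_rss_key),
                                .rss_hf = ETH_RSS_IP,
                        },
                },
        };

        return rte_eth_dev_configure(bond_port_id, nb_rxq, nb_txq, &conf);
}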
@@ -3618,7 +3599,7 @@ int bond_logtype;
RTE_INIT(bond_init_log)
{
- bond_logtype = rte_log_register("pmd.net.bon");
+ bond_logtype = rte_log_register("pmd.net.bond");
if (bond_logtype >= 0)
rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
}
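
All three Tx paths (8023ad fast queue, balance and 8023ad) drop the per-slave slave_tx_fail_count[] arrays and the two-stage shift/reorder loops; when a slave sends fewer mbufs than it was given, the unsent ones starting at slave_bufs[i][slave_tx_count] are copied straight into the tail of the caller's bufs[] array, preserving the expected PMD behaviour that untransmitted packets end up at the end of the input mbuf array. A minimal sketch of that repacking for a single slave (a simplification of the hunks above, not a drop-in replacement):

/*
 * Illustrative sketch: repack the mbufs one slave failed to send into the
 * tail of the caller-visible array.
 */
#include <string.h>
#include <stdint.h>
#include <rte_mbuf.h>

static uint16_t
repack_tx_failures(struct rte_mbuf **bufs, uint16_t nb_bufs,
                   struct rte_mbuf **slave_bufs, uint16_t slave_nb_bufs,
                   uint16_t slave_tx_count, uint16_t total_tx_fail_count)
{
        if (slave_tx_count < slave_nb_bufs) {
                uint16_t fail = slave_nb_bufs - slave_tx_count;

                total_tx_fail_count += fail;
                /* Unsent mbufs start at slave_bufs[slave_tx_count]; copy them
                 * to the end of bufs[] so the caller can retry or free them. */
                memcpy(&bufs[nb_bufs - total_tx_fail_count],
                       &slave_bufs[slave_tx_count],
                       fail * sizeof(bufs[0]));
        }
        return total_tx_fail_count;
}

The burst function then returns the number actually transmitted, so callers can retry or free the trailing bufs[] entries as usual.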
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 43e0e448..3ea5d686 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -5,9 +5,11 @@
#ifndef _RTE_ETH_BOND_PRIVATE_H_
#define _RTE_ETH_BOND_PRIVATE_H_
+#include <stdint.h>
#include <sys/queue.h>
#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
#include <rte_spinlock.h>
#include <rte_bitmap.h>
#include <rte_flow_driver.h>
@@ -93,7 +95,8 @@ struct rte_flow {
/* Slaves flows */
struct rte_flow *flows[RTE_MAX_ETHPORTS];
/* Flow description for synchronization */
- struct rte_flow_desc *fd;
+ struct rte_flow_conv_rule rule;
+ uint8_t rule_data[];
};
typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
@@ -160,6 +163,11 @@ struct bond_dev_private {
/** Bit mask of RSS offloads, the bit offset also means flow type */
uint64_t flow_type_rss_offloads;
+ struct rte_eth_rxconf default_rxconf; /**< Default RxQ conf. */
+ struct rte_eth_txconf default_txconf; /**< Default TxQ conf. */
+ struct rte_eth_desc_lim rx_desc_lim; /**< Rx descriptor limits */
+ struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptor limits */
+
uint16_t reta_size;
struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
RTE_RETA_GROUP_SIZE];
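
struct rte_flow in the bonding PMD now ends with the converted rte_flow_conv_rule plus a flexible rule_data[] array, which is why bond_flow_alloc() sizes its allocation as offsetof(struct rte_flow, rule) + ret rather than sizeof(struct rte_flow) + ret: the byte count returned by rte_flow_conv() already covers the rte_flow_conv_rule header itself. A small sketch of the layout (not part of the patch), using a placeholder struct so it stays self-contained:

/*
 * Sketch of the new layout; the placeholder field stands in for the TAILQ
 * entry and per-slave flows[] that precede 'rule' in the real struct.
 */
#include <stddef.h>
#include <stdint.h>
#include <rte_flow.h>
#include <rte_malloc.h>

struct bond_flow_sketch {
        void *fixed_fields_placeholder[4];      /* list entry + slave flows */
        struct rte_flow_conv_rule rule;         /* converted rule header */
        uint8_t rule_data[];                    /* deep-copied items/actions */
};

static struct bond_flow_sketch *
alloc_flow_storage(int conv_size, int numa_node)
{
        /* conv_size is the value returned by the sizing call to
         * rte_flow_conv() and already includes the rte_flow_conv_rule
         * header, hence offsetof() rather than sizeof() here. */
        return rte_zmalloc_socket(NULL,
                        offsetof(struct bond_flow_sketch, rule) + conv_size,
                        RTE_CACHE_LINE_SIZE, numa_node);
}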
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index 5d66c4b3..68466f13 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -53,6 +53,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += mps_tcam.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += l2t.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index e98dd218..47cfc5f5 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -324,7 +324,11 @@ struct adapter {
unsigned int clipt_start; /* CLIP table start */
unsigned int clipt_end; /* CLIP table end */
+ unsigned int l2t_start; /* Layer 2 table start */
+ unsigned int l2t_end; /* Layer 2 table end */
struct clip_tbl *clipt; /* CLIP table */
+ struct l2t_data *l2t; /* Layer 2 table */
+ struct mpstcam_table *mpstcam;
struct tid_info tids; /* Info used to access TID related tables */
};
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 157201da..fd200668 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -157,6 +157,7 @@ struct tp_params {
int port_shift;
int protocol_shift;
int ethertype_shift;
+ int macmatch_shift;
u64 hash_filter_mask;
};
@@ -270,6 +271,7 @@ struct adapter_params {
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
u8 fw_caps_support; /* 32-bit Port Capabilities */
+ u8 filter2_wr_support; /* FW support for FILTER2_WR */
};
/* Firmware Port Capabilities types.
@@ -388,6 +390,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int idx, const u8 *addr, bool persist, bool add_smt);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 31762c9c..701e0b1f 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -4162,6 +4162,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_alloc_raw_mac_filt - Adds a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @mac: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @port_id: the port index
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the mac entry at the specified index using the raw mac interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ val = V_FW_CMD_LEN16(1) |
+ V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+ /* Specify that this is an inner mac address */
+ p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
+ V_DATAPORTNUM(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
+ V_DATAPORTNUM(M_DATAPORTNUM));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != (int)idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+/**
+ * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the mac entry at the specified index using the raw mac interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 raw;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_CMD_EXEC(0) |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
+ raw |
+ V_FW_CMD_LEN16(1));
+
+ p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
+ V_DATAPORTNUM(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
+ V_DATAPORTNUM(M_DATAPORTNUM));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
* t4_change_mac - modifies the exact-match filter for a MAC address
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -5145,6 +5251,8 @@ int t4_init_tp_params(struct adapter *adap)
F_PROTOCOL);
adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
F_ETHERTYPE);
+ adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
+ F_MACMATCH);
/*
* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
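
t4_alloc_raw_mac_filt()/t4_free_raw_mac_filt() let the PMD program MPS TCAM entries directly through FW_VI_MAC_CMD with the new FW_VI_MAC_TYPE_RAW entry type: the caller supplies the MAC, a byte mask, the TCAM index, the lookup type (outer header 0, inner header 1) and the port. A hedged caller sketch (not part of the patch), assuming it is compiled inside the cxgbe driver with an already-probed adapter/viid and using an all-ones mask for an exact match:

/*
 * Sketch of a caller of the raw MPS TCAM helpers declared in base/common.h;
 * relies on the driver's include paths and its u8/bool compat typedefs.
 */
#include <stdbool.h>
#include <rte_ether.h>
#include "common.h"

static int
install_exact_mac(struct adapter *adap, unsigned int viid, unsigned int idx,
                  const u8 *mac, u8 port_id)
{
        static const u8 mask[ETHER_ADDR_LEN] = {
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff
        };
        int ret;

        /* lookup_type 0 = match the outer Ethernet header */
        ret = t4_alloc_raw_mac_filt(adap, viid, mac, mask, idx,
                                    0, port_id, true);
        if (ret < 0)
                return ret;     /* negative errno on failure */

        /* ret is the TCAM index actually programmed; remove it later with
         * t4_free_raw_mac_filt(adap, viid, mac, mask, ret, 0, port_id, true);
         */
        return ret;
}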
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 5d433c91..9e052b0f 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -11,7 +11,9 @@ enum {
CPL_SET_TCB_FIELD = 0x5,
CPL_ABORT_REQ = 0xA,
CPL_ABORT_RPL = 0xB,
+ CPL_L2T_WRITE_REQ = 0x12,
CPL_TID_RELEASE = 0x1A,
+ CPL_L2T_WRITE_RPL = 0x23,
CPL_ACT_OPEN_RPL = 0x25,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SET_TCB_RPL = 0x3A,
@@ -30,6 +32,7 @@ enum CPL_error {
enum {
ULP_MODE_NONE = 0,
+ ULP_MODE_TCPDDP = 5,
};
enum {
@@ -66,6 +69,9 @@ union opcode_tid {
#define M_TID_TID 0x3fff
#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+#define S_TID_QID 14
+#define V_TID_QID(x) ((x) << S_TID_QID)
+
struct rss_header {
__u8 opcode;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -133,6 +139,12 @@ struct work_request_hdr {
#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL)
+#define S_L2T_IDX 36
+#define V_L2T_IDX(x) ((__u64)(x) << S_L2T_IDX)
+
+#define S_NAGLE 49
+#define V_NAGLE(x) ((__u64)(x) << S_NAGLE)
+
/* option 2 fields */
#define S_RSS_QUEUE 0
#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
@@ -151,6 +163,9 @@ struct work_request_hdr {
#define S_CCTRL_ECN 27
#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+#define S_SACK_EN 30
+#define V_SACK_EN(x) ((x) << S_SACK_EN)
+
#define S_T5_OPT_2_VALID 31
#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
@@ -421,6 +436,35 @@ struct cpl_rx_pkt {
__be16 err_vec;
};
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 params;
+ __be16 l2t_idx;
+ __be16 vlan;
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_PORT 8
+#define V_L2T_W_PORT(x) ((x) << S_L2T_W_PORT)
+
+#define S_L2T_W_LPBK 10
+#define V_L2T_W_LPBK(x) ((x) << S_L2T_W_LPBK)
+
+#define S_L2T_W_ARPMISS 11
+#define V_L2T_W_ARPMISS(x) ((x) << S_L2T_W_ARPMISS)
+
+#define S_L2T_W_NOREPLY 15
+#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
+
+struct cpl_l2t_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
/* rx_pkt.l2info fields */
#define S_RXF_UDP 22
#define V_RXF_UDP(x) ((x) << S_RXF_UDP)
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 6f872edc..af8c741e 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -45,6 +45,14 @@
#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8)
#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512
+#define S_DATAPORTNUM 12
+#define M_DATAPORTNUM 0xfU
+#define V_DATAPORTNUM(x) ((x) << S_DATAPORTNUM)
+
+#define S_DATALKPTYPE 10
+#define M_DATALKPTYPE 0x3U
+#define V_DATALKPTYPE(x) ((x) << S_DATALKPTYPE)
+
/* registers for module SGE */
#define SGE_BASE_ADDR 0x1000
diff --git a/drivers/net/cxgbe/base/t4_tcb.h b/drivers/net/cxgbe/base/t4_tcb.h
index 25435f9f..68cda773 100644
--- a/drivers/net/cxgbe/base/t4_tcb.h
+++ b/drivers/net/cxgbe/base/t4_tcb.h
@@ -6,6 +6,9 @@
#ifndef _T4_TCB_DEFS_H
#define _T4_TCB_DEFS_H
+/* 95:32 */
+#define W_TCB_T_FLAGS 1
+
/* 105:96 */
#define W_TCB_RSS_INFO 3
#define S_TCB_RSS_INFO 0
@@ -23,4 +26,6 @@
#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+#define S_TF_CCTRL_RFR 62
+
#endif /* _T4_TCB_DEFS_H */
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index e80b58a3..06d3ef3a 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -61,6 +61,7 @@ enum fw_wr_opcodes {
FW_ETH_TX_PKTS_WR = 0x09,
FW_ETH_TX_PKT_VM_WR = 0x11,
FW_ETH_TX_PKTS_VM_WR = 0x12,
+ FW_FILTER2_WR = 0x77,
FW_ETH_TX_PKTS2_WR = 0x78,
};
@@ -165,7 +166,7 @@ enum fw_filter_wr_cookie {
FW_FILTER_WR_EINVAL,
};
-struct fw_filter_wr {
+struct fw_filter2_wr {
__be32 op_pkd;
__be32 len16_pkd;
__be64 r3;
@@ -195,6 +196,19 @@ struct fw_filter_wr {
__be16 fpm;
__be16 r7;
__u8 sma[6];
+ __be16 r8;
+ __u8 filter_type_swapmac;
+ __u8 natmode_to_ulp_type;
+ __be16 newlport;
+ __be16 newfport;
+ __u8 newlip[16];
+ __u8 newfip[16];
+ __be32 natseqcheck;
+ __be32 r9;
+ __be64 r10;
+ __be64 r11;
+ __be64 r12;
+ __be64 r13;
};
#define S_FW_FILTER_WR_TID 12
@@ -300,6 +314,15 @@ struct fw_filter_wr {
#define S_FW_FILTER_WR_MATCHTYPEM 0
#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+#define S_FW_FILTER2_WR_SWAPMAC 0
+#define V_FW_FILTER2_WR_SWAPMAC(x) ((x) << S_FW_FILTER2_WR_SWAPMAC)
+
+#define S_FW_FILTER2_WR_NATMODE 5
+#define V_FW_FILTER2_WR_NATMODE(x) ((x) << S_FW_FILTER2_WR_NATMODE)
+
+#define S_FW_FILTER2_WR_ULP_TYPE 0
+#define V_FW_FILTER2_WR_ULP_TYPE(x) ((x) << S_FW_FILTER2_WR_ULP_TYPE)
+
/******************************************************************************
* C O M M A N D s
*********************/
@@ -655,6 +678,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */
FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+ FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
};
/*
@@ -665,6 +689,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
+ FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A
};
@@ -1280,12 +1306,17 @@ struct fw_vi_cmd {
/* Special VI_MAC command index ids */
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
enum fw_vi_mac_smac {
FW_VI_MAC_MPS_TCAM_ENTRY,
FW_VI_MAC_SMT_AND_MPSTCAM
};
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_RAW = 0x2,
+};
+
struct fw_vi_mac_cmd {
__be32 op_to_viid;
__be32 freemacs_to_len16;
@@ -1297,6 +1328,13 @@ struct fw_vi_mac_cmd {
struct fw_vi_mac_hash {
__be64 hashvec;
} hash;
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
} u;
};
@@ -1306,6 +1344,12 @@ struct fw_vi_mac_cmd {
#define G_FW_VI_MAC_CMD_VIID(x) \
(((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID)
+#define S_FW_VI_MAC_CMD_FREEMACS 31
+#define V_FW_VI_MAC_CMD_FREEMACS(x) ((x) << S_FW_VI_MAC_CMD_FREEMACS)
+
+#define S_FW_VI_MAC_CMD_ENTRY_TYPE 23
+#define V_FW_VI_MAC_CMD_ENTRY_TYPE(x) ((x) << S_FW_VI_MAC_CMD_ENTRY_TYPE)
+
#define S_FW_VI_MAC_CMD_VALID 15
#define M_FW_VI_MAC_CMD_VALID 0x1
#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID)
@@ -1325,6 +1369,12 @@ struct fw_vi_mac_cmd {
#define G_FW_VI_MAC_CMD_IDX(x) \
(((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX)
+#define S_FW_VI_MAC_CMD_RAW_IDX 16
+#define M_FW_VI_MAC_CMD_RAW_IDX 0xffff
+#define V_FW_VI_MAC_CMD_RAW_IDX(x) ((x) << S_FW_VI_MAC_CMD_RAW_IDX)
+#define G_FW_VI_MAC_CMD_RAW_IDX(x) \
+ (((x) >> S_FW_VI_MAC_CMD_RAW_IDX) & M_FW_VI_MAC_CMD_RAW_IDX)
+
struct fw_vi_rxmode_cmd {
__be32 op_to_viid;
__be32 retval_len16;
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 5e6f5c98..eb58f880 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -34,6 +34,21 @@
ETH_RSS_IPV6_UDP_EX)
#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+/* Tx/Rx Offloads supported */
+#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO)
+
+#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_SCATTER)
+
+
#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 4dcad7a2..b2f83ea3 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -59,19 +59,6 @@
*/
#include "t4_pci_id_tbl.h"
-#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT |\
- DEV_TX_OFFLOAD_IPV4_CKSUM |\
- DEV_TX_OFFLOAD_UDP_CKSUM |\
- DEV_TX_OFFLOAD_TCP_CKSUM |\
- DEV_TX_OFFLOAD_TCP_TSO)
-
-#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP |\
- DEV_RX_OFFLOAD_CRC_STRIP |\
- DEV_RX_OFFLOAD_IPV4_CKSUM |\
- DEV_RX_OFFLOAD_JUMBO_FRAME |\
- DEV_RX_OFFLOAD_UDP_CKSUM |\
- DEV_RX_OFFLOAD_TCP_CKSUM)
-
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -341,6 +328,7 @@ void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
struct adapter *adapter = pi->adapter;
int err = 0, i;
@@ -361,6 +349,11 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
goto out;
}
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
+ else
+ eth_dev->data->scattered_rx = 0;
+
cxgbe_enable_rx_queues(pi);
err = setup_rss(pi);
@@ -407,26 +400,16 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
* have been disabled
*/
t4_sge_eth_clear_queues(pi);
+ eth_dev->data->scattered_rx = 0;
}
int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
- uint64_t configured_offloads;
int err;
CXGBE_FUNC_TRACE();
- configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
-
- /* KEEP_CRC offload flag is not supported by PMD
- * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
- */
- if (rte_eth_dev_must_keep_crc(configured_offloads)) {
- dev_info(adapter, "can't disable hw crc strip\n");
- eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_CRC_STRIP;
- }
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = setup_sge_fwevtq(adapter);
@@ -1075,11 +1058,9 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct port_info *pi = (struct port_info *)(dev->data->dev_private);
- struct adapter *adapter = pi->adapter;
int ret;
- ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
- pi->xact_addr_filt, (u8 *)addr, true, true);
+ ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
if (ret < 0) {
dev_err(adapter, "failed to set mac addr; err = %d\n",
ret);
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 7f0d3800..ef1102be 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -8,6 +8,7 @@
#include "t4_regs.h"
#include "cxgbe_filter.h"
#include "clip_tbl.h"
+#include "l2t.h"
/**
* Initialize Hash Filters
@@ -65,7 +66,8 @@ int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
#define U(_mask, _field) \
(!(fconf & (_mask)) && S(_field))
- if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
+ if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
+ U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx))
return -EOPNOTSUPP;
#undef S
@@ -87,6 +89,12 @@ int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
if (fs->val.iport >= adapter->params.nports)
return -ERANGE;
+ if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
+ return -EOPNOTSUPP;
+
+ if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -165,6 +173,16 @@ static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
}
/**
+ * Set one of the t_flags bits in the TCB.
+ */
+static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
+ unsigned int bit_pos, unsigned int val, int no_reply)
+{
+ set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
+ (unsigned long long)val << bit_pos, no_reply);
+}
+
+/**
* Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
*/
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
@@ -257,6 +275,8 @@ static u64 hash_filter_ntuple(const struct filter_entry *f)
if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
+ if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
+ ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
if (ntuple != tp->hash_filter_mask)
return 0;
@@ -425,7 +445,10 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
req->local_ip_lo = local_lo;
req->peer_ip_hi = peer_hi;
req->peer_ip_lo = peer_lo;
- req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_DELACK(f->fs.hitcnts) |
+ V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
<< 1) |
V_TX_CHAN(f->fs.eport) |
@@ -436,6 +459,7 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
V_RSS_QUEUE(f->fs.iq) |
F_T5_OPT_2_VALID |
F_RX_CHANNEL |
+ V_SACK_EN(f->fs.swapmac) |
V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
@@ -468,7 +492,10 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
- req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_DELACK(f->fs.hitcnts) |
+ V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
<< 1) |
V_TX_CHAN(f->fs.eport) |
@@ -479,6 +506,7 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
V_RSS_QUEUE(f->fs.iq) |
F_T5_OPT_2_VALID |
F_RX_CHANNEL |
+ V_SACK_EN(f->fs.swapmac) |
V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
@@ -518,6 +546,22 @@ static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
f->dev = dev;
f->fs.iq = iq;
+ /*
+ * If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) {
+ /* allocate L2T entry for new filter */
+ f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ }
+
atid = cxgbe_alloc_atid(t, f);
if (atid < 0)
goto out_err;
@@ -591,6 +635,7 @@ void clear_filter(struct filter_entry *f)
/**
* t4_mk_filtdelwr - create a delete filter WR
+ * @adap: adapter context
* @ftid: the filter ID
* @wr: the filter work request to populate
* @qid: ingress queue to receive the delete notification
@@ -598,10 +643,14 @@ void clear_filter(struct filter_entry *f)
* Creates a filter work request to delete the supplied filter. If @qid is
* negative the delete notification is suppressed.
*/
-static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
+ struct fw_filter2_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
- wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ if (adap->params.filter2_wr_support)
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
+ else
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
V_FW_FILTER_WR_NOREPLY(qid < 0));
@@ -619,7 +668,7 @@ static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
struct adapter *adapter = ethdev2adap(dev);
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct rte_mbuf *mbuf;
- struct fw_filter_wr *fwr;
+ struct fw_filter2_wr *fwr;
struct sge_ctrl_txq *ctrlq;
unsigned int port_id = ethdev2pinfo(dev)->port_id;
@@ -631,8 +680,8 @@ static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
mbuf->data_len = sizeof(*fwr);
mbuf->pkt_len = mbuf->data_len;
- fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
- t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
+ t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
/*
* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -648,11 +697,24 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
struct adapter *adapter = ethdev2adap(dev);
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct rte_mbuf *mbuf;
- struct fw_filter_wr *fwr;
+ struct fw_filter2_wr *fwr;
struct sge_ctrl_txq *ctrlq;
unsigned int port_id = ethdev2pinfo(dev)->port_id;
int ret;
+ /*
+ * If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t)
+ return -ENOMEM;
+ }
+
ctrlq = &adapter->sge.ctrlq[port_id];
mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
if (!mbuf) {
@@ -663,13 +725,16 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
mbuf->data_len = sizeof(*fwr);
mbuf->pkt_len = mbuf->data_len;
- fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
memset(fwr, 0, sizeof(*fwr));
/*
* Construct the work request to set the filter.
*/
- fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ if (adapter->params.filter2_wr_support)
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
+ else
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
fwr->tid_to_iq =
cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
@@ -680,9 +745,16 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_INSVLAN
+ (f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_RMVLAN
+ (f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
- V_FW_FILTER_WR_PRIO(f->fs.prio));
+ V_FW_FILTER_WR_PRIO(f->fs.prio) |
+ V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
fwr->smac_sel = 0;
@@ -691,7 +763,9 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
));
fwr->maci_to_matchtypem =
- cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
+ V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
+ V_FW_FILTER_WR_PORT(f->fs.val.iport) |
V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
fwr->ptcl = f->fs.val.proto;
fwr->ptclm = f->fs.mask.proto;
@@ -704,6 +778,20 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
fwr->fp = cpu_to_be16(f->fs.val.fport);
fwr->fpm = cpu_to_be16(f->fs.mask.fport);
+ if (adapter->params.filter2_wr_support) {
+ fwr->filter_type_swapmac =
+ V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
+ fwr->natmode_to_ulp_type =
+ V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
+ ULP_MODE_TCPDDP :
+ ULP_MODE_NONE) |
+ V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
+ memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
+ memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
+ fwr->newlport = cpu_to_be16(f->fs.nat_lport);
+ fwr->newfport = cpu_to_be16(f->fs.nat_fport);
+ }
+
/*
* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -1046,6 +1134,9 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
V_TCB_TIMESTAMP(0ULL) |
V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
1);
+ if (f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE)
+ set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
break;
}
default:
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index af8fa752..b7bcbf56 100644
--- a/drivers/net/cxgbe/cxgbe_filter.h
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -77,6 +77,7 @@ struct ch_filter_tuple {
* Filter specification
*/
struct ch_filter_specification {
+ void *private;
/* Administrative fields for filter. */
uint32_t hitcnts:1; /* count filter hits in TCB */
uint32_t prio:1; /* filter has priority over active/server */
@@ -99,6 +100,22 @@ struct ch_filter_specification {
uint32_t iq:10; /* ingress queue */
uint32_t eport:2; /* egress port to switch packet out */
+ uint32_t swapmac:1; /* swap SMAC/DMAC for loopback packet */
+ uint32_t newvlan:2; /* rewrite VLAN Tag */
+ uint8_t dmac[ETHER_ADDR_LEN]; /* new destination MAC address */
+ uint16_t vlan; /* VLAN Tag to insert */
+
+ /*
+ * Switch proxy/rewrite fields. An ingress packet which matches a
+ * filter with "switch" set will be looped back out as an egress
+ * packet -- potentially with some header rewriting.
+ */
+ uint32_t nat_mode:3; /* specify NAT operation mode */
+
+ uint8_t nat_lip[16]; /* local IP to use after NAT'ing */
+ uint8_t nat_fip[16]; /* foreign IP to use after NAT'ing */
+ uint16_t nat_lport; /* local port number to use after NAT'ing */
+ uint16_t nat_fport; /* foreign port number to use after NAT'ing */
/* Filter rule value/mask pairs. */
struct ch_filter_tuple val;
@@ -111,6 +128,23 @@ enum {
FILTER_SWITCH
};
+enum {
+ VLAN_REMOVE = 1,
+ VLAN_INSERT,
+ VLAN_REWRITE
+};
+
+enum {
+ NAT_MODE_NONE = 0, /* No NAT performed */
+ NAT_MODE_DIP, /* NAT on Dst IP */
+ NAT_MODE_DIP_DP, /* NAT on Dst IP, Dst Port */
+ NAT_MODE_DIP_DP_SIP, /* NAT on Dst IP, Dst Port and Src IP */
+ NAT_MODE_DIP_DP_SP, /* NAT on Dst IP, Dst Port and Src Port */
+ NAT_MODE_SIP_SP, /* NAT on Src IP and Src Port */
+ NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */
+ NAT_MODE_ALL /* NAT on entire 4-tuple */
+};
+
enum filter_type {
FILTER_TYPE_IPV4 = 0,
FILTER_TYPE_IPV6,
@@ -145,6 +179,7 @@ struct filter_entry {
u32 pending:1; /* filter action is pending FW reply */
struct filter_ctx *ctx; /* caller's completion hook */
struct clip_entry *clipt; /* CLIP Table entry for IPv6 */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
struct rte_eth_dev *dev; /* Port's rte eth device */
void *private; /* For use by apps using filter_entry */
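
For reference, the NAT_MODE_* values introduced in cxgbe_filter.h correspond to fixed combinations of the four header-rewrite bits that the flow parser collects (bit 0 = source IP, bit 1 = destination IP, bit 2 = source port, bit 3 = destination port). The sketch below restates that mapping as a standalone C program; the enum is redeclared locally purely for illustration and is not part of the patch.

#include <stdio.h>

/* Local redeclaration for this sketch, in the same order as cxgbe_filter.h. */
enum nat_mode {
    NAT_MODE_NONE = 0,
    NAT_MODE_DIP,
    NAT_MODE_DIP_DP,
    NAT_MODE_DIP_DP_SIP,
    NAT_MODE_DIP_DP_SP,
    NAT_MODE_SIP_SP,
    NAT_MODE_DIP_SIP_SP,
    NAT_MODE_ALL
};

/* Map the rewrite bitmask to a NAT mode; -1 means the combination is
 * not supported by the hardware.
 */
static int nat_mode_from_bits(unsigned int bits)
{
    switch (bits) {
    case 0x0: return NAT_MODE_NONE;        /* no rewrite                    */
    case 0x2: return NAT_MODE_DIP;         /* dst IP                        */
    case 0x5: return NAT_MODE_SIP_SP;      /* src IP + src port             */
    case 0x7: return NAT_MODE_DIP_SIP_SP;  /* src IP + dst IP + src port    */
    case 0xa: return NAT_MODE_DIP_DP;      /* dst IP + dst port             */
    case 0xb: return NAT_MODE_DIP_DP_SIP;  /* src IP + dst IP + dst port    */
    case 0xe: return NAT_MODE_DIP_DP_SP;   /* dst IP + src port + dst port  */
    case 0xf: return NAT_MODE_ALL;         /* full 4-tuple                  */
    default:  return -1;
    }
}

int main(void)
{
    unsigned int bits;

    for (bits = 0; bits < 16; bits++)
        printf("rewrite bits 0x%x -> nat mode %d\n",
               bits, nat_mode_from_bits(bits));
    return 0;
}
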
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 01c945f1..54ec7e59 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -95,6 +95,8 @@ cxgbe_fill_filter_region(struct adapter *adap,
ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
if (tp->port_shift >= 0)
ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+ if (tp->macmatch_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
if (ntuple_mask != hash_filter_mask)
return;
@@ -103,6 +105,46 @@ cxgbe_fill_filter_region(struct adapter *adap,
}
static int
+ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *umask = item->mask;
+ const struct rte_flow_item_eth *mask;
+
+ /* If user has not given any mask, then use chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
+
+ /* we don't support SRC_MAC filtering */
+ if (!is_zero_ether_addr(&mask->src))
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "src mac filtering not supported");
+
+ if (!is_zero_ether_addr(&mask->dst)) {
+ const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
+ const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
+ struct rte_flow *flow = (struct rte_flow *)fs->private;
+ struct port_info *pi = (struct port_info *)
+ (flow->dev->data->dev_private);
+ int idx;
+
+ idx = cxgbe_mpstcam_alloc(pi, addr, m);
+ if (idx <= 0)
+ return rte_flow_error_set(e, idx,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "unable to allocate mac"
+ " entry in h/w");
+ CXGBE_FILL_FS(idx, 0x1ff, macidx);
+ }
+
+ CXGBE_FILL_FS(be16_to_cpu(spec->type),
+ be16_to_cpu(mask->type), ethtype);
+ return 0;
+}
+
+static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
struct ch_filter_specification *fs,
struct rte_flow_error *e)
@@ -327,17 +369,199 @@ static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
}
static int
+cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
+{
+ const struct rte_flow_item *i;
+ int j, index = -ENOENT;
+
+ for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
+ if (i->type == type) {
+ index = j;
+ break;
+ }
+ }
+
+ return index;
+}
+
+static int
+ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
+{
+ /* nmode:
+ * BIT_0 = [src_ip], BIT_1 = [dst_ip]
+ * BIT_2 = [src_port], BIT_3 = [dst_port]
+ *
+ * Only the combinations below are supported, as per the hardware spec.
+ */
+ switch (nmode) {
+ case 0: /* 0000b */
+ fs->nat_mode = NAT_MODE_NONE;
+ break;
+ case 2: /* 0010b */
+ fs->nat_mode = NAT_MODE_DIP;
+ break;
+ case 5: /* 0101b */
+ fs->nat_mode = NAT_MODE_SIP_SP;
+ break;
+ case 7: /* 0111b */
+ fs->nat_mode = NAT_MODE_DIP_SIP_SP;
+ break;
+ case 10: /* 1010b */
+ fs->nat_mode = NAT_MODE_DIP_DP;
+ break;
+ case 11: /* 1011b */
+ fs->nat_mode = NAT_MODE_DIP_DP_SIP;
+ break;
+ case 14: /* 1110b */
+ fs->nat_mode = NAT_MODE_DIP_DP_SP;
+ break;
+ case 15: /* 1111b */
+ fs->nat_mode = NAT_MODE_ALL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+ const struct rte_flow_item items[],
+ uint8_t *nmode,
struct ch_filter_specification *fs,
struct rte_flow_error *e)
{
+ const struct rte_flow_action_of_set_vlan_vid *vlanid;
+ const struct rte_flow_action_of_push_vlan *pushvlan;
+ const struct rte_flow_action_set_ipv4 *ipv4;
+ const struct rte_flow_action_set_ipv6 *ipv6;
+ const struct rte_flow_action_set_tp *tp_port;
const struct rte_flow_action_phy_port *port;
+ int item_index;
switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
+ a->conf;
+ fs->newvlan = VLAN_REWRITE;
+ fs->vlan = vlanid->vlan_vid;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ pushvlan = (const struct rte_flow_action_of_push_vlan *)
+ a->conf;
+ if (pushvlan->ethertype != ETHER_TYPE_VLAN)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "only ethertype 0x8100 "
+ "supported for push vlan.");
+ fs->newvlan = VLAN_INSERT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ fs->newvlan = VLAN_REMOVE;
+ break;
case RTE_FLOW_ACTION_TYPE_PHY_PORT:
port = (const struct rte_flow_action_phy_port *)a->conf;
fs->eport = port->index;
break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV4);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV4 "
+ "found.");
+
+ ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+ memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+ *nmode |= 1 << 0;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV4);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV4 "
+ "found.");
+
+ ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+ memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+ *nmode |= 1 << 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV6);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV6 "
+ "found.");
+
+ ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+ memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+ *nmode |= 1 << 0;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV6);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV6 "
+ "found.");
+
+ ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+ memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+ *nmode |= 1 << 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_TCP);
+ if (item_index < 0) {
+ item_index =
+ cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_UDP);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_TCP or "
+ "RTE_FLOW_ITEM_TYPE_UDP found");
+ }
+
+ tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+ fs->nat_fport = be16_to_cpu(tp_port->port);
+ *nmode |= 1 << 2;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_TCP);
+ if (item_index < 0) {
+ item_index =
+ cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_UDP);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_TCP or "
+ "RTE_FLOW_ITEM_TYPE_UDP found");
+ }
+
+ tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+ fs->nat_lport = be16_to_cpu(tp_port->port);
+ *nmode |= 1 << 3;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_ETH "
+ "found");
+ fs->swapmac = 1;
+ break;
default:
/* We are not supposed to come here */
return rte_flow_error_set(e, EINVAL,
@@ -350,10 +574,12 @@ ch_rte_parse_atype_switch(const struct rte_flow_action *a,
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
+ const struct rte_flow_item items[],
const struct rte_flow_action action[],
struct rte_flow_error *e)
{
struct ch_filter_specification *fs = &flow->fs;
+ uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
const struct rte_flow_action_queue *q;
const struct rte_flow_action *a;
char abit = 0;
@@ -391,7 +617,22 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
case RTE_FLOW_ACTION_TYPE_COUNT:
fs->hitcnts = 1;
break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ nat_ipv4++;
+ goto action_switch;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ nat_ipv6++;
+ goto action_switch;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+action_switch:
/* We allow multiple switch actions, but switch is
* not compatible with either queue or drop
*/
@@ -399,7 +640,14 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
"overlapping action specified");
- ret = ch_rte_parse_atype_switch(a, fs, e);
+ if (nat_ipv4 && nat_ipv6)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Can't have one address ipv4 and the"
+ " other ipv6");
+
+ ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
+ e);
if (ret)
return ret;
fs->action = FILTER_SWITCH;
@@ -412,11 +660,24 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
}
}
+ if (ch_rte_parse_nat(nmode, fs))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "invalid settings for swich action");
return 0;
}
-struct chrte_fparse parseitem[] = {
- [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+static struct chrte_fparse parseitem[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .fptr = ch_rte_parsetype_eth,
+ .dmask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0xffff,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
.fptr = ch_rte_parsetype_port,
.dmask = &(const struct rte_flow_item_phy_port){
.index = 0x7,
@@ -454,10 +715,10 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
char repeat[ARRAY_SIZE(parseitem)] = {0};
for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
- struct chrte_fparse *idx = &flow->item_parser[i->type];
+ struct chrte_fparse *idx;
int ret;
- if (i->type > ARRAY_SIZE(parseitem))
+ if (i->type >= ARRAY_SIZE(parseitem))
return rte_flow_error_set(e, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
i, "Item not supported");
@@ -478,6 +739,7 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
if (ret)
return ret;
+ idx = &flow->item_parser[i->type];
if (!idx || !idx->fptr) {
return rte_flow_error_set(e, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, i,
@@ -503,7 +765,6 @@ cxgbe_flow_parse(struct rte_flow *flow,
struct rte_flow_error *e)
{
int ret;
-
/* parse user request into ch_filter_specification */
ret = cxgbe_rtef_parse_attr(flow, attr, e);
if (ret)
@@ -511,7 +772,7 @@ cxgbe_flow_parse(struct rte_flow *flow,
ret = cxgbe_rtef_parse_items(flow, item, e);
if (ret)
return ret;
- return cxgbe_rtef_parse_actions(flow, action, e);
+ return cxgbe_rtef_parse_actions(flow, item, action, e);
}
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
@@ -582,6 +843,7 @@ cxgbe_flow_create(struct rte_eth_dev *dev,
flow->item_parser = parseitem;
flow->dev = dev;
+ flow->fs.private = (void *)flow;
if (cxgbe_flow_parse(flow, attr, item, action, e)) {
t4_os_free(flow);
@@ -636,6 +898,17 @@ static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
return ctx.result;
}
+ fs = &flow->fs;
+ if (fs->mask.macidx) {
+ struct port_info *pi = (struct port_info *)
+ (dev->data->dev_private);
+ int ret;
+
+ ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
+ if (!ret)
+ return ret;
+ }
+
return 0;
}
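
As a usage illustration of what the new destination-MAC matching (backed by the MPS TCAM) and switch actions enable, an application could now try to offload a rule like the one below through the public rte_flow API. This is a sketch against the DPDK 18.11-era rte_flow definitions; the port id, MAC address and physical-port index are arbitrary placeholders, and whether the rule is accepted still depends on the adapter, firmware and filter configuration.

#include <rte_flow.h>
#include <rte_ether.h>

/* Match frames to one destination MAC and loop them back out of physical
 * port 0 with source/destination MACs swapped. Meant to be called after
 * the port has been configured and started; minimal error handling.
 */
static struct rte_flow *offload_mac_swap(uint16_t port_id,
                                         struct rte_flow_error *err)
{
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item_eth eth_spec = {
        .dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
    };
    struct rte_flow_item_eth eth_mask = {
        .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &eth_spec, .mask = &eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_phy_port phy = { .index = 0 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_MAC_SWAP },
        { .type = RTE_FLOW_ACTION_TYPE_PHY_PORT, .conf = &phy },
        { .type = RTE_FLOW_ACTION_TYPE_COUNT },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return rte_flow_create(port_id, &attr, pattern, actions, err);
}

The COUNT action maps to the hitcnts bit of the filter specification, so hit statistics can later be read back through rte_flow_query().
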
diff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h
index 0f750474..718bf3d0 100644
--- a/drivers/net/cxgbe/cxgbe_flow.h
+++ b/drivers/net/cxgbe/cxgbe_flow.h
@@ -7,6 +7,7 @@
#include <rte_flow_driver.h>
#include "cxgbe_filter.h"
+#include "mps_tcam.h"
#include "cxgbe.h"
#define CXGBE_FLOW_POLL_US 10
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c3938e8d..88dc851f 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -38,6 +38,8 @@
#include "t4_msg.h"
#include "cxgbe.h"
#include "clip_tbl.h"
+#include "l2t.h"
+#include "mps_tcam.h"
/**
* Allocate a chunk of memory. The allocated memory is cleared.
@@ -99,6 +101,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_act_open_rpl *p = (const void *)rsp;
hash_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_L2T_WRITE_RPL) {
+ const struct cpl_l2t_write_rpl *p = (const void *)rsp;
+
+ do_l2t_write_rpl(q->adapter, p);
} else {
dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
opcode);
@@ -1135,13 +1141,17 @@ static int adap_init0(struct adapter *adap)
V_FW_PARAMS_PARAM_Y(0) | \
V_FW_PARAMS_PARAM_Z(0))
- params[0] = FW_PARAM_PFVF(FILTER_START);
- params[1] = FW_PARAM_PFVF(FILTER_END);
- ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ params[0] = FW_PARAM_PFVF(L2T_START);
+ params[1] = FW_PARAM_PFVF(L2T_END);
+ params[2] = FW_PARAM_PFVF(FILTER_START);
+ params[3] = FW_PARAM_PFVF(FILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val);
if (ret < 0)
goto bye;
- adap->tids.ftid_base = val[0];
- adap->tids.nftids = val[1] - val[0] + 1;
+ adap->l2t_start = val[0];
+ adap->l2t_end = val[1];
+ adap->tids.ftid_base = val[2];
+ adap->tids.nftids = val[3] - val[2] + 1;
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
@@ -1170,6 +1180,16 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+ /* See if FW supports FW_FILTER2 work request */
+ if (is_t4(adap->params.chip)) {
+ adap->params.filter2_wr_support = 0;
+ } else {
+ params[0] = FW_PARAM_DEV(FILTER2_WR);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
+ }
+
/* query tid-related parameters */
params[0] = FW_PARAM_DEV(NTID);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
@@ -1332,10 +1352,8 @@ int link_start(struct port_info *pi)
ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
-1, 1, true);
if (ret == 0) {
- ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
- pi->xact_addr_filt,
- (u8 *)&pi->eth_dev->data->mac_addrs[0],
- true, true);
+ ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
+ (u8 *)&pi->eth_dev->data->mac_addrs[0]);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
@@ -1679,10 +1697,12 @@ void cxgbe_close(struct adapter *adapter)
int i;
if (adapter->flags & FULL_INIT_DONE) {
- if (is_pf4(adapter))
- t4_intr_disable(adapter);
tid_free(&adapter->tids);
+ t4_cleanup_mpstcam(adapter);
t4_cleanup_clip_tbl(adapter);
+ t4_cleanup_l2t(adapter);
+ if (is_pf4(adapter))
+ t4_intr_disable(adapter);
t4_sge_tx_monitor_stop(adapter);
t4_free_sge_resources(adapter);
for_each_port(adapter, i) {
@@ -1690,12 +1710,7 @@ void cxgbe_close(struct adapter *adapter)
if (pi->viid != 0)
t4_free_vi(adapter, adapter->mbox,
adapter->pf, 0, pi->viid);
- rte_free(pi->eth_dev->data->mac_addrs);
- /* Skip first port since it'll be freed by DPDK stack */
- if (i) {
- rte_free(pi->eth_dev->data->dev_private);
- rte_eth_dev_release_port(pi->eth_dev);
- }
+ rte_eth_dev_release_port(pi->eth_dev);
}
adapter->flags &= ~FULL_INIT_DONE;
}
@@ -1855,12 +1870,23 @@ allocate_mac:
dev_warn(adapter, "could not allocate CLIP. Continuing\n");
}
+ adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
+ if (!adapter->l2t) {
+ /* We tolerate a lack of L2T, giving up some functionality */
+ dev_warn(adapter, "could not allocate L2T. Continuing\n");
+ }
+
if (tid_init(&adapter->tids) < 0) {
/* Disable filtering support */
dev_warn(adapter, "could not allocate TID table, "
"filter support disabled. Continuing\n");
}
+ adapter->mpstcam = t4_init_mpstcam(adapter);
+ if (!adapter->mpstcam)
+ dev_warn(adapter, "could not allocate mps tcam table."
+ " Continuing\n");
+
if (is_hashfilter(adapter)) {
if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
u32 hash_base, hash_reg;
@@ -1887,14 +1913,7 @@ out_free:
if (pi->viid != 0)
t4_free_vi(adapter, adapter->mbox, adapter->pf,
0, pi->viid);
- /* Skip first port since it'll be de-allocated by DPDK */
- if (i == 0)
- continue;
- if (pi->eth_dev) {
- if (pi->eth_dev->data->dev_private)
- rte_free(pi->eth_dev->data->dev_private);
- rte_eth_dev_release_port(pi->eth_dev);
- }
+ rte_eth_dev_release_port(pi->eth_dev);
}
if (adapter->flags & FW_OK)
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
index 4214d031..6223e125 100644
--- a/drivers/net/cxgbe/cxgbevf_main.c
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -282,14 +282,7 @@ out_free:
if (pi->viid != 0)
t4_free_vi(adapter, adapter->mbox, adapter->pf,
0, pi->viid);
- /* Skip first port since it'll be de-allocated by DPDK */
- if (i == 0)
- continue;
- if (pi->eth_dev) {
- if (pi->eth_dev->data->dev_private)
- rte_free(pi->eth_dev->data->dev_private);
- rte_eth_dev_release_port(pi->eth_dev);
- }
+ rte_eth_dev_release_port(pi->eth_dev);
}
return -err;
}
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
new file mode 100644
index 00000000..814188fe
--- /dev/null
+++ b/drivers/net/cxgbe/l2t.c
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include "common.h"
+#include "l2t.h"
+
+/**
+ * cxgbe_l2t_release - Release associated L2T entry
+ * @e: L2T entry to release
+ *
+ * Releases ref count and frees up an L2T entry from L2T table
+ */
+void cxgbe_l2t_release(struct l2t_entry *e)
+{
+ if (rte_atomic32_read(&e->refcnt) != 0)
+ rte_atomic32_dec(&e->refcnt);
+}
+
+/**
+ * Process a CPL_L2T_WRITE_RPL. Note that the TID in the reply is really
+ * the L2T index it refers to.
+ */
+void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
+{
+ struct l2t_data *d = adap->l2t;
+ unsigned int tid = GET_TID(rpl);
+ unsigned int l2t_idx = tid % L2T_SIZE;
+
+ if (unlikely(rpl->status != CPL_ERR_NONE)) {
+ dev_err(adap,
+ "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, l2t_idx);
+ return;
+ }
+
+ if (tid & F_SYNC_WR) {
+ struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
+
+ t4_os_lock(&e->lock);
+ if (e->state != L2T_STATE_SWITCHING)
+ e->state = L2T_STATE_VALID;
+ t4_os_unlock(&e->lock);
+ }
+}
+
+/**
+ * Write an L2T entry. Must be called with the entry locked.
+ * The write may be synchronous or asynchronous.
+ */
+static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync,
+ bool loopback, bool arpmiss)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct l2t_data *d = adap->l2t;
+ struct rte_mbuf *mbuf;
+ struct cpl_l2t_write_req *req;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int l2t_idx = e->idx + d->l2t_start;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+ ctrlq = &adap->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_l2t_write_req *);
+ INIT_TP_WR(req, 0);
+
+ OPCODE_TID(req) =
+ cpu_to_be32(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+ l2t_idx | V_SYNC_WR(sync) |
+ V_TID_QID(adap->sge.fw_evtq.abs_id)));
+ req->params = cpu_to_be16(V_L2T_W_PORT(e->lport) |
+ V_L2T_W_LPBK(loopback) |
+ V_L2T_W_ARPMISS(arpmiss) |
+ V_L2T_W_NOREPLY(!sync));
+ req->l2t_idx = cpu_to_be16(l2t_idx);
+ req->vlan = cpu_to_be16(e->vlan);
+ rte_memcpy(req->dst_mac, e->dmac, ETHER_ADDR_LEN);
+
+ if (loopback)
+ memset(req->dst_mac, 0, ETHER_ADDR_LEN);
+
+ t4_mgmt_tx(ctrlq, mbuf);
+
+ if (sync && e->state != L2T_STATE_SWITCHING)
+ e->state = L2T_STATE_SYNC_WRITE;
+
+ return 0;
+}
+
+/**
+ * find_or_alloc_l2e - Find/Allocate a free L2T entry
+ * @d: L2T table
+ * @vlan: VLAN id to compare/add
+ * @port: port id to compare/add
+ * @dmac: Destination MAC address to compare/add
+ * Returns pointer to the L2T entry found/created
+ *
+ * Finds/Allocates an L2T entry to be used by switching rule of a filter.
+ */
+static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
+ u8 port, u8 *dmac)
+{
+ struct l2t_entry *end, *e;
+ struct l2t_entry *first_free = NULL;
+
+ for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
+ if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (e->state == L2T_STATE_SWITCHING) {
+ if ((!memcmp(e->dmac, dmac, ETHER_ADDR_LEN)) &&
+ e->vlan == vlan && e->lport == port)
+ goto exists;
+ }
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto found;
+ }
+
+ return NULL;
+
+found:
+ e->state = L2T_STATE_UNUSED;
+
+exists:
+ return e;
+}
+
+static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
+ u16 vlan, u8 port,
+ u8 *eth_addr)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct l2t_data *d = adap->l2t;
+ struct l2t_entry *e;
+ int ret = 0;
+
+ t4_os_write_lock(&d->lock);
+ e = find_or_alloc_l2e(d, vlan, port, eth_addr);
+ if (e) {
+ t4_os_lock(&e->lock);
+ if (!rte_atomic32_read(&e->refcnt)) {
+ e->state = L2T_STATE_SWITCHING;
+ e->vlan = vlan;
+ e->lport = port;
+ rte_memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
+ rte_atomic32_set(&e->refcnt, 1);
+ ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
+ if (ret < 0)
+ dev_debug(adap, "Failed to write L2T entry: %d",
+ ret);
+ } else {
+ rte_atomic32_inc(&e->refcnt);
+ }
+ t4_os_unlock(&e->lock);
+ }
+ t4_os_write_unlock(&d->lock);
+
+ return ret ? NULL : e;
+}
+
+/**
+ * cxgbe_l2t_alloc_switching - Allocate a L2T entry for switching rule
+ * @dev: rte_eth_dev pointer
+ * @vlan: VLAN Id
+ * @port: Associated port
+ * @dmac: Destination MAC address to add to L2T
+ * Returns pointer to the allocated l2t entry
+ *
+ * Allocates a L2T entry for use by switching rule of a filter
+ */
+struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan,
+ u8 port, u8 *dmac)
+{
+ return t4_l2t_alloc_switching(dev, vlan, port, dmac);
+}
+
+/**
+ * Initialize L2 Table
+ */
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
+{
+ unsigned int l2t_size;
+ unsigned int i;
+ struct l2t_data *d;
+
+ if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+ return NULL;
+ l2t_size = l2t_end - l2t_start + 1;
+
+ d = t4_os_alloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
+ if (!d)
+ return NULL;
+
+ d->l2t_start = l2t_start;
+ d->l2t_size = l2t_size;
+
+ t4_os_rwlock_init(&d->lock);
+
+ for (i = 0; i < d->l2t_size; ++i) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ t4_os_lock_init(&d->l2tab[i].lock);
+ rte_atomic32_set(&d->l2tab[i].refcnt, 0);
+ }
+
+ return d;
+}
+
+/**
+ * Cleanup L2 Table
+ */
+void t4_cleanup_l2t(struct adapter *adap)
+{
+ if (adap->l2t)
+ t4_os_free(adap->l2t);
+}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
new file mode 100644
index 00000000..22a34e38
--- /dev/null
+++ b/drivers/net/cxgbe/l2t.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef _CXGBE_L2T_H_
+#define _CXGBE_L2T_H_
+
+#include "t4_msg.h"
+
+enum {
+ L2T_SIZE = 4096 /* # of L2T entries */
+};
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+
+ /* when state is one of the below the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
+/*
+ * State for the corresponding entry of the HW L2 table.
+ */
+struct l2t_entry {
+ u16 state; /* entry state */
+ u16 idx; /* entry index within in-memory table */
+ u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+ u8 lport; /* destination port */
+ u8 dmac[ETHER_ADDR_LEN]; /* destination MAC address */
+ rte_spinlock_t lock; /* entry lock */
+ rte_atomic32_t refcnt; /* entry reference count */
+};
+
+struct l2t_data {
+ unsigned int l2t_start; /* start index of our piece of the L2T */
+ unsigned int l2t_size; /* number of entries in l2tab */
+ rte_rwlock_t lock; /* table rw lock */
+ struct l2t_entry l2tab[0]; /* MUST BE LAST */
+};
+
+#define L2T_LPBK true
+#define L2T_ARPMISS true
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define S_SYNC_WR 12
+#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
+#define F_SYNC_WR V_SYNC_WR(1)
+
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
+void t4_cleanup_l2t(struct adapter *adap);
+struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan,
+ u8 port, u8 *dmac);
+void cxgbe_l2t_release(struct l2t_entry *e);
+void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+#endif /* _CXGBE_L2T_H_ */
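
The do_l2t_write_rpl() handler above depends on a firmware convention: the TID of a CPL_L2T_WRITE_RPL carries the L2T index in its low bits, with bit 12 (F_SYNC_WR) marking a synchronous write. A minimal standalone decoder of that encoding, reusing the same bit definitions as l2t.h, is sketched below; the tid values fed to it are made-up examples.

#include <stdio.h>

#define L2T_SIZE     4096            /* number of L2T entries, as in l2t.h */

/* identifies sync vs async L2T_WRITE_REQs, mirroring l2t.h */
#define S_SYNC_WR    12
#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
#define F_SYNC_WR    V_SYNC_WR(1)

/* Decode the tid of an L2T write reply into its index and sync flag.
 * 'tid' is assumed to already be extracted from the CPL header.
 */
static void decode_l2t_rpl_tid(unsigned int tid)
{
    unsigned int l2t_idx = tid % L2T_SIZE;
    int sync = !!(tid & F_SYNC_WR);

    printf("L2T index %u, %s write\n", l2t_idx,
           sync ? "synchronous" : "asynchronous");
}

int main(void)
{
    decode_l2t_rpl_tid(V_SYNC_WR(1) | 37);  /* sync write of entry 37    */
    decode_l2t_rpl_tid(2050);               /* async write of entry 2050 */
    return 0;
}
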
diff --git a/drivers/net/cxgbe/meson.build b/drivers/net/cxgbe/meson.build
index 7c69a34b..c51af26e 100644
--- a/drivers/net/cxgbe/meson.build
+++ b/drivers/net/cxgbe/meson.build
@@ -9,6 +9,8 @@ sources = files('cxgbe_ethdev.c',
'cxgbe_filter.c',
'cxgbe_flow.c',
'clip_tbl.c',
+ 'mps_tcam.c',
+ 'l2t.c',
'base/t4_hw.c',
'base/t4vf_hw.c')
includes += include_directories('base')
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
new file mode 100644
index 00000000..02ec69a9
--- /dev/null
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "mps_tcam.h"
+
+static inline bool
+match_entry(struct mps_tcam_entry *entry, const u8 *eth_addr, const u8 *mask)
+{
+ if (!memcmp(eth_addr, entry->eth_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mask, entry->mask, ETHER_ADDR_LEN))
+ return true;
+ return false;
+}
+
+static int cxgbe_update_free_idx(struct mpstcam_table *t)
+{
+ struct mps_tcam_entry *entry = t->entry;
+ u16 i, next = t->free_idx + 1;
+
+ if (entry[t->free_idx].state == MPS_ENTRY_UNUSED)
+ /* You are already pointing to a free entry !! */
+ return 0;
+
+ /* loop until we wrap back around to the index where we started */
+ for (i = next; i != t->free_idx; i++) {
+ if (i == t->size)
+ /* rollback and search free entry from start */
+ i = 0;
+
+ if (entry[i].state == MPS_ENTRY_UNUSED) {
+ t->free_idx = i;
+ return 0;
+ }
+ }
+
+ return -1; /* table is full */
+}
+
+static struct mps_tcam_entry *
+cxgbe_mpstcam_lookup(struct mpstcam_table *t, const u8 *eth_addr,
+ const u8 *mask)
+{
+ struct mps_tcam_entry *entry = t->entry;
+ int i;
+
+ if (!entry)
+ return NULL;
+
+ for (i = 0; i < t->size; i++) {
+ if (entry[i].state == MPS_ENTRY_UNUSED)
+ continue; /* entry is not being used */
+ if (match_entry(&entry[i], eth_addr, mask))
+ return &entry[i];
+ }
+
+ return NULL;
+}
+
+int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
+ const u8 *mask)
+{
+ struct adapter *adap = pi->adapter;
+ struct mpstcam_table *mpstcam = adap->mpstcam;
+ struct mps_tcam_entry *entry;
+ int ret;
+
+ if (!adap->mpstcam) {
+ dev_err(adap, "mpstcam table is not available\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If entry already present, return it. */
+ t4_os_write_lock(&mpstcam->lock);
+ entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
+ if (entry) {
+ rte_atomic32_add(&entry->refcnt, 1);
+ t4_os_write_unlock(&mpstcam->lock);
+ return entry->idx;
+ }
+
+ if (mpstcam->full) {
+ t4_os_write_unlock(&mpstcam->lock);
+ dev_err(adap, "mps-tcam table is full\n");
+ return -ENOMEM;
+ }
+
+ ret = t4_alloc_raw_mac_filt(adap, pi->viid, eth_addr, mask,
+ mpstcam->free_idx, 0, pi->port_id, false);
+ if (ret <= 0) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return ret;
+ }
+
+ /* Fill in the new values */
+ entry = &mpstcam->entry[ret];
+ memcpy(entry->eth_addr, eth_addr, ETHER_ADDR_LEN);
+ memcpy(entry->mask, mask, ETHER_ADDR_LEN);
+ rte_atomic32_set(&entry->refcnt, 1);
+ entry->state = MPS_ENTRY_USED;
+
+ if (cxgbe_update_free_idx(mpstcam))
+ mpstcam->full = true;
+
+ t4_os_write_unlock(&mpstcam->lock);
+ return ret;
+}
+
+int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
+{
+ struct adapter *adap = pi->adapter;
+ struct mpstcam_table *mpstcam = adap->mpstcam;
+ struct mps_tcam_entry *entry;
+
+ if (!mpstcam)
+ return -EOPNOTSUPP;
+ t4_os_write_lock(&mpstcam->lock);
+ if (idx != -1 && idx >= mpstcam->size) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return -EINVAL;
+ }
+ if (idx >= 0) {
+ entry = &mpstcam->entry[idx];
+ /* user wants to modify an existing entry.
+ * verify if entry exists
+ */
+ if (entry->state != MPS_ENTRY_USED) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return -EINVAL;
+ }
+ }
+
+ idx = t4_change_mac(adap, adap->mbox, pi->viid, idx, addr, true, true);
+ if (idx < 0) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return idx;
+ }
+
+ /* idx can now be different from what user provided */
+ entry = &mpstcam->entry[idx];
+ memcpy(entry->eth_addr, addr, ETHER_ADDR_LEN);
+ /* NOTE: the idx returned by t4_change_mac differs from the
+ * user-provided value only when the user passed -1
+ */
+ if (entry->state == MPS_ENTRY_UNUSED) {
+ rte_atomic32_set(&entry->refcnt, 1);
+ entry->state = MPS_ENTRY_USED;
+ }
+
+ if (cxgbe_update_free_idx(mpstcam))
+ mpstcam->full = true;
+
+ t4_os_write_unlock(&mpstcam->lock);
+ return idx;
+}
+
+/**
+ * hold appropriate locks while calling this.
+ */
+static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
+{
+ memset(entry->eth_addr, 0, ETHER_ADDR_LEN);
+ memset(entry->mask, 0, ETHER_ADDR_LEN);
+ rte_atomic32_clear(&entry->refcnt);
+ entry->state = MPS_ENTRY_UNUSED;
+}
+
+/**
+ * ret < 0: fatal error
+ * ret = 0: entry removed in h/w
+ * ret > 0: updated refcount.
+ */
+int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
+{
+ struct adapter *adap = pi->adapter;
+ struct mpstcam_table *t = adap->mpstcam;
+ struct mps_tcam_entry *entry;
+ int ret;
+
+ if (!t)
+ return -EOPNOTSUPP;
+ t4_os_write_lock(&t->lock);
+ entry = &t->entry[idx];
+ if (entry->state == MPS_ENTRY_UNUSED) {
+ t4_os_write_unlock(&t->lock);
+ return -EINVAL;
+ }
+
+ if (rte_atomic32_read(&entry->refcnt) == 1)
+ ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
+ entry->mask, idx, 1, pi->port_id,
+ false);
+ else
+ ret = rte_atomic32_sub_return(&entry->refcnt, 1);
+
+ if (ret == 0) {
+ reset_mpstcam_entry(entry);
+ t->full = false; /* We have at least 1 free entry */
+ cxgbe_update_free_idx(t);
+ }
+
+ t4_os_write_unlock(&t->lock);
+ return ret;
+}
+
+struct mpstcam_table *t4_init_mpstcam(struct adapter *adap)
+{
+ struct mpstcam_table *t;
+ int i;
+ u16 size = adap->params.arch.mps_tcam_size;
+
+ t = t4_os_alloc(sizeof(*t) + size * sizeof(struct mps_tcam_entry));
+ if (!t)
+ return NULL;
+
+ t4_os_rwlock_init(&t->lock);
+ t->full = false;
+ t->size = size;
+
+ for (i = 0; i < size; i++) {
+ reset_mpstcam_entry(&t->entry[i]);
+ t->entry[i].mpstcam = t;
+ t->entry[i].idx = i;
+ }
+
+ /* first entry is used by chip. this is overwritten only
+ * in t4_cleanup_mpstcam()
+ */
+ t->entry[0].state = MPS_ENTRY_USED;
+ t->free_idx = 1;
+
+ return t;
+}
+
+void t4_cleanup_mpstcam(struct adapter *adap)
+{
+ if (adap->mpstcam) {
+ t4_os_free(adap->mpstcam->entry);
+ t4_os_free(adap->mpstcam);
+ }
+}
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
new file mode 100644
index 00000000..c3d6fe0d
--- /dev/null
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_MPSTCAM_H_
+#define _CXGBE_MPSTCAM_H_
+
+#include "common.h"
+
+enum {
+ MPS_ENTRY_UNUSED, /* Keep this first so memset 0 renders
+ * the correct state. Other states can
+ * be added in future like MPS_ENTRY_BUSY
+ * to reduce contention while mboxing
+ * the request to f/w or to denote attributes
+ * for a specific entry
+ */
+ MPS_ENTRY_USED,
+};
+
+struct mps_tcam_entry {
+ u8 state;
+ u16 idx;
+
+ /* add data here which uniquely defines an entry */
+ u8 eth_addr[ETHER_ADDR_LEN];
+ u8 mask[ETHER_ADDR_LEN];
+
+ struct mpstcam_table *mpstcam; /* backptr */
+ rte_atomic32_t refcnt;
+};
+
+struct mpstcam_table {
+ u16 size;
+ rte_rwlock_t lock;
+ u16 free_idx; /* next free index */
+ bool full; /* since free index can be present
+ * anywhere in the table, size and
+ * free_idx cannot alone determine
+ * if the table is full
+ */
+ struct mps_tcam_entry entry[0];
+};
+
+struct mpstcam_table *t4_init_mpstcam(struct adapter *adap);
+void t4_cleanup_mpstcam(struct adapter *adap);
+int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *mac, const u8 *mask);
+int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx);
+int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr);
+
+#endif /* _CXGBE_MPSTCAM_H_ */
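
mps_tcam.c shares one hardware TCAM entry among all filters that use the same address/mask pair: cxgbe_mpstcam_alloc() bumps the reference count and returns the existing index on a match, and cxgbe_mpstcam_remove() only frees the hardware slot once the last reference is gone (return 0), otherwise it reports the remaining count. The sketch below is a small software-only model of that sharing policy, with local types, no locking and no mailbox calls; it illustrates the design and is not driver code.

#include <string.h>
#include <stdbool.h>

#define MODEL_TCAM_SIZE 8
#define MAC_LEN         6

struct model_entry {
    bool used;
    int refcnt;
    unsigned char addr[MAC_LEN];
    unsigned char mask[MAC_LEN];
};

static struct model_entry tcam[MODEL_TCAM_SIZE];

/* Return the index holding addr/mask, bumping its refcount, or claim the
 * first free slot; -1 when the table is full.
 */
static int model_alloc(const unsigned char *addr, const unsigned char *mask)
{
    int i, free_idx = -1;

    for (i = 0; i < MODEL_TCAM_SIZE; i++) {
        if (!tcam[i].used) {
            if (free_idx < 0)
                free_idx = i;
            continue;
        }
        if (!memcmp(tcam[i].addr, addr, MAC_LEN) &&
            !memcmp(tcam[i].mask, mask, MAC_LEN)) {
            tcam[i].refcnt++;
            return i;
        }
    }
    if (free_idx < 0)
        return -1;
    tcam[free_idx].used = true;
    tcam[free_idx].refcnt = 1;
    memcpy(tcam[free_idx].addr, addr, MAC_LEN);
    memcpy(tcam[free_idx].mask, mask, MAC_LEN);
    return free_idx;
}

/* Drop one reference; the slot is only released when the count hits zero
 * (0 = freed, >0 = references remaining, -1 = entry was not in use).
 */
static int model_remove(int idx)
{
    if (!tcam[idx].used)
        return -1;
    if (--tcam[idx].refcnt > 0)
        return tcam[idx].refcnt;
    memset(&tcam[idx], 0, sizeof(tcam[idx]));
    return 0;
}

int main(void)
{
    static const unsigned char mac[MAC_LEN] = { 2, 0, 0, 0, 0, 1 };
    static const unsigned char mask[MAC_LEN] = { 0xff, 0xff, 0xff,
                                                 0xff, 0xff, 0xff };
    int idx = model_alloc(mac, mask);   /* new slot, refcnt 1  */
    int idx2 = model_alloc(mac, mask);  /* same slot, refcnt 2 */

    return (idx == idx2 && model_remove(idx) == 1 &&
            model_remove(idx) == 0) ? 0 : 1;
}
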
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 4ea40d19..f9d2d48a 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1873,10 +1873,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
/* Size needs to be multiple of 16, including status entry. */
iq->size = cxgbe_roundup(iq->size, 16);
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->device->driver->name,
- fwevtq ? "fwq_ring" : "rx_ring",
- eth_dev->data->port_id, queue_id);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id,
+ fwevtq ? "fwq_ring" : "rx_ring");
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
@@ -1938,10 +1937,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = s->fl_starve_thres - 1 + 2 * 8;
fl->size = cxgbe_roundup(fl->size, 8);
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->device->driver->name,
- fwevtq ? "fwq_ring" : "fl_ring",
- eth_dev->data->port_id, queue_id);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id,
+ fwevtq ? "fwq_ring" : "fl_ring");
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
fl->desc = alloc_ring(fl->size, sizeof(__be64),
@@ -2144,9 +2142,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->device->driver->name, "tx_ring",
- eth_dev->data->port_id, queue_id);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id, "tx_ring");
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
@@ -2223,9 +2220,8 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->device->driver->name, "ctrl_tx_ring",
- eth_dev->data->port_id, queue_id);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id, "ctrl_tx_ring");
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index d7a0a50c..1c4f7d91 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -38,6 +38,7 @@ LDLIBS += -lrte_bus_dpaa
LDLIBS += -lrte_mempool_dpaa
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_common_dpaax
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA_PMD)-include := rte_pmd_dpaa.h
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 7a950ac0..d0572b3d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -47,16 +47,15 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup;
@@ -148,11 +147,30 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct dpaa_if *dpaa_intf = dev->data->dev_private;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
+ uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
return -EINVAL;
+ /*
+ * Refuse mtu that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (dev->data->min_rx_buf_size &&
+ !dev->data->scattered_rx && frame_size > buffsz) {
+ DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
+ return -EINVAL;
+ }
+
+ /* check <seg size> * <max_seg> >= max_frame */
+ if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
+ (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
+ DPAA_PMD_ERR("Too big to fit for Max SG list %d",
+ buffsz * DPAA_SGT_MAX_ENTRIES);
+ return -EINVAL;
+ }
+
if (frame_size > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads &=
DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -194,15 +212,32 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
}
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len;
+
+ DPAA_PMD_DEBUG("enabling jumbo");
+
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- DPAA_MAX_RX_PKT_LEN) {
- fman_if_set_maxfrm(dpaa_intf->fif,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
- return 0;
- } else {
- return -1;
+ DPAA_MAX_RX_PKT_LEN)
+ max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ else {
+ DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
+ "supported is %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ DPAA_MAX_RX_PKT_LEN);
+ max_len = DPAA_MAX_RX_PKT_LEN;
}
+
+ fman_if_set_maxfrm(dpaa_intf->fif, max_len);
+ dev->data->mtu = max_len
+ - ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
}
+
+ if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+ DPAA_PMD_DEBUG("enabling scatter mode");
+ fman_if_set_sg(dpaa_intf->fif, 1);
+ dev->data->scattered_rx = 1;
+ }
+
return 0;
}
@@ -300,15 +335,21 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
- dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
- dev_info->speed_capa = (ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G);
+
+ if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ dev_info->speed_capa = ETH_LINK_SPEED_1G;
+ else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
+ else
+ DPAA_PMD_ERR("invalid link_speed: %s, %d",
+ dpaa_intf->name, dpaa_intf->fif->mac_type);
+
dev_info->rx_offload_capa = dev_rx_offloads_sup |
dev_rx_offloads_nodis;
dev_info->tx_offload_capa = dev_tx_offloads_sup |
@@ -514,6 +555,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
struct qm_mcc_initfq opts = {0};
u32 flags = 0;
int ret;
+ u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
@@ -527,6 +569,28 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
queue_idx, rxq->fqid);
+ /* Max packet can fit in single buffer */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+ ;
+ } else if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ buffsz * DPAA_SGT_MAX_ENTRIES) {
+ DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+ "MaxSGlist %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz * DPAA_SGT_MAX_ENTRIES);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ } else {
+ DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz - RTE_PKTMBUF_HEADROOM);
+ }
+
if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
struct fman_if_ic_params icp;
uint32_t fd_offset;
@@ -553,10 +617,13 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
fman_if_set_bp(dpaa_intf->fif, mp->size,
dpaa_intf->bp_info->bpid, bp_size);
dpaa_intf->valid = 1;
- DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
- dpaa_intf->name, fd_offset,
- fman_if_get_fdoff(dpaa_intf->fif));
+ DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
+ dpaa_intf->name, fd_offset,
+ fman_if_get_fdoff(dpaa_intf->fif));
}
+ DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
+ fman_if_get_sg_enable(dpaa_intf->fif),
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
/* checking if push mode only, no error check for now */
if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
dpaa_push_queue_idx++;
@@ -594,8 +661,13 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
return ret;
}
- rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
- rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
+ } else {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+ rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ }
+
rxq->is_static = true;
}
dev->data->rx_queues[queue_idx] = rxq;
@@ -630,7 +702,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
struct qm_mcc_initfq opts = {0};
if (dpaa_push_mode_max_queue)
- DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
+ DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
+ "PUSH mode already enabled for first %d queues.\n"
"To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
dpaa_push_mode_max_queue);
@@ -1012,7 +1085,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
{
struct qm_mcc_initfq opts = {0};
int ret;
- u32 flags = 0;
+ u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
struct qm_mcc_initcgr cgr_opts = {
.we_mask = QM_CGR_WE_CS_THRES |
QM_CGR_WE_CSTD_EN |
@@ -1025,15 +1098,18 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
PMD_INIT_FUNC_TRACE();
- ret = qman_reserve_fqid(fqid);
- if (ret) {
- DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
- fqid, ret);
- return -EINVAL;
+ if (fqid) {
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+ } else {
+ flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
}
-
DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
- ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ ret = qman_create_fq(fqid, flags, fq);
if (ret) {
DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
fqid, ret);
@@ -1052,7 +1128,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
if (ret) {
DPAA_PMD_WARN(
"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
- fqid, ret);
+ fq->fqid, ret);
goto without_cgr;
}
opts.we_mask |= QM_INITFQ_WE_CGID;
@@ -1060,7 +1136,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
}
without_cgr:
- ret = qman_init_fq(fq, flags, &opts);
+ ret = qman_init_fq(fq, 0, &opts);
if (ret)
DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
return ret;
@@ -1213,7 +1289,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
if (default_q)
fqid = cfg->rx_def;
else
- fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx *
DPAA_PCD_FQID_MULTIPLIER + loop;
if (dpaa_intf->cgr_rx)
@@ -1304,6 +1380,9 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
fman_if_reset_mcast_filter_table(fman_intf);
/* Reset interface statistics */
fman_if_stats_reset(fman_intf);
+ /* Disable SG by default */
+ fman_if_set_sg(fman_intf, 0);
+ fman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);
return 0;
@@ -1360,10 +1439,6 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
rte_free(dpaa_intf->tx_queues);
dpaa_intf->tx_queues = NULL;
- /* free memory for storing MAC addresses */
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -1372,7 +1447,7 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
}
static int
-rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
struct rte_dpaa_device *dpaa_dev)
{
int diag;
@@ -1456,7 +1531,6 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
}
eth_dev->device = &dpaa_dev->device;
- eth_dev->device->driver = &dpaa_drv->driver;
dpaa_dev->eth_dev = eth_dev;
/* Invoke PMD device initialization function */
@@ -1466,9 +1540,6 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
return 0;
}
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
rte_eth_dev_release_port(eth_dev);
return diag;
}
@@ -1483,9 +1554,6 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
eth_dev = dpaa_dev->eth_dev;
dpaa_dev_uninit(eth_dev);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index c79b9f86..2fc72317 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -39,9 +39,10 @@
/* Alignment to use for cpu-local structs to avoid coherency problems. */
#define MAX_CACHELINE 64
-#define DPAA_MIN_RX_BUF_SIZE 512
#define DPAA_MAX_RX_PKT_LEN 10240
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
/* RX queue tail drop threshold (CGR Based) in frame count */
#define CGR_RX_PERFQ_THRESH 256
@@ -63,7 +64,7 @@
#define DPAA_PCD_FQID_START 0x400
#define DPAA_PCD_FQID_MULTIPLIER 0x100
#define DPAA_DEFAULT_NUM_PCD_QUEUES 1
-#define DPAA_MAX_NUM_PCD_QUEUES 32
+#define DPAA_MAX_NUM_PCD_QUEUES 4
#define DPAA_IF_TX_PRIORITY 3
#define DPAA_IF_RX_PRIORITY 0
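
The MTU and Rx-queue-setup checks added to dpaa_ethdev.c reduce to simple arithmetic: Ethernet overhead is added to the MTU, and the resulting frame must either fit into a single mbuf data room or, when scatter is enabled, into at most DPAA_SGT_MAX_ENTRIES buffers. A standalone restatement of that check is sketched below; the ETHER_* and DPAA_* constants are redeclared locally with the values used by the patch.

#include <stdint.h>
#include <stdbool.h>

/* Local stand-ins for the DPDK/DPAA constants used by the patch. */
#define ETHER_MIN_MTU        68
#define ETHER_HDR_LEN        14
#define ETHER_CRC_LEN        4
#define VLAN_TAG_SIZE        4
#define DPAA_MAX_RX_PKT_LEN  10240
#define DPAA_SGT_MAX_ENTRIES 16   /* maximum number of entries in SG table */

/* Return true when an MTU is acceptable for a given per-mbuf data room
 * (buffsz) and scatter setting, mirroring the checks in dpaa_mtu_set().
 */
static bool dpaa_mtu_ok(uint16_t mtu, uint32_t buffsz, bool scattered_rx)
{
    uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
                          VLAN_TAG_SIZE;

    if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
        return false;
    if (!scattered_rx)
        return frame_size <= buffsz;                     /* one buffer  */
    return frame_size <= buffsz * DPAA_SGT_MAX_ENTRIES;  /* SG list     */
}

int main(void)
{
    /* 1500-byte MTU with a 2048-byte data room fits without scatter. */
    return dpaa_mtu_ok(1500, 2048, false) ? 0 : 1;
}
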
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 168b77e4..c4471c22 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -306,8 +306,6 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
int i = 0;
uint8_t fd_offset = fd->offset;
- DPAA_DP_LOG(DEBUG, "Received an SG frame");
-
vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
if (!vaddr) {
DPAA_PMD_ERR("unable to convert physical address");
@@ -349,6 +347,8 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
}
prev_seg = cur_seg;
}
+ DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
+ first_seg->pkt_len, first_seg->nb_segs);
dpaa_eth_packet_info(first_seg, vaddr);
rte_pktmbuf_free_seg(temp);
@@ -367,22 +367,21 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
uint16_t offset;
uint32_t length;
- DPAA_DP_LOG(DEBUG, " FD--->MBUF");
-
if (unlikely(format == qm_fd_sg))
return dpaa_eth_sg_to_mbuf(fd, ifid);
- ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
-
- rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
-
offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
length = fd->opaque & DPAA_FD_LENGTH_MASK;
+ DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);
+
/* Ignoring case when format != qm_fd_contig */
dpaa_display_frame(fd);
+ ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+ /* Prefetch the Parse results and packet data to L1 */
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
mbuf->data_off = offset;
mbuf->data_len = length;
@@ -398,8 +397,9 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
return mbuf;
}
+/* Specific for LS1043 */
void
-dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
+dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
void **bufs, int num_bufs)
{
struct rte_mbuf *mbuf;
@@ -411,17 +411,13 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
uint32_t length;
uint8_t format;
- if (dpaa_svr_family != SVR_LS1046A_FAMILY) {
- bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
- ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
- rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
- bufs[0] = (struct rte_mbuf *)((char *)ptr -
- bp_info->meta_data_size);
- }
+ bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
+ ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+ bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
for (i = 0; i < num_bufs; i++) {
- if (dpaa_svr_family != SVR_LS1046A_FAMILY &&
- i < num_bufs - 1) {
+ if (i < num_bufs - 1) {
bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
rte_prefetch0((void *)((uint8_t *)ptr +
@@ -458,6 +454,46 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
}
}
+void
+dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
+ void **bufs, int num_bufs)
+{
+ struct rte_mbuf *mbuf;
+ const struct qm_fd *fd;
+ struct dpaa_if *dpaa_intf;
+ uint16_t offset, i;
+ uint32_t length;
+ uint8_t format;
+
+ for (i = 0; i < num_bufs; i++) {
+ fd = &dqrr[i]->fd;
+ dpaa_intf = fq[0]->dpaa_intf;
+
+ format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
+ DPAA_FD_FORMAT_SHIFT;
+ if (unlikely(format == qm_fd_sg)) {
+ bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
+ continue;
+ }
+
+ offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
+ DPAA_FD_OFFSET_SHIFT;
+ length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+ mbuf = bufs[i];
+ mbuf->data_off = offset;
+ mbuf->data_len = length;
+ mbuf->pkt_len = length;
+ mbuf->port = dpaa_intf->ifid;
+
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+ dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
+ }
+}
+
void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
{
struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
@@ -468,8 +504,7 @@ void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
* So we prefetch the annotation beforehand, so that it is available
* in cache when accessed.
*/
- if (dpaa_svr_family == SVR_LS1046A_FAMILY)
- rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
*bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
}
@@ -870,6 +905,19 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
DPAA_TX_BURST_SIZE : nb_bufs;
for (loop = 0; loop < frames_to_send; loop++) {
mbuf = *(bufs++);
+ seqn = mbuf->seqn;
+ if (seqn != DPAA_INVALID_MBUF_SEQN) {
+ index = seqn - 1;
+ if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
+ flags[loop] =
+ ((index & QM_EQCR_DCA_IDXMASK) << 8);
+ flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ DPAA_PER_LCORE_DQRR_HELD &=
+ ~(1 << index);
+ }
+ }
+
if (likely(RTE_MBUF_DIRECT(mbuf))) {
mp = mbuf->pool;
bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
@@ -916,18 +964,6 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto send_pkts;
}
}
- seqn = mbuf->seqn;
- if (seqn != DPAA_INVALID_MBUF_SEQN) {
- index = seqn - 1;
- if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
- flags[loop] =
- ((index & QM_EQCR_DCA_IDXMASK) << 8);
- flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
- DPAA_PER_LCORE_DQRR_SIZE--;
- DPAA_PER_LCORE_DQRR_HELD &=
- ~(1 << index);
- }
- }
}
send_pkts:
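
The reordering above computes the DQRR consume (DCA) enqueue flags before the per-mbuf direct/indirect handling, so the held DQRR entry's consume annotation is prepared regardless of which path the mbuf takes afterwards. A minimal sketch of that flag derivation, assuming the per-lcore DQRR macros used in the hunk (illustrative only, not a helper introduced by the patch):

static inline uint32_t
dpaa_tx_dca_flags(uint32_t seqn)
{
	uint32_t index, flags = 0;

	if (seqn == DPAA_INVALID_MBUF_SEQN)
		return 0;                       /* nothing held for this mbuf */

	index = seqn - 1;                       /* seqn stores DQRR index + 1 */
	if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
		flags = (index & QM_EQCR_DCA_IDXMASK) << 8;
		flags |= QMAN_ENQUEUE_FLAG_DCA; /* consume the held DQRR entry */
		DPAA_PER_LCORE_DQRR_SIZE--;
		DPAA_PER_LCORE_DQRR_HELD &= ~(1 << index);
	}
	return flags;
}
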
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
index d3e63516..75b093c1 100644
--- a/drivers/net/dpaa/dpaa_rxtx.h
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -32,8 +32,6 @@
/* L4 Type field: TCP */
#define DPAA_L4_PARSE_RESULT_TCP 0x20
-#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
/** <Maximum number of frames to be dequeued in a single rx call*/
@@ -272,4 +270,7 @@ void dpaa_rx_cb(struct qman_fq **fq,
struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs);
+
+void dpaa_rx_cb_no_prefetch(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
#endif
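
The header now exports both burst callbacks: dpaa_rx_cb() relies on dpaa_rx_cb_prepare() having prefetched the annotation, while dpaa_rx_cb_no_prefetch() (LS1043-class SoCs) does the prefetch inline. A hedged sketch of how an Rx queue setup path might select between them; the cb field names below are assumptions for illustration, not part of this hunk:

/* Illustrative only: field names on the qman_fq callback struct are assumed. */
if (dpaa_svr_family == SVR_LS1046A_FAMILY) {
	rxq->cb.dqrr_prepare      = dpaa_rx_cb_prepare;     /* prefetch ahead of the burst */
	rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
} else {
	rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch; /* e.g. LS1043 */
}
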
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index 9b0b1433..ca5f7a33 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -25,7 +25,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
EXPORT_MAP := rte_pmd_dpaa2_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
# depends on fslmc bus which uses experimental API
CFLAGS += -DALLOW_EXPERIMENTAL_API
@@ -40,5 +40,6 @@ LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_mempool_dpaa2
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
index 779cdf2b..adb730b7 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -213,6 +213,46 @@ struct dpaa2_annot_hdr {
#define DPAA2_L3_IPv6_UDP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL)
+/**
+ * Macros to get values in word5
+ */
+#define SHIM_OFFSET_1(var) ((uint64_t)(var) & 0xFF00000000000000)
+#define SHIM_OFFSET_2(var) ((uint64_t)(var) & 0x00FF000000000000)
+#define IP_PID_OFFSET(var) ((uint64_t)(var) & 0x0000FF0000000000)
+#define ETH_OFFSET(var) ((uint64_t)(var) & 0x000000FF00000000)
+#define LLC_SNAP_OFFSET(var) ((uint64_t)(var) & 0x00000000FF000000)
+#define VLAN_TCI_OFFSET_1(var) ((uint64_t)(var) & 0x0000000000FF0000)
+#define VLAN_TCI_OFFSET_N(var) ((uint64_t)(var) & 0x000000000000FF00)
+#define LAST_ETYPE_OFFSET(var) ((uint64_t)(var) & 0x00000000000000FF)
+
+/**
+ * Macros to get values in word6
+ */
+#define PPPOE_OFFSET(var) ((uint64_t)(var) & 0xFF00000000000000)
+#define MPLS_OFFSET_1(var) ((uint64_t)(var) & 0x00FF000000000000)
+#define MPLS_OFFSET_N(var) ((uint64_t)(var) & 0x0000FF0000000000)
+#define ARP_OR_IP_OFFSET_1(var) ((uint64_t)(var) & 0x000000FF00000000)
+#define IP_N_OR_MIN_ENCAP_OFFSET(var) ((uint64_t)(var) & 0x00000000FF000000)
+#define GRE_OFFSET(var) ((uint64_t)(var) & 0x0000000000FF0000)
+#define L4_OFFSET(var) ((uint64_t)(var) & 0x000000000000FF00)
+#define GTP_OR_ESP_OR_IPSEC_OFFSET(var) ((uint64_t)(var) & 0x00000000000000FF)
+
+/**
+ * Macros to get values in word7
+ */
+#define IPV6_ROUTING_HDR_OFFSET_1(var) ((uint64_t)(var) & 0xFF00000000000000)
+#define IPV6_ROUTING_HDR_OFFSET_2(var) ((uint64_t)(var) & 0x00FF000000000000)
+#define NEXT_HDR_OFFSET(var) ((uint64_t)(var) & 0x0000FF0000000000)
+#define IPV6_FRAG_OFFSET(var) ((uint64_t)(var) & 0x000000FF00000000)
+#define GROSS_RUNNING_SUM(var) ((uint64_t)(var) & 0x00000000FFFF0000)
+#define RUNNING_SUM(var) ((uint64_t)(var) & 0x000000000000FFFF)
+
+/**
+ * Macros to get values in word8
+ */
+#define PARSE_ERROR_CODE(var) ((uint64_t)(var) & 0xFF00000000000000)
+#define SOFT_PARSING_CONTEXT(var) ((uint64_t)(var) & 0x00FFFFFFFFFFFFFF)
+
/* Debug frame, otherwise supposed to be discarded */
#define DPAA2_ETH_FAS_DISC 0x80000000
/* MACSEC frame */
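
These masks only isolate a byte lane inside the 64-bit annotation word; the caller still shifts the result down to that lane's bit position to obtain the actual offset. A small usage sketch matching how VLAN_TCI_OFFSET_1 is consumed in dpaa2_rxtx.c later in this patch:

/* Offset of the first VLAN TCI, taken from word5 of the annotation. */
uint16_t tci_off = VLAN_TCI_OFFSET_1(annotation->word5) >> 16;
uint16_t *tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *, tci_off);

mbuf->vlan_tci = rte_be_to_cpu_16(*tci);
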
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c5047367..fa71807e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -39,7 +39,6 @@ static uint64_t dev_rx_offloads_sup =
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
/* Supported Tx offloads */
@@ -292,6 +291,35 @@ fail:
return -1;
}
+static void
+dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Queue allocation base */
+ if (priv->rx_vq[0]) {
+ /* cleaning up queue storage */
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ if (dpaa2_q->q_storage)
+ rte_free(dpaa2_q->q_storage);
+ }
+ /* cleanup tx queue cscn */
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ if (dpaa2_q->cscn)
+ rte_free(dpaa2_q->cscn);
+ }
+ /* Free memory for all queues (RX+TX) */
+ rte_free(priv->rx_vq[0]);
+ priv->rx_vq[0] = NULL;
+ }
+}
+
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
@@ -406,7 +434,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
}
- dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
/* update the current status */
dpaa2_dev_link_update(dev, 0);
@@ -569,7 +598,8 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
*/
cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
cong_notif_cfg.message_ctx = 0;
- cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
+ cong_notif_cfg.message_iova =
+ (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
cong_notif_cfg.notification_mode =
DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
@@ -867,23 +897,13 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
- struct rte_eth_dev_data *data = dev->data;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- int i, ret;
+ int ret;
struct rte_eth_link link;
- struct dpaa2_queue *dpaa2_q;
PMD_INIT_FUNC_TRACE();
- for (i = 0; i < data->nb_tx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
- if (!dpaa2_q->cscn) {
- rte_free(dpaa2_q->cscn);
- dpaa2_q->cscn = NULL;
- }
- }
-
/* Clean the device first */
ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
@@ -1117,6 +1137,8 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
int32_t retcode;
uint8_t page0 = 0, page1 = 1, page2 = 2;
union dpni_statistics value;
+ int i;
+ struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
memset(&value, 0, sizeof(union dpni_statistics));
@@ -1164,6 +1186,21 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
stats->oerrors = value.page_2.egress_discarded_frames;
stats->imissed = value.page_2.ingress_nobuffer_discards;
+ /* Fill in per queue stats */
+ for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
+ (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
+ dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
+ if (dpaa2_rxq)
+ stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
+ if (dpaa2_txq)
+ stats->q_opackets[i] = dpaa2_txq->tx_pkts;
+
+ /* Byte counting is not implemented */
+ stats->q_ibytes[i] = 0;
+ stats->q_obytes[i] = 0;
+ }
+
return 0;
err:
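
With the loop above, the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues report per-queue packet counters while the per-queue byte counters stay zero. A hedged application-side sketch of reading them through the standard ethdev API (port id 0 is assumed):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

struct rte_eth_stats stats;
int i;

if (rte_eth_stats_get(0, &stats) == 0) {
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++)
		printf("q%d: rx %" PRIu64 " pkts, tx %" PRIu64 " pkts\n",
		       i, stats.q_ipackets[i], stats.q_opackets[i]);
}
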
@@ -1323,6 +1360,8 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
int32_t retcode;
+ int i;
+ struct dpaa2_queue *dpaa2_q;
PMD_INIT_FUNC_TRACE();
@@ -1335,6 +1374,19 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
if (retcode)
goto error;
+ /* Reset the per queue stats in dpaa2_queue structure */
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ if (dpaa2_q)
+ dpaa2_q->rx_pkts = 0;
+ }
+
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ if (dpaa2_q)
+ dpaa2_q->tx_pkts = 0;
+ }
+
return;
error:
@@ -1360,7 +1412,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
+ DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
return -1;
}
@@ -1422,7 +1474,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
}
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- DPAA2_PMD_ERR("Unable to get link state (%d)", ret);
+ DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
return -1;
}
@@ -1785,6 +1837,74 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};
+/* Populate the MAC address from the physical device (u-boot/firmware) and/or
+ * the one set by higher layers like MC (restool) etc.
+ * Fills in the primary MAC entry of the MAC table.
+ */
+static int
+populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
+ struct ether_addr *mac_entry)
+{
+ int ret;
+ struct ether_addr phy_mac, prime_mac;
+
+ memset(&phy_mac, 0, sizeof(struct ether_addr));
+ memset(&prime_mac, 0, sizeof(struct ether_addr));
+
+ /* Get the physical device MAC address */
+ ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
+ phy_mac.addr_bytes);
+ if (ret) {
+ DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
+ goto cleanup;
+ }
+
+ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
+ prime_mac.addr_bytes);
+ if (ret) {
+ DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
+ goto cleanup;
+ }
+
+ /* Now that both MACs have been obtained, do:
+ * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
+ * and return phy
+ * If empty_mac(phy), return prime.
+ * if both are empty, create random MAC, set as prime and return
+ */
+ if (!is_zero_ether_addr(&phy_mac)) {
+ /* If the addresses are not same, overwrite prime */
+ if (!is_same_ether_addr(&phy_mac, &prime_mac)) {
+ ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ phy_mac.addr_bytes);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to set MAC Address: %d",
+ ret);
+ goto cleanup;
+ }
+ memcpy(&prime_mac, &phy_mac, sizeof(struct ether_addr));
+ }
+ } else if (is_zero_ether_addr(&prime_mac)) {
+ /* In case both phy and prime MAC are zero, create a random MAC */
+ eth_random_addr(prime_mac.addr_bytes);
+ ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ prime_mac.addr_bytes);
+ if (ret) {
+ DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
+ goto cleanup;
+ }
+ }
+
+ /* prime_mac is the final MAC address */
+ memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr));
+ return 0;
+
+cleanup:
+ return -1;
+}
+
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
@@ -1867,7 +1987,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
goto init_err;
}
- /* Allocate memory for storing MAC addresses */
+ /* Allocate memory for storing MAC addresses.
+ * Table of mac_filter_entries size is allocated so that RTE ether lib
+ * can add MAC entries when rte_eth_dev_mac_addr_add is called.
+ */
eth_dev->data->mac_addrs = rte_zmalloc("dpni",
ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
@@ -1878,12 +2001,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
goto init_err;
}
- ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
- priv->token,
- (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
+ ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
if (ret) {
- DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d",
- ret);
+ DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
goto init_err;
}
@@ -1927,8 +2049,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- int i, ret;
- struct dpaa2_queue *dpaa2_q;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -1942,23 +2063,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
dpaa2_dev_close(eth_dev);
- if (priv->rx_vq[0]) {
- /* cleaning up queue storage */
- for (i = 0; i < priv->nb_rx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
- if (dpaa2_q->q_storage)
- rte_free(dpaa2_q->q_storage);
- }
- /*free the all queue memory */
- rte_free(priv->rx_vq[0]);
- priv->rx_vq[0] = NULL;
- }
-
- /* free memory for storing MAC addresses */
- if (eth_dev->data->mac_addrs) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- }
+ dpaa2_free_rx_tx_queues(eth_dev);
/* Close the device at underlying layer*/
ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
@@ -2008,7 +2113,6 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
}
eth_dev->device = &dpaa2_dev->device;
- eth_dev->device->driver = &dpaa2_drv->driver;
dpaa2_dev->eth_dev = eth_dev;
eth_dev->data->rx_mbuf_alloc_failed = 0;
@@ -2023,8 +2127,6 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
return 0;
}
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
rte_eth_dev_release_port(eth_dev);
return diag;
}
@@ -2037,8 +2139,6 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
eth_dev = dpaa2_dev->eth_dev;
dpaa2_dev_uninit(eth_dev);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index ef109a62..eab943dc 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
@@ -25,18 +25,24 @@
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
+static inline uint32_t __attribute__((hot))
+dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
+ struct dpaa2_annot_hdr *annotation);
+
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \
DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
- DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
+ DPAA2_SET_FD_FRC(_fd, 0); \
+ DPAA2_RESET_FD_CTRL(_fd); \
+ DPAA2_RESET_FD_FLC(_fd); \
} while (0)
static inline void __attribute__((hot))
-dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
+dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
- DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);
+ uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
m->packet_type = RTE_PTYPE_UNKNOWN;
switch (frc) {
@@ -91,29 +97,45 @@ dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
break;
- case DPAA2_PKT_TYPE_VLAN_1:
- case DPAA2_PKT_TYPE_VLAN_2:
- m->ol_flags |= PKT_RX_VLAN;
- break;
- /* More switch cases can be added */
- /* TODO: Add handling for checksum error check from FRC */
default:
- m->packet_type = RTE_PTYPE_UNKNOWN;
+ m->packet_type = dpaa2_dev_rx_parse_slow(m,
+ (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE));
}
+ m->hash.rss = fd->simple.flc_hi;
+ m->ol_flags |= PKT_RX_RSS_HASH;
}
static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
+dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
+ struct dpaa2_annot_hdr *annotation)
{
uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
+ uint16_t *vlan_tci;
+
+ DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
+ "(4)=0x%" PRIx64 "\t",
+ annotation->word3, annotation->word4);
+
+ if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
+ vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
+ (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
+ mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
+ mbuf->ol_flags |= PKT_RX_VLAN;
+ pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
+ vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
+ (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
+ mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
+ pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
+ }
- DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
- annotation->word4);
if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
- pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+ pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
goto parse_done;
} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
- pkt_type = RTE_PTYPE_L2_ETHER;
+ pkt_type |= RTE_PTYPE_L2_ETHER;
} else {
goto parse_done;
}
@@ -135,6 +157,11 @@ dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
goto parse_done;
}
+ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
L3_IP_1_MORE_FRAGMENT |
L3_IP_N_FIRST_FRAGMENT |
@@ -173,16 +200,15 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
annotation->word4);
- /* Check offloads first */
- if (BIT_ISSET_AT_POS(annotation->word3,
- L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
- mbuf->ol_flags |= PKT_RX_VLAN;
-
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ /* Check detailed parsing requirement */
+ if (annotation->word3 & 0x7FFFFC3FFFF)
+ return dpaa2_dev_rx_parse_slow(mbuf, annotation);
+
/* Return some common types from parse processing */
switch (annotation->word4) {
case DPAA2_L3_IPv4:
@@ -205,7 +231,7 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
break;
}
- return dpaa2_dev_rx_parse_slow(annotation);
+ return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
static inline struct rte_mbuf *__attribute__((hot))
@@ -236,8 +262,7 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
first_seg->nb_segs = 1;
first_seg->next = NULL;
if (dpaa2_svr_family == SVR_LX2160A)
- dpaa2_dev_rx_parse_frc(first_seg,
- DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+ dpaa2_dev_rx_parse_new(first_seg, fd);
else
first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
@@ -293,7 +318,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
*/
if (dpaa2_svr_family == SVR_LX2160A)
- dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+ dpaa2_dev_rx_parse_new(mbuf, fd);
else
mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
@@ -476,8 +501,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
- pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_pkts;
+ pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
if (unlikely(!q_storage->active_dqs)) {
q_storage->toggle = 0;
dq_storage = q_storage->dq_storage[q_storage->toggle];
@@ -555,10 +579,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
fd = qbman_result_DQ_fd(dq_storage);
- next_fd = qbman_result_DQ_fd(dq_storage + 1);
- /* Prefetch Annotation address for the parse results */
- rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
- + DPAA2_FD_PTA_SIZE + 16));
+ if (dpaa2_svr_family != SVR_LX2160A) {
+ next_fd = qbman_result_DQ_fd(dq_storage + 1);
+ /* Prefetch Annotation address for the parse results */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
+ next_fd) + DPAA2_FD_PTA_SIZE + 16));
+ }
if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
@@ -685,7 +711,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/*Prepare enqueue descriptor*/
qbman_eq_desc_clear(&eqdesc);
qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
- qbman_eq_desc_set_response(&eqdesc, 0, 0);
qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
dpaa2_q->flow_id, dpaa2_q->tc_index);
/*Clear the unused FD fields before sending*/
@@ -699,7 +724,8 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
goto skip_tx;
}
- frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
+ frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_pkts;
for (loop = 0; loop < frames_to_send; loop++) {
if ((*bufs)->seqn) {
@@ -712,9 +738,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
}
- fd_arr[loop].simple.frc = 0;
- DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
- DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
if (likely(RTE_MBUF_DIRECT(*bufs))) {
mp = (*bufs)->pool;
/* Check the basic scenario and set
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 9f228169..44b5604d 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -121,6 +121,7 @@ int dpni_create(struct fsl_mc_io *mc_io,
cmd_params->num_queues = cfg->num_queues;
cmd_params->num_tcs = cfg->num_tcs;
cmd_params->mac_filter_entries = cfg->mac_filter_entries;
+ cmd_params->num_rx_tcs = cfg->num_rx_tcs;
cmd_params->vlan_filter_entries = cfg->vlan_filter_entries;
cmd_params->qos_entries = cfg->qos_entries;
cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries);
@@ -664,9 +665,14 @@ int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
- layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
- layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
- layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
+ layout->pass_timestamp =
+ (int)dpni_get_field(rsp_params->flags, PASS_TS);
+ layout->pass_parser_result =
+ (int)dpni_get_field(rsp_params->flags, PASS_PR);
+ layout->pass_frame_status =
+ (int)dpni_get_field(rsp_params->flags, PASS_FS);
+ layout->pass_sw_opaque =
+ (int)dpni_get_field(rsp_params->flags, PASS_SWO);
layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
layout->data_align = le16_to_cpu(rsp_params->data_align);
layout->data_head_room = le16_to_cpu(rsp_params->head_room);
@@ -702,10 +708,11 @@ int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->options = cpu_to_le16(layout->options);
+ cmd_params->options = cpu_to_le16((uint16_t)layout->options);
dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
+ dpni_set_field(cmd_params->flags, PASS_SWO, layout->pass_sw_opaque);
cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
cmd_params->data_align = cpu_to_le16(layout->data_align);
cmd_params->head_room = cpu_to_le16(layout->data_head_room);
@@ -893,6 +900,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
cmd_params->rate = cpu_to_le32(cfg->rate);
cmd_params->options = cpu_to_le64(cfg->options);
+ cmd_params->advertising = cpu_to_le64(cfg->advertising);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -929,8 +937,11 @@ int dpni_get_link_state(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+ state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID);
state->rate = le32_to_cpu(rsp_params->rate);
state->options = le64_to_cpu(rsp_params->options);
+ state->supported = le64_to_cpu(rsp_params->supported);
+ state->advertising = le64_to_cpu(rsp_params->advertising);
return 0;
}
@@ -1471,6 +1482,9 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
dpni_set_field(cmd_params->keep_hash_key,
KEEP_HASH_KEY,
cfg->fs_cfg.keep_hash_key);
+ dpni_set_field(cmd_params->keep_hash_key,
+ KEEP_ENTRIES,
+ cfg->fs_cfg.keep_entries);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1764,8 +1778,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @page: Selects the statistics page to retrieve, see
- * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
- * @param: Custom parameter for some pages used to select
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 3.
+ * @param: Custom parameter for some pages used to select
* a certain statistic source, for example the TC.
* @stat: Structure containing the statistics
*
@@ -1941,3 +1955,111 @@ int dpni_get_taildrop(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpni_set_opr() - Set Order Restoration configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated
+ * for the same TC. Value must be in range 0 to
+ * NUM_QUEUES - 1
+ * @options: Configuration mode options
+ * can be OPR_OPT_CREATE or OPR_OPT_RETIRE
+ * @cfg: Configuration options for the OPR
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg)
+{
+ struct dpni_cmd_set_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPNI_CMDID_SET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_opr *)cmd.params;
+ cmd_params->tc_id = tc;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->oloe = cfg->oloe;
+ cmd_params->oeane = cfg->oeane;
+ cmd_params->olws = cfg->olws;
+ cmd_params->oa = cfg->oa;
+ cmd_params->oprrws = cfg->oprrws;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_opr() - Retrieve Order Restoration config and query.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated
+ * for the same TC. Value must be in range 0 to
+ * NUM_QUEUES - 1
+ * @cfg: Returned OPR configuration
+ * @qry: Returned OPR query
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry)
+{
+ struct dpni_rsp_get_opr *rsp_params;
+ struct dpni_cmd_get_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_opr *)cmd.params;
+ cmd_params->index = index;
+ cmd_params->tc_id = tc;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_opr *)cmd.params;
+ cfg->oloe = rsp_params->oloe;
+ cfg->oeane = rsp_params->oeane;
+ cfg->olws = rsp_params->olws;
+ cfg->oa = rsp_params->oa;
+ cfg->oprrws = rsp_params->oprrws;
+ qry->rip = dpni_get_field(rsp_params->flags, RIP);
+ qry->enable = dpni_get_field(rsp_params->flags, OPR_ENABLE);
+ qry->nesn = le16_to_cpu(rsp_params->nesn);
+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
+ qry->tseq_nlis = dpni_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
+ qry->hseq_nlis = dpni_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
+
+ return 0;
+}
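
A hedged sketch of creating an order restoration point for TC 0, queue index 0. The OPR_OPT_CREATE flag and the opr_cfg field meanings come from fsl_dpopr.h, which is not part of this file; the values below are purely illustrative:

struct opr_cfg cfg = {
	.oprrws = 3,	/* restoration window size (encoded) - illustrative */
	.oa     = 1,	/* auto advance NESN window */
	.olws   = 0,	/* late arrival window same as restoration window */
	.oeane  = 0,	/* expected-in-order aging disabled */
	.oloe   = 0,	/* loose ordering disabled */
};
int err;

err = dpni_set_opr(dpni, CMD_PRI_LOW, token, 0, 0, OPR_OPT_CREATE, &cfg);
if (err)
	printf("dpni_set_opr failed: %d\n", err);
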
diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
index 4de70f30..02fe8d50 100644
--- a/drivers/net/dpaa2/mc/fsl_dpkg.h
+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -71,45 +71,41 @@ struct dpkg_mask {
/**
* struct dpkg_extract - A structure for defining a single extraction
* @type: Determines how the union below is interpreted:
- * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
- * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
- * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
* @extract: Selects extraction method
+ * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @extract.from_hdr.prot: Any of the supported headers
+ * @extract.from_hdr.type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @extract.from_hdr.field: One of the supported fields (NH_FLD_)
+ * @extract.from_hdr.size: Size in bytes
+ * @extract.from_hdr.offset: Byte offset
+ * @extract.from_hdr.hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ * @extract.from_data.size: Size in bytes
+ * @extract.from_data.offset: Byte offset
+ * @extract.from_parse.size: Size in bytes
+ * @extract.from_parse.offset: Byte offset
* @num_of_byte_masks: Defines the number of valid entries in the array below;
* This is also the number of bytes to be used as masks
* @masks: Masks parameters
*/
struct dpkg_extract {
enum dpkg_extract_type type;
- /**
- * union extract - Selects extraction method
- * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
- * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
- * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
- */
union {
- /**
- * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
- * @prot: Any of the supported headers
- * @type: Defines the type of header extraction:
- * DPKG_FROM_HDR: use size & offset below;
- * DPKG_FROM_FIELD: use field, size and offset below;
- * DPKG_FULL_FIELD: use field below
- * @field: One of the supported fields (NH_FLD_)
- *
- * @size: Size in bytes
- * @offset: Byte offset
- * @hdr_index: Clear for cases not listed below;
- * Used for protocols that may have more than a single
- * header, 0 indicates an outer header;
- * Supported protocols (possible values):
- * NET_PROT_VLAN (0, HDR_INDEX_LAST);
- * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
- * NET_PROT_IP(0, HDR_INDEX_LAST);
- * NET_PROT_IPv4(0, HDR_INDEX_LAST);
- * NET_PROT_IPv6(0, HDR_INDEX_LAST);
- */
-
struct {
enum net_prot prot;
enum dpkg_extract_from_hdr_type type;
@@ -118,23 +114,10 @@ struct dpkg_extract {
uint8_t offset;
uint8_t hdr_index;
} from_hdr;
- /**
- * struct from_data
- * Used when 'type = DPKG_EXTRACT_FROM_DATA'
- * @size: Size in bytes
- * @offset: Byte offset
- */
struct {
uint8_t size;
uint8_t offset;
} from_data;
-
- /**
- * struct from_parse
- * Used when 'type = DPKG_EXTRACT_FROM_PARSE'
- * @size: Size in bytes
- * @offset: Byte offset
- */
struct {
uint8_t size;
uint8_t offset;
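
With the union comments flattened into the top-level kernel-doc, a single extraction is still filled the same way. A hedged sketch of one header extraction for a hash key on the outer IP source address; NET_PROT_IP and NH_FLD_IP_SRC come from fsl_net.h and are assumptions here:

struct dpkg_profile_cfg kg_cfg;

memset(&kg_cfg, 0, sizeof(kg_cfg));
kg_cfg.num_extracts = 1;
kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;   /* use 'field' only */
kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
kg_cfg.extracts[0].extract.from_hdr.hdr_index = 0;            /* outer header */
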
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index f0edcd27..de1bcb5b 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -8,6 +8,7 @@
#define __FSL_DPNI_H
#include <fsl_dpkg.h>
+#include <fsl_dpopr.h>
struct fsl_mc_io;
@@ -77,6 +78,20 @@ struct fsl_mc_io;
*/
#define DPNI_OPT_NO_FS 0x000020
+/**
+ * Enable the Order Restoration support
+ */
+#define DPNI_OPT_HAS_OPR 0x000040
+
+/**
+ * Order Point Records are shared for the entire TC
+ */
+#define DPNI_OPT_OPR_PER_TC 0x000080
+/**
+ * All Tx traffic classes will use a single sender (ignore num_queues for Tx)
+ */
+#define DPNI_OPT_SINGLE_SENDER 0x000100
+
int dpni_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpni_id,
@@ -88,71 +103,74 @@ int dpni_close(struct fsl_mc_io *mc_io,
/**
* struct dpni_cfg - Structure representing DPNI configuration
- * @mac_addr: Primary MAC address
- * @adv: Advanced parameters; default is all zeros;
- * use this structure to change default settings
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * DPNI_OPT_SINGLE_SENDER
+ * @fs_entries: Number of entries in the flow steering table.
+ * This table is used to select the ingress queue for
+ * ingress traffic, targeting a GPP core or another.
+ * In addition it can be used to discard traffic that
+ * matches the set rule. It is either an exact match table
+ * or a TCAM table, depending on DPNI_OPT_ HAS_KEY_MASKING
+ * bit in OPTIONS field. This field is ignored if
+ * DPNI_OPT_NO_FS bit is set in OPTIONS field. Otherwise,
+ * value 0 defaults to 64. Maximum supported value is 1024.
+ * Note that the total number of entries is limited on the
+ * SoC to as low as 512 entries if TCAM is used.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering
+ * table. This is an exact match table used to filter
+ * ingress traffic based on VLAN IDs. Value 0 disables VLAN
+ * filtering. Maximum supported value is 16.
+ * @mac_filter_entries: Number of entries in the MAC address filtering
+ * table. This is an exact match table and allows both
+ * unicast and multicast entries. The primary MAC address
+ * of the network interface is not part of this table,
+ * this contains only entries in addition to it. This
+ * field is ignored if DPNI_OPT_ NO_MAC_FILTER is set in
+ * OPTIONS field. Otherwise, value 0 defaults to 80.
+ * Maximum supported value is 80.
+ * @num_queues: Number of Tx and Rx queues used for traffic
+ * distribution. This is orthogonal to QoS and is only
+ * used to distribute traffic to multiple GPP cores.
+ * This configuration affects the number of Tx queues
+ * (logical FQs, all associated with a single CEETM queue),
+ * Rx queues and Tx confirmation queues, if applicable.
+ * Value 0 defaults to one queue. Maximum supported value
+ * is 8.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * TCs can have different priority levels for the purpose
+ * of Tx scheduling (see DPNI_SET_TX_PRIORITIES), different
+ * BPs (DPNI_ SET_POOLS), policers. There are dedicated QM
+ * queues for traffic classes (including class queues on
+ * Tx). Value 0 defaults to one TC. Maximum supported value
+ * is 16. There are at most 16 TCs for Tx and 8 TCs for Rx.
+ * When num_tcs > 8, Tx will use this value but Rx will have
+ * only 8 traffic classes.
+ * @num_rx_tcs: if set to a value other than zero, represents the number
+ * of TCs used for Rx. Maximum value is 8. If set to zero, the
+ * number of Rx TCs will be initialized with the value provided
+ * in num_tcs parameter.
+ * @qos_entries: Number of entries in the QoS classification table. This
+ * table is used to select the TC for ingress traffic. It
+ * is either an exact match or a TCAM table, depending on
+ * DPNI_OPT_ HAS_KEY_MASKING bit in OPTIONS field. This
+ * field is ignored if the DPNI has a single TC. Otherwise,
+ * a value of 0 defaults to 64. Maximum supported value
+ * is 64.
*/
struct dpni_cfg {
- /**
- * @options: Any combination of the following options:
- * DPNI_OPT_TX_FRM_RELEASE
- * DPNI_OPT_NO_MAC_FILTER
- * DPNI_OPT_HAS_POLICING
- * DPNI_OPT_SHARED_CONGESTION
- * DPNI_OPT_HAS_KEY_MASKING
- * DPNI_OPT_NO_FS
- * @fs_entries: Number of entries in the flow steering table.
- * This table is used to select the ingress queue for
- * ingress traffic, targeting a GPP core or another.
- * In addition it can be used to discard traffic that
- * matches the set rule. It is either an exact match table
- * or a TCAM table, depending on DPNI_OPT_ HAS_KEY_MASKING
- * bit in OPTIONS field. This field is ignored if
- * DPNI_OPT_NO_FS bit is set in OPTIONS field. Otherwise,
- * value 0 defaults to 64. Maximum supported value is 1024.
- * Note that the total number of entries is limited on the
- * SoC to as low as 512 entries if TCAM is used.
- * @vlan_filter_entries: Number of entries in the VLAN address filtering
- * table. This is an exact match table used to filter
- * ingress traffic based on VLAN IDs. Value 0 disables VLAN
- * filtering. Maximum supported value is 16.
- * @mac_filter_entries: Number of entries in the MAC address filtering
- * table. This is an exact match table and allows both
- * unicast and multicast entries. The primary MAC address
- * of the network interface is not part of this table,
- * this contains only entries in addition to it. This
- * field is ignored if DPNI_OPT_ NO_MAC_FILTER is set in
- * OPTIONS field. Otherwise, value 0 defaults to 80.
- * Maximum supported value is 80.
- * @num_queues: Number of Tx and Rx queues used for traffic
- * distribution. This is orthogonal to QoS and is only
- * used to distribute traffic to multiple GPP cores.
- * This configuration affects the number of Tx queues
- * (logical FQs, all associated with a single CEETM queue),
- * Rx queues and Tx confirmation queues, if applicable.
- * Value 0 defaults to one queue. Maximum supported value
- * is 8.
- * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
- * TCs can have different priority levels for the purpose
- * of Tx scheduling (see DPNI_SET_TX_SELECTION), different
- * BPs (DPNI_ SET_POOLS), policers. There are dedicated QM
- * queues for traffic classes (including class queues on
- * Tx). Value 0 defaults to one TC. Maximum supported value
- * is 8.
- * @qos_entries: Number of entries in the QoS classification table. This
- * table is used to select the TC for ingress traffic. It
- * is either an exact match or a TCAM table, depending on
- * DPNI_OPT_ HAS_KEY_MASKING bit in OPTIONS field. This
- * field is ignored if the DPNI has a single TC. Otherwise,
- * a value of 0 defaults to 64. Maximum supported value
- * is 64.
- */
uint32_t options;
uint16_t fs_entries;
uint8_t vlan_filter_entries;
uint8_t mac_filter_entries;
uint8_t num_queues;
uint8_t num_tcs;
+ uint8_t num_rx_tcs;
uint8_t qos_entries;
};
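
A hedged example of a creation config that uses the new num_rx_tcs field together with the options added above; the values are illustrative and would be passed to dpni_create():

struct dpni_cfg cfg = {
	.options             = DPNI_OPT_HAS_KEY_MASKING | DPNI_OPT_SINGLE_SENDER,
	.num_queues          = 8,	/* per-TC Rx/Tx queues for core distribution */
	.num_tcs             = 16,	/* Tx traffic classes */
	.num_rx_tcs          = 8,	/* Rx capped at 8 TCs */
	.fs_entries          = 64,
	.qos_entries         = 64,
	.mac_filter_entries  = 16,
	.vlan_filter_entries = 0,	/* VLAN filtering disabled */
};
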
@@ -172,17 +190,14 @@ int dpni_destroy(struct fsl_mc_io *mc_io,
* @num_dpbp: Number of DPBPs
* @pools: Array of buffer pools parameters; The number of valid entries
* must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.priority: Priority mask that indicates the TCs used with this buffer.
+ * If set to 0x00, MC will assume value 0xff.
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
*/
struct dpni_pools_cfg {
uint8_t num_dpbp;
- /**
- * struct pools - Buffer pools parameters
- * @dpbp_id: DPBP object ID
- * @priority: priority mask that indicates TC's used with this buffer.
- * I set to 0x00 MC will assume value 0xff.
- * @buffer_size: Buffer size
- * @backup_pool: Backup pool
- */
struct {
int dpbp_id;
uint8_t priority_mask;
@@ -296,6 +311,8 @@ int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
* variants,
* - 0x422 - WRIOP version 1.1.2, used on LS1088 and
* variants.
+ * - 0xC00 - WRIOP version 3.0.0, used on LX2160 and
+ * variants.
*/
struct dpni_attr {
uint32_t options;
@@ -321,6 +338,13 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io,
*/
/**
+ * Discard error. When set, all discarded frames in WRIOP will be enqueued to
+ * error queue. To be used in dpni_set_errors_behavior() only if error_action
+ * parameter is set to DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE.
+ */
+#define DPNI_ERROR_DISC 0x80000000
+
+/**
* Extract out of frame header error
*/
#define DPNI_ERROR_EOFHE 0x00020000
@@ -408,6 +432,10 @@ int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
* Select to modify the data-tail-room setting
*/
#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+/**
+ * Select to modify the sw-opaque value setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_SW_OPAQUE 0x00000080
/**
* struct dpni_buffer_layout - Structure representing DPNI buffer layout
@@ -427,6 +455,7 @@ struct dpni_buffer_layout {
int pass_timestamp;
int pass_parser_result;
int pass_frame_status;
+ int pass_sw_opaque;
uint16_t private_data_size;
uint16_t data_align;
uint16_t data_head_room;
@@ -501,16 +530,48 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
#define DPNI_STATISTICS_CNT 7
+/**
+ * union dpni_statistics - Union describing the DPNI statistics
+ * @page_0: Page_0 statistics structure
+ * @page_0.ingress_all_frames: Ingress frame count
+ * @page_0.ingress_all_bytes: Ingress byte count
+ * @page_0.ingress_multicast_frames: Ingress multicast frame count
+ * @page_0.ingress_multicast_bytes: Ingress multicast byte count
+ * @page_0.ingress_broadcast_frames: Ingress broadcast frame count
+ * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count
+ * @page_1: Page_1 statistics structure
+ * @page_1.egress_all_frames: Egress frame count
+ * @page_1.egress_all_bytes: Egress byte count
+ * @page_1.egress_multicast_frames: Egress multicast frame count
+ * @page_1.egress_multicast_bytes: Egress multicast byte count
+ * @page_1.egress_broadcast_frames: Egress broadcast frame count
+ * @page_1.egress_broadcast_bytes: Egress broadcast byte count
+ * @page_2: Page_2 statistics structure
+ * @page_2.ingress_filtered_frames: Ingress filtered frame count
+ * @page_2.ingress_discarded_frames: Ingress discarded frame count
+ * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to
+ * lack of buffers
+ * @page_2.egress_discarded_frames: Egress discarded frame count
+ * @page_2.egress_confirmed_frames: Egress confirmed frame count
+ * @page_3: Page_3 statistics structure with values for the selected TC
+ * @page_3.ceetm_dequeue_bytes: Cumulative count of the number of bytes dequeued
+ * @page_3.ceetm_dequeue_frames: Cumulative count of the number of frames
+ * dequeued
+ * @page_3.ceetm_reject_bytes: Cumulative count of the number of bytes in all
+ * frames whose enqueue was rejected
+ * @page_3.ceetm_reject_frames: Cumulative count of all frame enqueues rejected
+ * @page_4: congestion point drops for selected TC
+ * @page_4.cgr_reject_frames: number of rejected frames due to congestion point
+ * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point
+ * @page_5: policer statistics per TC
+ * @page_5.policer_cnt_red: Number of red colored frames
+ * @page_5.policer_cnt_yellow: number of yellow colored frames
+ * @page_5.policer_cnt_green: number of green colored frames
+ * @page_5.policer_cnt_re_red: number of recolored red frames
+ * @page_5.policer_cnt_re_yellow: number of recolored yellow frames
+ * @raw: raw statistics structure, used to index counters
+ */
union dpni_statistics {
- /**
- * struct page_0 - Page_0 statistics structure
- * @ingress_all_frames: Ingress frame count
- * @ingress_all_bytes: Ingress byte count
- * @ingress_multicast_frames: Ingress multicast frame count
- * @ingress_multicast_bytes: Ingress multicast byte count
- * @ingress_broadcast_frames: Ingress broadcast frame count
- * @ingress_broadcast_bytes: Ingress broadcast byte count
- */
struct {
uint64_t ingress_all_frames;
uint64_t ingress_all_bytes;
@@ -519,15 +580,6 @@ union dpni_statistics {
uint64_t ingress_broadcast_frames;
uint64_t ingress_broadcast_bytes;
} page_0;
- /**
- * struct page_1 - Page_1 statistics structure
- * @egress_all_frames: Egress frame count
- * @egress_all_bytes: Egress byte count
- * @egress_multicast_frames: Egress multicast frame count
- * @egress_multicast_bytes: Egress multicast byte count
- * @egress_broadcast_frames: Egress broadcast frame count
- * @egress_broadcast_bytes: Egress broadcast byte count
- */
struct {
uint64_t egress_all_frames;
uint64_t egress_all_bytes;
@@ -536,15 +588,6 @@ union dpni_statistics {
uint64_t egress_broadcast_frames;
uint64_t egress_broadcast_bytes;
} page_1;
- /**
- * struct page_2 - Page_2 statistics structure
- * @ingress_filtered_frames: Ingress filtered frame count
- * @ingress_discarded_frames: Ingress discarded frame count
- * @ingress_nobuffer_discards: Ingress discarded frame count due to
- * lack of buffers
- * @egress_discarded_frames: Egress discarded frame count
- * @egress_confirmed_frames: Egress confirmed frame count
- */
struct {
uint64_t ingress_filtered_frames;
uint64_t ingress_discarded_frames;
@@ -552,26 +595,23 @@ union dpni_statistics {
uint64_t egress_discarded_frames;
uint64_t egress_confirmed_frames;
} page_2;
- /**
- * struct page_3 - Page_3 statistics structure with values for the
- * selected TC
- * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
- * dequeued
- * @ceetm_dequeue_frames: Cumulative count of the number of frames
- * dequeued
- * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
- * frames whose enqueue was rejected
- * @ceetm_reject_frames: Cumulative count of all frame enqueues rejected
- */
struct {
uint64_t ceetm_dequeue_bytes;
uint64_t ceetm_dequeue_frames;
uint64_t ceetm_reject_bytes;
uint64_t ceetm_reject_frames;
} page_3;
- /**
- * struct raw - raw statistics structure, used to index counters
- */
+ struct {
+ uint64_t cgr_reject_frames;
+ uint64_t cgr_reject_bytes;
+ } page_4;
+ struct {
+ uint64_t policer_cnt_red;
+ uint64_t policer_cnt_yellow;
+ uint64_t policer_cnt_green;
+ uint64_t policer_cnt_re_red;
+ uint64_t policer_cnt_re_yellow;
+ } page_5;
struct {
uint64_t counter[DPNI_STATISTICS_CNT];
} raw;
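
Every statistics page aliases the same seven 64-bit slots, so the raw view can be used to walk counters generically. A hedged sketch using dpni_get_statistics() as documented earlier in dpni.c (page 1 selects the egress counters; param is left at 0 here):

union dpni_statistics value;
int i, err;

err = dpni_get_statistics(dpni, CMD_PRI_LOW, token, 1, 0, &value);
if (!err) {
	for (i = 0; i < DPNI_STATISTICS_CNT; i++)
		printf("page1 counter[%d] = %" PRIu64 "\n", i, value.raw.counter[i]);
}
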
@@ -602,10 +642,12 @@ union dpni_statistics {
* struct - Structure representing DPNI link configuration
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
*/
struct dpni_link_cfg {
uint32_t rate;
uint64_t options;
+ uint64_t advertising;
};
int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
@@ -618,11 +660,17 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
* @up: Link state; '0' for down, '1' for up
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speeds capability of the phy (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
*/
struct dpni_link_state {
uint32_t rate;
uint64_t options;
int up;
+ int state_valid;
+ uint64_t supported;
+ uint64_t advertising;
};
int dpni_get_link_state(struct fsl_mc_io *mc_io,
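
A hedged polling sketch for the extended link state, mirroring the dpaa2_dev_link_update() call earlier in this patch; the new supported/advertising bitmaps are simply printed:

struct dpni_link_state state;

memset(&state, 0, sizeof(state));
if (dpni_get_link_state(dpni, CMD_PRI_LOW, token, &state) == 0) {
	if (state.up)
		printf("link up: %u Mbps, advertising 0x%" PRIx64 "\n",
		       state.rate, state.advertising);
	else
		printf("link down (state_valid=%d)\n", state.state_valid);
}
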
@@ -750,11 +798,20 @@ enum dpni_fs_miss_action {
* struct dpni_fs_tbl_cfg - Flow Steering table configuration
* @miss_action: Miss action selection
* @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ * @keep_hash_key: used only when miss_action is set to DPNI_FS_MISS_HASH. When
+ * set to one, unclassified frames will be distributed according to the
+ * previously used hash key. If set to zero, the hash key will be replaced with
+ * the key provided for flow steering.
+ * @keep_entries: if set to one, the command will not delete entries that already
+ * exist in the FS table. Use this option with caution: if the table
+ * entries are not compatible with the distribution key the packets
+ * will not be classified properly.
*/
struct dpni_fs_tbl_cfg {
enum dpni_fs_miss_action miss_action;
uint16_t default_flow_id;
char keep_hash_key;
+ uint8_t keep_entries;
};
/**
@@ -915,34 +972,52 @@ int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
/**
* struct dpni_queue - Queue structure
- * @user_context: User data, presented to the user along with any frames
- * from this queue. Not relevant for Tx queues.
+ * @destination - Destination structure
+ * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object.
+ * Not relevant for Tx queues.
+ * @destination.type: May be one of the following:
+ * 0 - No destination, queue can be manually
+ * queried, but will not push traffic or
+ * notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic
+ * becomes available in the queue a FQDAN
+ * (FQ data available notification) will be
+ * generated to selected DPIO;
+ * 2 - The destination is a DPCON. The queue is
+ * associated with a DPCON object for the
+ * purpose of scheduling between multiple
+ * queues. The DPCON may be independently
+ * configured to generate notifications.
+ * Not relevant for Tx queues.
+ * @destination.hold_active: Hold active, maintains a queue scheduled for longer
+ * in a DPIO during dequeue to reduce spread of traffic.
+ * Only relevant if queues are
+ * not affined to a single DPIO.
+ * @user_context: User data, presented to the user along with any frames
+ * from this queue. Not relevant for Tx queues.
+ * @flc: FD FLow Context structure
+ * @flc.value: Default FLC value for traffic dequeued from
+ * this queue. Please check description of FD
+ * structure for more information.
+ * Note that FLC values set using dpni_add_fs_entry,
+ * if any, take precedence over values per queue.
+ * @flc.stash_control: Boolean, indicates whether the 6 least
+ * significant bits are used for stash control. If set, the 6
+ * least significant bits in value are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context
+ * that are stashed. FLC value is interpreted as a memory address
+ * in this case, excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame
+ * annotation to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame
+ * data to be stashed. Frame data is placed at FD[ADDR] +
+ * FD[OFFSET].
+ * For more details check the Frame Descriptor section in the
+ * hardware documentation.
*/
struct dpni_queue {
- /**
- * struct destination - Destination structure
- * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
- * Identifies either a DPIO or a DPCON object.
- * Not relevant for Tx queues.
- * @type: May be one of the following:
- * 0 - No destination, queue can be manually
- * queried, but will not push traffic or
- * notifications to a DPIO;
- * 1 - The destination is a DPIO. When traffic
- * becomes available in the queue a FQDAN
- * (FQ data available notification) will be
- * generated to selected DPIO;
- * 2 - The destination is a DPCON. The queue is
- * associated with a DPCON object for the
- * purpose of scheduling between multiple
- * queues. The DPCON may be independently
- * configured to generate notifications.
- * Not relevant for Tx queues.
- * @hold_active: Hold active, maintains a queue scheduled for longer
- * in a DPIO during dequeue to reduce spread of traffic.
- * Only relevant if queues are
- * not affined to a single DPIO.
- */
struct {
uint16_t id;
enum dpni_dest type;
@@ -950,28 +1025,6 @@ struct dpni_queue {
uint8_t priority;
} destination;
uint64_t user_context;
- /**
- * struct flc - FD FLow Context structure
- * @value: Default FLC value for traffic dequeued from
- * this queue. Please check description of FD
- * structure for more information.
- * Note that FLC values set using dpni_add_fs_entry,
- * if any, take precedence over values per queue.
- * @stash_control: Boolean, indicates whether the 6 lowest
- * - significant bits are used for stash control.
- * significant bits are used for stash control. If set, the 6
- * least significant bits in value are interpreted as follows:
- * - bits 0-1: indicates the number of 64 byte units of context
- * that are stashed. FLC value is interpreted as a memory address
- * in this case, excluding the 6 LS bits.
- * - bits 2-3: indicates the number of 64 byte units of frame
- * annotation to be stashed. Annotation is placed at FD[ADDR].
- * - bits 4-5: indicates the number of 64 byte units of frame
- * data to be stashed. Frame data is placed at FD[ADDR] +
- * FD[OFFSET].
- * For more details check the Frame Descriptor section in the
- * hardware documentation.
- */
struct {
uint64_t value;
char stash_control;
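
A hedged sketch of the stash-control encoding documented above, filled into a dpni_queue before programming it. The DPNI_DEST_DPIO destination type is an assumption here (not shown in this hunk), and dpio_id, rxq and ctx_iova are placeholder variables:

struct dpni_queue q;

memset(&q, 0, sizeof(q));
q.destination.id   = dpio_id;		/* DPIO receiving FQDANs */
q.destination.type = DPNI_DEST_DPIO;	/* assumed enum value */
q.user_context     = (uint64_t)(size_t)rxq;
q.flc.stash_control = 1;
/* bits 0-1: 1x64 B of flow context, bits 2-3: 1x64 B of annotation,
 * bits 4-5: 1x64 B of frame data; upper bits hold the context address. */
q.flc.value = (ctx_iova & ~0x3FULL) | (1 << 0) | (1 << 2) | (1 << 4);
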
@@ -1132,4 +1185,21 @@ int dpni_get_taildrop(struct fsl_mc_io *mc_io,
uint8_t tc,
uint8_t q_index,
struct dpni_taildrop *taildrop);
+
+int dpni_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg);
+
+int dpni_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry);
+
#endif /* __FSL_DPNI_H */
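
The stash-control encoding documented in the dpni_queue comment above can be illustrated with a short sketch. This is illustrative only and not part of the patch; example_flc_stash() and flow_ctx_iova are hypothetical names, and the real driver composes these bits in its own queue-setup path.

/* Build an FLC value that stashes one 64 B unit of flow context and two
 * 64 B units of frame data. The context address must be 64 B aligned so
 * its 6 least significant bits are free to carry the stash-control field.
 */
static inline uint64_t example_flc_stash(uint64_t ctx_addr)
{
	uint64_t flc = ctx_addr & ~0x3FULL;	/* address part, 6 LS bits excluded */

	flc |= 1 << 0;	/* bits 0-1: stash one 64 B unit of flow context */
	flc |= 2 << 4;	/* bits 4-5: stash two 64 B units of frame data  */
	/* bits 2-3 left at 0: no frame annotation stashing */
	return flc;
}

/* flow_ctx_iova is a caller-provided, 64 B aligned IOVA (hypothetical). */
struct dpni_queue q = { 0 };

q.flc.value = example_flc_stash(flow_ctx_iova);
q.flc.stash_control = 1;	/* interpret the 6 LS bits of value as above */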
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index eb3e9987..3df5bcf1 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -9,19 +9,21 @@
/* DPNI Version */
#define DPNI_VER_MAJOR 7
-#define DPNI_VER_MINOR 3
+#define DPNI_VER_MINOR 8
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMD_VERSION_2 2
+#define DPNI_CMD_VERSION_3 3
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_2)
+#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_3)
/* Command IDs */
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
-#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
+#define DPNI_CMDID_CREATE DPNI_CMD_V2(0x901)
#define DPNI_CMDID_DESTROY DPNI_CMD(0x981)
#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
@@ -44,10 +46,10 @@
#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
#define DPNI_CMDID_GET_SP_INFO DPNI_CMD(0x211)
#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
-#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD_V2(0x215)
#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
-#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD_V2(0x21A)
#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
@@ -65,7 +67,7 @@
#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
#define DPNI_CMDID_CLR_VLAN_FILTERS DPNI_CMD(0x233)
-#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V2(0x235)
+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V3(0x235)
#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
@@ -76,8 +78,8 @@
#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
-#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
-#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD_V2(0x264)
+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD_V2(0x265)
#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
@@ -87,6 +89,8 @@
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
+#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
+#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
/* Macros for accessing command fields smaller than 1 byte */
#define DPNI_MASK(field) \
@@ -113,6 +117,7 @@ struct dpni_cmd_create {
uint8_t qos_entries;
uint8_t pad3;
uint16_t fs_entries;
+ uint8_t num_rx_tcs;
};
struct dpni_cmd_destroy {
@@ -228,6 +233,8 @@ struct dpni_cmd_set_errors_behavior {
#define DPNI_PASS_PR_SIZE 1
#define DPNI_PASS_FS_SHIFT 2
#define DPNI_PASS_FS_SIZE 1
+#define DPNI_PASS_SWO_SHIFT 3
+#define DPNI_PASS_SWO_SIZE 1
struct dpni_cmd_get_buffer_layout {
uint8_t qtype;
@@ -307,10 +314,13 @@ struct dpni_cmd_set_link_cfg {
uint32_t rate;
uint32_t pad1;
uint64_t options;
+ uint64_t advertising;
};
#define DPNI_LINK_STATE_SHIFT 0
#define DPNI_LINK_STATE_SIZE 1
+#define DPNI_STATE_VALID_SHIFT 1
+#define DPNI_STATE_VALID_SIZE 1
struct dpni_rsp_get_link_state {
uint32_t pad0;
@@ -320,6 +330,8 @@ struct dpni_rsp_get_link_state {
uint32_t rate;
uint32_t pad2;
uint64_t options;
+ uint64_t supported;
+ uint64_t advertising;
};
struct dpni_cmd_set_max_frame_length {
@@ -415,6 +427,8 @@ struct dpni_cmd_set_tx_priorities {
#define DPNI_MISS_ACTION_SIZE 4
#define DPNI_KEEP_HASH_KEY_SHIFT 7
#define DPNI_KEEP_HASH_KEY_SIZE 1
+#define DPNI_KEEP_ENTRIES_SHIFT 6
+#define DPNI_KEEP_ENTRIES_SIZE 1
struct dpni_cmd_set_rx_tc_dist {
uint16_t dist_size;
@@ -601,5 +615,64 @@ struct dpni_rsp_get_congestion_notification {
uint32_t threshold_exit;
};
+struct dpni_cmd_set_opr {
+ uint8_t pad0;
+ uint8_t tc_id;
+ uint8_t index;
+ uint8_t options;
+ uint8_t pad1[7];
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+};
+
+struct dpni_cmd_get_opr {
+ uint8_t pad;
+ uint8_t tc_id;
+ uint8_t index;
+};
+
+#define DPNI_RIP_SHIFT 0
+#define DPNI_RIP_SIZE 1
+#define DPNI_OPR_ENABLE_SHIFT 1
+#define DPNI_OPR_ENABLE_SIZE 1
+#define DPNI_TSEQ_NLIS_SHIFT 0
+#define DPNI_TSEQ_NLIS_SIZE 1
+#define DPNI_HSEQ_NLIS_SHIFT 0
+#define DPNI_HSEQ_NLIS_SIZE 1
+
+struct dpni_rsp_get_opr {
+ uint64_t pad0;
+ /* from LSB: rip:1 enable:1 */
+ uint8_t flags;
+ uint16_t pad1;
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+ uint16_t nesn;
+ uint16_t pad8;
+ uint16_t ndsn;
+ uint16_t pad2;
+ uint16_t ea_tseq;
+ /* only the LSB */
+ uint8_t tseq_nlis;
+ uint8_t pad3;
+ uint16_t ea_hseq;
+ /* only the LSB */
+ uint8_t hseq_nlis;
+ uint8_t pad4;
+ uint16_t ea_hptr;
+ uint16_t pad5;
+ uint16_t ea_tptr;
+ uint16_t pad6;
+ uint16_t opr_vid;
+ uint16_t pad7;
+ uint16_t opr_id;
+};
+
#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */
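
The SHIFT/SIZE pairs defined above follow the MC convention for fields narrower than one byte: a field of SIZE bits starts at bit SHIFT within the byte that holds it. A minimal, illustrative sketch of reading such a field (example_get_opr_enable() is a hypothetical helper; the driver itself goes through its DPNI_MASK/field accessor macros, whose full definition lies outside this hunk):

/* Extract the "enable" bit from dpni_rsp_get_opr.flags. */
static inline uint8_t example_get_opr_enable(uint8_t flags)
{
	uint8_t mask = ((1 << DPNI_OPR_ENABLE_SIZE) - 1) << DPNI_OPR_ENABLE_SHIFT;

	/* With DPNI_OPR_ENABLE_SHIFT = 1 and SIZE = 1 this is (flags & 0x02) >> 1 */
	return (flags & mask) >> DPNI_OPR_ENABLE_SHIFT;
}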
diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h
index 964870ba..0dc0131b 100644
--- a/drivers/net/dpaa2/mc/fsl_net.h
+++ b/drivers/net/dpaa2/mc/fsl_net.h
@@ -180,7 +180,7 @@
#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
-#define NH_FLD_SCTP_CHUNK_DATA_BEGGINNING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build
index 213f0d72..b3459525 100644
--- a/drivers/net/dpaa2/meson.build
+++ b/drivers/net/dpaa2/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index 9c87e883..0ed62765 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -10,6 +10,7 @@ LIB = librte_pmd_e1000.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
diff --git a/drivers/net/e1000/base/e1000_82571.c b/drivers/net/e1000/base/e1000_82571.c
index 7c279dbb..397dd948 100644
--- a/drivers/net/e1000/base/e1000_82571.c
+++ b/drivers/net/e1000/base/e1000_82571.c
@@ -1257,6 +1257,11 @@ STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw)
*/
e1000_clear_hw_cntrs_82571(hw);
+ /* MSI-X configuration for the 82574 */
+ if (mac->type == e1000_82574)
+ E1000_WRITE_REG(hw, E1000_IVAR,
+ (E1000_IVAR_INT_ALLOC_VALID << 16));
+
return ret_val;
}
diff --git a/drivers/net/e1000/base/e1000_osdep.h b/drivers/net/e1000/base/e1000_osdep.h
index b8868049..5958ea15 100644
--- a/drivers/net/e1000/base/e1000_osdep.h
+++ b/drivers/net/e1000/base/e1000_osdep.h
@@ -48,7 +48,7 @@
#include "../e1000_logs.h"
-#define DELAY(x) rte_delay_us(x)
+#define DELAY(x) rte_delay_us_sleep(x)
#define usec_delay(x) DELAY(x)
#define usec_delay_irq(x) DELAY(x)
#define msec_delay(x) DELAY(1000*(x))
diff --git a/drivers/net/e1000/base/meson.build b/drivers/net/e1000/base/meson.build
index 5e1716de..f26f2429 100644
--- a/drivers/net/e1000/base/meson.build
+++ b/drivers/net/e1000/base/meson.build
@@ -25,6 +25,9 @@ error_cflags = ['-Wno-uninitialized', '-Wno-unused-parameter',
'-Wno-unused-variable', '-Wno-misleading-indentation',
'-Wno-implicit-fallthrough']
c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
foreach flag: error_cflags
if cc.has_argument(flag)
c_args += flag
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 902001f3..94edff08 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -236,7 +236,8 @@ struct igb_ethertype_filter {
struct igb_rte_flow_rss_conf {
struct rte_flow_action_rss conf; /**< RSS parameters. */
uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
- uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+ /* Queues indices to use. */
+ uint16_t queue[IGB_MAX_RX_QUEUE_NUM_82576];
};
/*
@@ -506,7 +507,8 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add);
-int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+int igb_rss_conf_init(struct rte_eth_dev *dev,
+ struct igb_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
int igb_action_rss_same(const struct rte_flow_action_rss *comp,
const struct rte_flow_action_rss *with);
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 053e855b..8230824e 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -329,9 +329,6 @@ eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
rte_intr_callback_unregister(intr_handle,
@@ -1444,7 +1441,8 @@ eth_em_interrupt_setup(struct rte_eth_dev *dev)
/* clear interrupt */
E1000_READ_REG(hw, E1000_ICR);
regval = E1000_READ_REG(hw, E1000_IMS);
- E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
+ E1000_WRITE_REG(hw, E1000_IMS,
+ regval | E1000_ICR_LSC | E1000_ICR_OTHER);
return 0;
}
@@ -1494,7 +1492,7 @@ em_rxq_intr_enable(struct e1000_hw *hw)
static void
em_lsc_intr_disable(struct e1000_hw *hw)
{
- E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
+ E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
E1000_WRITE_FLUSH(hw);
}
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 7d2ac4eb..a9cd7651 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1160,6 +1160,7 @@ em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
RTE_SET_USED(dev);
tx_offload_capa =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -1363,7 +1364,6 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
if (max_rx_pktlen > ETHER_MAX_LEN)
@@ -1417,12 +1417,13 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
}
/*
- * EM devices don't support drop_en functionality
+ * EM devices don't support drop_en functionality.
+ * It's an optimization that does nothing on single-queue devices,
+ * so just log the issue and carry on.
*/
if (rx_conf->rx_drop_en) {
- PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
+ PMD_INIT_LOG(NOTICE, "drop_en functionality not supported by "
"device");
- return -EINVAL;
}
/* Free memory prior to re-allocation if needed. */
@@ -1459,7 +1460,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -1795,7 +1796,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -1877,7 +1878,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
/* Setup the Receive Control Register. */
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
else
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 64dfe683..d9d29d22 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -74,6 +74,7 @@ static void eth_igb_stop(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
+static int eth_igb_reset(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
@@ -351,6 +352,7 @@ static const struct eth_dev_ops eth_igb_ops = {
.dev_set_link_up = eth_igb_dev_set_link_up,
.dev_set_link_down = eth_igb_dev_set_link_down,
.dev_close = eth_igb_close,
+ .dev_reset = eth_igb_reset,
.promiscuous_enable = eth_igb_promiscuous_enable,
.promiscuous_disable = eth_igb_promiscuous_disable,
.allmulticast_enable = eth_igb_allmulticast_enable,
@@ -915,9 +917,6 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
/* Reset any pending lock */
igb_reset_swfw_lock(hw);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
/* uninitialize PF if max_vfs not zero */
igb_pf_host_uninit(eth_dev);
@@ -1071,9 +1070,6 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
/* disable uio intr before callback unregister */
rte_intr_disable(&pci_dev->intr_handle);
rte_intr_callback_unregister(&pci_dev->intr_handle,
@@ -1593,6 +1589,33 @@ eth_igb_close(struct rte_eth_dev *dev)
rte_eth_linkstatus_set(dev, &link);
}
+/*
+ * Reset PF device.
+ */
+static int
+eth_igb_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ /* When a DPDK PMD PF begins to reset its port, it should notify all
+ * of its VFs so that they stay aligned with it. The detailed notification
+ * mechanism is PMD specific and is currently not implemented.
+ * To avoid unexpected behavior in the VFs, resetting a PF with SR-IOV
+ * enabled is currently not supported. It might be supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_igb_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_igb_dev_init(dev);
+
+ return ret;
+}
+
+
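/*
 * Illustrative only (not part of this patch): applications reach the new
 * .dev_reset callback through the generic ethdev API, typically after an
 * RTE_ETH_EVENT_INTR_RESET notification, e.g.:
 *
 *	ret = rte_eth_dev_reset(port_id);
 *	if (ret == -ENOTSUP)
 *		... reset refused, e.g. because SR-IOV is active on the PF ...
 */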
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
@@ -3197,14 +3220,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
}
#endif
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index 07385291..33683498 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -1307,6 +1307,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *rss_conf,
struct rte_flow_error *error)
{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
const struct rte_flow_action *act;
const struct rte_flow_action_rss *rss;
uint16_t n, index;
@@ -1357,11 +1358,14 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS hash key must be exactly 40 bytes");
- if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ if (((hw->mac.type == e1000_82576) &&
+ (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+ ((hw->mac.type != e1000_82576) &&
+ (rss->queue_num > IGB_MAX_RX_QUEUE_NUM)))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"too many queues for RSS context");
- if (igb_rss_conf_init(rss_conf, rss))
+ if (igb_rss_conf_init(dev, rss_conf, rss))
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS context initialization failure");
@@ -1574,7 +1578,7 @@ igb_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- igb_rss_conf_init(&rss_filter_ptr->filter_info,
+ igb_rss_conf_init(dev, &rss_filter_ptr->filter_info,
&rss_conf.conf);
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index b955068a..25ff5f68 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1452,10 +1452,10 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
uint64_t
igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
{
- uint64_t rx_offload_capa;
+ uint64_t tx_offload_capa;
RTE_SET_USED(dev);
- rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
@@ -1463,17 +1463,17 @@ igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_MULTI_SEGS;
- return rx_offload_capa;
+ return tx_offload_capa;
}
uint64_t
igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
{
- uint64_t rx_queue_offload_capa;
+ uint64_t tx_queue_offload_capa;
- rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+ tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
- return rx_queue_offload_capa;
+ return tx_queue_offload_capa;
}
int
@@ -1638,7 +1638,6 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
@@ -1721,7 +1720,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -2374,7 +2373,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -2506,7 +2505,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
/* clear STRCRC bit in all queues */
@@ -2852,11 +2851,17 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
}
int
-igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+igb_rss_conf_init(struct rte_eth_dev *dev,
+ struct igb_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in)
{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
if (in->key_len > RTE_DIM(out->key) ||
- in->queue_num > RTE_DIM(out->queue))
+ ((hw->mac.type == e1000_82576) &&
+ (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+ ((hw->mac.type != e1000_82576) &&
+ (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
return -EINVAL;
out->conf = (struct rte_flow_action_rss){
.func = in->func,
@@ -2945,7 +2950,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
igb_hw_rss_hash_set(hw, &rss_conf);
- if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))
return -EINVAL;
return 0;
diff --git a/drivers/net/e1000/meson.build b/drivers/net/e1000/meson.build
index cf456995..d0901d37 100644
--- a/drivers/net/e1000/meson.build
+++ b/drivers/net/e1000/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+allow_experimental_apis = true
+
subdir('base')
objs = [base_objs]
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index c255dc6d..0c0ed930 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -55,7 +55,7 @@
#define DRV_MODULE_VER_MAJOR 1
#define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_VER_SUBMINOR 1
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
@@ -239,6 +239,8 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static int ena_create_io_queue(struct ena_ring *ring);
+static void ena_free_io_queues_all(struct ena_adapter *adapter);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
enum ena_ring_type ring_type);
@@ -510,7 +512,8 @@ static void ena_close(struct rte_eth_dev *dev)
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
- ena_stop(dev);
+ if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
+ ena_stop(dev);
adapter->state = ENA_ADAPTER_STATE_CLOSED;
ena_rx_queue_release_all(dev);
@@ -746,21 +749,12 @@ static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
static void ena_rx_queue_release(void *queue)
{
struct ena_ring *ring = (struct ena_ring *)queue;
- struct ena_adapter *adapter = ring->adapter;
- int ena_qid;
ena_assert_msg(ring->configured,
"API violation - releasing not configured queue");
ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
"API violation");
- /* Destroy HW queue */
- ena_qid = ENA_IO_RXQ_IDX(ring->id);
- ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
-
- /* Free all bufs */
- ena_rx_queue_release_bufs(ring);
-
/* Free ring resources */
if (ring->rx_buffer_info)
rte_free(ring->rx_buffer_info);
@@ -779,18 +773,12 @@ static void ena_rx_queue_release(void *queue)
static void ena_tx_queue_release(void *queue)
{
struct ena_ring *ring = (struct ena_ring *)queue;
- struct ena_adapter *adapter = ring->adapter;
- int ena_qid;
ena_assert_msg(ring->configured,
"API violation. Releasing not configured queue");
ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
"API violation");
- /* Destroy HW queue */
- ena_qid = ENA_IO_TXQ_IDX(ring->id);
- ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
-
/* Free all bufs */
ena_tx_queue_release_bufs(ring);
@@ -1078,10 +1066,86 @@ static void ena_stop(struct rte_eth_dev *dev)
(struct ena_adapter *)(dev->data->dev_private);
rte_timer_stop_sync(&adapter->timer_wd);
+ ena_free_io_queues_all(adapter);
adapter->state = ENA_ADAPTER_STATE_STOPPED;
}
+static int ena_create_io_queue(struct ena_ring *ring)
+{
+ struct ena_adapter *adapter;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_create_io_ctx ctx =
+ /* policy set to _HOST just to satisfy icc compiler */
+ { ENA_ADMIN_PLACEMENT_POLICY_HOST,
+ 0, 0, 0, 0, 0 };
+ uint16_t ena_qid;
+ int rc;
+
+ adapter = ring->adapter;
+ ena_dev = &adapter->ena_dev;
+
+ if (ring->type == ENA_RING_TYPE_TX) {
+ ena_qid = ENA_IO_TXQ_IDX(ring->id);
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+ ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+ ctx.queue_size = adapter->tx_ring_size;
+ } else {
+ ena_qid = ENA_IO_RXQ_IDX(ring->id);
+ ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+ ctx.queue_size = adapter->rx_ring_size;
+ }
+ ctx.qid = ena_qid;
+ ctx.msix_vector = -1; /* interrupts not used */
+ ctx.numa_node = ena_cpu_to_node(ring->id);
+
+ rc = ena_com_create_io_queue(ena_dev, &ctx);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "failed to create io queue #%d (qid:%d) rc: %d\n",
+ ring->id, ena_qid, rc);
+ return rc;
+ }
+
+ rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+ &ring->ena_com_io_sq,
+ &ring->ena_com_io_cq);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to get io queue handlers. queue num %d rc: %d\n",
+ ring->id, rc);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
+ }
+
+ if (ring->type == ENA_RING_TYPE_TX)
+ ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
+
+ return 0;
+}
+
+static void ena_free_io_queues_all(struct ena_adapter *adapter)
+{
+ struct rte_eth_dev *eth_dev = adapter->rte_dev;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int i;
+ uint16_t ena_qid;
+ uint16_t nb_rxq = eth_dev->data->nb_rx_queues;
+ uint16_t nb_txq = eth_dev->data->nb_tx_queues;
+
+ for (i = 0; i < nb_txq; ++i) {
+ ena_qid = ENA_IO_TXQ_IDX(i);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ }
+
+ for (i = 0; i < nb_rxq; ++i) {
+ ena_qid = ENA_IO_RXQ_IDX(i);
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+
+ ena_rx_queue_release_bufs(&adapter->rx_ring[i]);
+ }
+}
+
static int ena_queue_restart(struct ena_ring *ring)
{
int rc, bufs_num;
@@ -1089,6 +1153,12 @@ static int ena_queue_restart(struct ena_ring *ring)
ena_assert_msg(ring->configured == 1,
"Trying to restart unconfigured queue\n");
+ rc = ena_create_io_queue(ring);
+ if (rc) {
+ PMD_INIT_LOG(ERR, "Failed to create IO queue!\n");
+ return rc;
+ }
+
ring->next_to_clean = 0;
ring->next_to_use = 0;
@@ -1111,17 +1181,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
__rte_unused unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct ena_com_create_io_ctx ctx =
- /* policy set to _HOST just to satisfy icc compiler */
- { ENA_ADMIN_PLACEMENT_POLICY_HOST,
- ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
struct ena_ring *txq = NULL;
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
unsigned int i;
- int ena_qid;
- int rc;
- struct ena_com_dev *ena_dev = &adapter->ena_dev;
txq = &adapter->tx_ring[queue_idx];
@@ -1146,37 +1209,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- ena_qid = ENA_IO_TXQ_IDX(queue_idx);
-
- ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
- ctx.qid = ena_qid;
- ctx.msix_vector = -1; /* admin interrupts not used */
- ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
- ctx.queue_size = adapter->tx_ring_size;
- ctx.numa_node = ena_cpu_to_node(queue_idx);
-
- rc = ena_com_create_io_queue(ena_dev, &ctx);
- if (rc) {
- RTE_LOG(ERR, PMD,
- "failed to create io TX queue #%d (qid:%d) rc: %d\n",
- queue_idx, ena_qid, rc);
- return rc;
- }
- txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
- txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
-
- rc = ena_com_get_io_handlers(ena_dev, ena_qid,
- &txq->ena_com_io_sq,
- &txq->ena_com_io_cq);
- if (rc) {
- RTE_LOG(ERR, PMD,
- "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
- queue_idx, rc);
- goto err_destroy_io_queue;
- }
-
- ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node);
-
txq->port_id = dev->data->port_id;
txq->next_to_clean = 0;
txq->next_to_use = 0;
@@ -1188,8 +1220,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->tx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
- rc = -ENOMEM;
- goto err_destroy_io_queue;
+ return -ENOMEM;
}
txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
@@ -1197,8 +1228,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->empty_tx_reqs) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
- rc = -ENOMEM;
- goto err_free;
+ rte_free(txq->tx_buffer_info);
+ return -ENOMEM;
}
for (i = 0; i < txq->ring_size; i++)
@@ -1214,13 +1245,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;
return 0;
-
-err_free:
- rte_free(txq->tx_buffer_info);
-
-err_destroy_io_queue:
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- return rc;
}
static int ena_rx_queue_setup(struct rte_eth_dev *dev,
@@ -1230,16 +1254,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
__rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct ena_com_create_io_ctx ctx =
- /* policy set to _HOST just to satisfy icc compiler */
- { ENA_ADMIN_PLACEMENT_POLICY_HOST,
- ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_ring *rxq = NULL;
- uint16_t ena_qid = 0;
- int i, rc = 0;
- struct ena_com_dev *ena_dev = &adapter->ena_dev;
+ int i;
rxq = &adapter->rx_ring[queue_idx];
if (rxq->configured) {
@@ -1263,36 +1281,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- ena_qid = ENA_IO_RXQ_IDX(queue_idx);
-
- ctx.qid = ena_qid;
- ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
- ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- ctx.msix_vector = -1; /* admin interrupts not used */
- ctx.queue_size = adapter->rx_ring_size;
- ctx.numa_node = ena_cpu_to_node(queue_idx);
-
- rc = ena_com_create_io_queue(ena_dev, &ctx);
- if (rc) {
- RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
- queue_idx, rc);
- return rc;
- }
-
- rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
- rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
-
- rc = ena_com_get_io_handlers(ena_dev, ena_qid,
- &rxq->ena_com_io_sq,
- &rxq->ena_com_io_cq);
- if (rc) {
- RTE_LOG(ERR, PMD,
- "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
- queue_idx, rc);
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- return rc;
- }
-
rxq->port_id = dev->data->port_id;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
@@ -1304,7 +1292,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!rxq->rx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
- ena_com_destroy_io_queue(ena_dev, ena_qid);
return -ENOMEM;
}
@@ -1315,7 +1302,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
rte_free(rxq->rx_buffer_info);
rxq->rx_buffer_info = NULL;
- ena_com_destroy_io_queue(ena_dev, ena_qid);
return -ENOMEM;
}
@@ -1326,7 +1312,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
rxq->configured = 1;
dev->data->rx_queues[queue_idx] = rxq;
- return rc;
+ return 0;
}
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
@@ -1703,7 +1689,7 @@ static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
(struct ena_adapter *)(eth_dev->data->dev_private);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ return 0;
if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
ena_close(eth_dev);
@@ -1924,7 +1910,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* fill mbuf attributes if any */
ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
- mbuf_head->hash.rss = (uint32_t)rx_ring->id;
+ mbuf_head->hash.rss = ena_rx_ctx.hash;
/* pass to DPDK application head mbuf */
rx_pkts[recv_idx] = mbuf_head;
diff --git a/drivers/net/enetc/Makefile b/drivers/net/enetc/Makefile
new file mode 100644
index 00000000..9895501d
--- /dev/null
+++ b/drivers/net/enetc/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_enetc.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+EXPORT_MAP := rte_pmd_enetc_version.map
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_ENETC_PMD) += enetc_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENETC_PMD) += enetc_rxtx.c
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/enetc/base/enetc_hw.h b/drivers/net/enetc/base/enetc_hw.h
new file mode 100644
index 00000000..f36fa11e
--- /dev/null
+++ b/drivers/net/enetc/base/enetc_hw.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _ENETC_HW_H_
+#define _ENETC_HW_H_
+#include <rte_io.h>
+
+#define BIT(x) ((uint64_t)1 << ((x)))
+
+/* ENETC device IDs */
+#define ENETC_DEV_ID_VF 0xef00
+#define ENETC_DEV_ID 0xe100
+
+/* ENETC register block BAR */
+#define ENETC_BAR_REGS 0x0
+
+/* SI regs, offset: 0h */
+#define ENETC_SIMR 0x0
+#define ENETC_SIMR_EN BIT(31)
+
+#define ENETC_SIPMAR0 0x80
+#define ENETC_SIPMAR1 0x84
+
+#define ENETC_SICAPR0 0x900
+#define ENETC_SICAPR1 0x904
+
+#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
+#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
+
+#define ENETC_SICCAPR 0x1200
+
+/* enum for BD type */
+enum enetc_bdr_type {TX, RX};
+
+#define ENETC_BDR(type, n, off) (0x8000 + (type) * 0x100 + (n) * 0x200 \
+ + (off))
+/* RX BDR reg offsets */
+#define ENETC_RBMR 0x0 /* RX BDR mode register*/
+#define ENETC_RBMR_EN BIT(31)
+
+#define ENETC_RBSR 0x4 /* Rx BDR status register*/
+#define ENETC_RBBSR 0x8 /* Rx BDR buffer size register*/
+#define ENETC_RBCIR 0xc /* Rx BDR consumer index register*/
+#define ENETC_RBBAR0 0x10 /* Rx BDR base address register 0 */
+#define ENETC_RBBAR1 0x14 /* Rx BDR base address register 1*/
+#define ENETC_RBPIR 0x18 /* Rx BDR producer index register*/
+#define ENETC_RBLENR 0x20 /* Rx BDR length register*/
+#define ENETC_RBIER 0xa0 /* Rx BDR interrupt enable register*/
+#define ENETC_RBIER_RXTIE BIT(0)
+#define ENETC_RBIDR 0xa4 /* Rx BDR interrupt detect register*/
+#define ENETC_RBICIR0 0xa8 /* Rx BDR interrupt coalescing register 0*/
+#define ENETC_RBICIR0_ICEN BIT(31)
+
+
+#define ENETC_TBMR 0x0 /* Tx BDR mode register (TBMR) 32 RW */
+#define ENETC_TBSR 0x4 /* Tx BDR status register (TBSR) 32 RO */
+#define ENETC_TBBAR0 0x10 /* Tx BDR base address register 0 (TBBAR0) 32 RW */
+#define ENETC_TBBAR1 0x14 /* Tx BDR base address register 1 (TBBAR1) 32 RW */
+#define ENETC_TBCIR 0x18 /* Tx BDR consumer index register (TBCIR) 32 RW */
+#define ENETC_TBCISR 0x1C /* Tx BDR consumer index shadow register 32 RW */
+#define ENETC_TBIER 0xA0 /* Tx BDR interrupt enable register 32 RW */
+#define ENETC_TBIDR 0xA4 /* Tx BDR interrupt detect register 32 RO */
+#define ENETC_TBICR0 0xA8 /* Tx BDR interrupt coalescing register 0 32 RW */
+#define ENETC_TBICR1 0xAC /* Tx BDR interrupt coalescing register 1 32 RW */
+#define ENETC_TBLENR 0x20
+
+#define ENETC_TBCISR_IDX_MASK 0xffff
+#define ENETC_TBIER_TXFIE BIT(1)
+
+#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7)
+#define ENETC_TBMR_EN BIT(31)
+
+/* Port regs, offset: 1_0000h */
+#define ENETC_PORT_BASE 0x10000
+#define ENETC_PMR 0x00000
+#define ENETC_PMR_EN (BIT(16) | BIT(17) | BIT(18))
+#define ENETC_PSR 0x00004 /* RO */
+#define ENETC_PSIPMR 0x00018
+#define ENETC_PSIPMR_SET_UP(n) (0x1 << (n)) /* n = SI index */
+#define ENETC_PSIPMR_SET_MP(n) (0x1 << ((n) + 8))
+#define ENETC_PSIPMR_SET_VLAN_MP(n) (0x1 << ((n) + 16))
+#define ENETC_PSIPMAR0(n) (0x00100 + (n) * 0x20)
+#define ENETC_PSIPMAR1(n) (0x00104 + (n) * 0x20)
+#define ENETC_PCAPR0 0x00900
+#define ENETC_PCAPR1 0x00904
+
+#define ENETC_PV0CFGR(n) (0x00920 + (n) * 0x10)
+#define ENETC_PVCFGR_SET_TXBDR(val) ((val) & 0xff)
+#define ENETC_PVCFGR_SET_RXBDR(val) (((val) & 0xff) << 16)
+
+#define ENETC_PM0_CMD_CFG 0x08008
+#define ENETC_PM0_TX_EN BIT(0)
+#define ENETC_PM0_RX_EN BIT(1)
+
+#define ENETC_PM0_MAXFRM 0x08014
+#define ENETC_SET_MAXFRM(val) ((val) << 16)
+
+#define ENETC_PM0_STATUS 0x08304
+#define ENETC_LINK_MODE 0x0000000000080000ULL
+#define ENETC_LINK_STATUS 0x0000000000010000ULL
+#define ENETC_LINK_SPEED_MASK 0x0000000000060000ULL
+#define ENETC_LINK_SPEED_10M 0x0ULL
+#define ENETC_LINK_SPEED_100M 0x0000000000020000ULL
+#define ENETC_LINK_SPEED_1G 0x0000000000040000ULL
+
+/* Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE 0x20000
+#define ENETC_G_EIPBRR0 0x00bf8
+#define ENETC_G_EIPBRR1 0x00bfc
+
+/* general register accessors */
+#define enetc_rd_reg(reg) rte_read32((void *)(reg))
+#define enetc_wr_reg(reg, val) rte_write32((val), (void *)(reg))
+#define enetc_rd(hw, off) enetc_rd_reg((size_t)(hw)->reg + (off))
+#define enetc_wr(hw, off, val) enetc_wr_reg((size_t)(hw)->reg + (off), val)
+/* port register accessors - PF only */
+#define enetc_port_rd(hw, off) enetc_rd_reg((size_t)(hw)->port + (off))
+#define enetc_port_wr(hw, off, val) \
+ enetc_wr_reg((size_t)(hw)->port + (off), val)
+/* global register accessors - PF only */
+#define enetc_global_rd(hw, off) \
+ enetc_rd_reg((size_t)(hw)->global + (off))
+#define enetc_global_wr(hw, off, val) \
+ enetc_wr_reg((size_t)(hw)->global + (off), val)
+/* BDR register accessors, see ENETC_BDR() */
+#define enetc_bdr_rd(hw, t, n, off) \
+ enetc_rd(hw, ENETC_BDR(t, n, off))
+#define enetc_bdr_wr(hw, t, n, off, val) \
+ enetc_wr(hw, ENETC_BDR(t, n, off), val)
+
+#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off)
+#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off)
+#define enetc_txbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, TX, n, off, val)
+#define enetc_rxbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, RX, n, off, val)
+
+#define ENETC_TX_ADDR(txq, addr) ((void *)((txq)->enetc_txbdr + (addr)))
+
+#define ENETC_TXBD_FLAGS_IE BIT(13)
+#define ENETC_TXBD_FLAGS_F BIT(15)
+
+/* ENETC Parsed values (Little Endian) */
+#define ENETC_PKT_TYPE_ETHER 0x0060
+#define ENETC_PKT_TYPE_IPV4 0x0000
+#define ENETC_PKT_TYPE_IPV6 0x0020
+#define ENETC_PKT_TYPE_IPV4_TCP \
+ (0x0010 | ENETC_PKT_TYPE_IPV4)
+#define ENETC_PKT_TYPE_IPV6_TCP \
+ (0x0010 | ENETC_PKT_TYPE_IPV6)
+#define ENETC_PKT_TYPE_IPV4_UDP \
+ (0x0011 | ENETC_PKT_TYPE_IPV4)
+#define ENETC_PKT_TYPE_IPV6_UDP \
+ (0x0011 | ENETC_PKT_TYPE_IPV6)
+#define ENETC_PKT_TYPE_IPV4_SCTP \
+ (0x0013 | ENETC_PKT_TYPE_IPV4)
+#define ENETC_PKT_TYPE_IPV6_SCTP \
+ (0x0013 | ENETC_PKT_TYPE_IPV6)
+#define ENETC_PKT_TYPE_IPV4_ICMP \
+ (0x0003 | ENETC_PKT_TYPE_IPV4)
+#define ENETC_PKT_TYPE_IPV6_ICMP \
+ (0x0003 | ENETC_PKT_TYPE_IPV6)
+
+/* PCI device info */
+struct enetc_hw {
+ void *reg; /* SI registers, used by all PCI functions */
+ void *port; /* Port registers, PF only */
+ void *global; /* IP global registers, PF only */
+};
+
+struct enetc_eth_mac_info {
+ uint8_t addr[ETHER_ADDR_LEN];
+ uint8_t perm_addr[ETHER_ADDR_LEN];
+ uint8_t get_link_status;
+};
+
+struct enetc_eth_hw {
+ struct rte_eth_dev *ndev;
+ struct enetc_hw hw;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint8_t revision_id;
+ struct enetc_eth_mac_info mac;
+};
+
+/* Transmit Descriptor */
+struct enetc_tx_desc {
+ uint64_t addr;
+ uint16_t frm_len;
+ uint16_t buf_len;
+ uint32_t flags_errors;
+};
+
+/* TX Buffer Descriptors (BD) */
+struct enetc_tx_bd {
+ uint64_t addr;
+ uint16_t buf_len;
+ uint16_t frm_len;
+ uint16_t err_csum;
+ uint16_t flags;
+};
+
+/* RX buffer descriptor */
+union enetc_rx_bd {
+ struct {
+ uint64_t addr;
+ uint8_t reserved[8];
+ } w;
+ struct {
+ uint16_t inet_csum;
+ uint16_t parse_summary;
+ uint32_t rss_hash;
+ uint16_t buf_len;
+ uint16_t vlan_opt;
+ union {
+ struct {
+ uint16_t flags;
+ uint16_t error;
+ };
+ uint32_t lstatus;
+ };
+ } r;
+};
+
+#endif
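
For orientation, the BDR accessor macros above reduce to fixed offsets from the SI register base; two worked values, using TX = 0 and RX = 1 from the enetc_bdr_type enum (illustrative only, not part of the patch):

/* ENETC_BDR(type, n, off) = 0x8000 + type * 0x100 + n * 0x200 + off
 *
 * RX ring 0, buffer size register (ENETC_RBBSR, 0x08):
 *   0x8000 + 1 * 0x100 + 0 * 0x200 + 0x08 = 0x8108
 * TX ring 1, mode register (ENETC_TBMR, 0x00):
 *   0x8000 + 0 * 0x100 + 1 * 0x200 + 0x00 = 0x8200
 *
 * So enetc_rxbdr_wr(hw, 0, ENETC_RBBSR, sz) ends up as
 * enetc_wr_reg((size_t)hw->reg + 0x8108, sz).
 */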
diff --git a/drivers/net/enetc/enetc.h b/drivers/net/enetc/enetc.h
new file mode 100644
index 00000000..0e80d1c5
--- /dev/null
+++ b/drivers/net/enetc/enetc.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _ENETC_H_
+#define _ENETC_H_
+
+#include <rte_time.h>
+
+#include "base/enetc_hw.h"
+
+#define PCI_VENDOR_ID_FREESCALE 0x1957
+
+/* Max TX rings per ENETC. */
+#define MAX_TX_RINGS 2
+
+/* Max RX rings per ENETC. */
+#define MAX_RX_RINGS 1
+
+/* Max BD counts per Ring. */
+#define MAX_BD_COUNT 64000
+/* Min BD counts per Ring. */
+#define MIN_BD_COUNT 32
+/* BD ALIGN */
+#define BD_ALIGN 8
+
+/*
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+
+/*
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((uint32_t)(n))
+
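/*
 * Worked example for the two helpers above (illustrative value):
 *   n = 0x0000000112345678ULL
 *   upper_32_bits(n) == 0x00000001   (shifted right twice by 16 so the code
 *                                     stays well defined if n is a 32-bit type)
 *   lower_32_bits(n) == 0x12345678
 */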
+#define ENETC_TXBD(BDR, i) (&(((struct enetc_tx_bd *)((BDR).bd_base))[i]))
+#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+struct enetc_swbd {
+ struct rte_mbuf *buffer_addr;
+};
+
+struct enetc_bdr {
+ struct rte_eth_dev *ndev;
+ struct rte_mempool *mb_pool; /* mbuf pool to populate RX ring. */
+ void *bd_base; /* points to Rx or Tx BD ring */
+ union {
+ void *tcir;
+ void *rcir;
+ };
+ uint16_t index;
+ int bd_count; /* # of BDs */
+ int next_to_use;
+ int next_to_clean;
+ struct enetc_swbd *q_swbd;
+ union {
+ void *tcisr; /* Tx */
+ int next_to_alloc; /* Rx */
+ };
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct enetc_eth_adapter {
+ struct rte_eth_dev *ndev;
+ struct enetc_eth_hw hw;
+};
+
+#define ENETC_DEV_PRIVATE(adapter) \
+ ((struct enetc_eth_adapter *)adapter)
+
+#define ENETC_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct enetc_eth_adapter *)adapter)->hw)
+
+#define ENETC_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct enetc_eth_adapter *)adapter)->stats)
+
+#define ENETC_DEV_PRIVATE_TO_INTR(adapter) \
+ (&((struct enetc_eth_adapter *)adapter)->intr)
+
+#define ENETC_GET_HW_ADDR(reg, addr) ((void *)(((size_t)reg) + (addr)))
+#define ENETC_REG_READ(addr) (*(uint32_t *)addr)
+#define ENETC_REG_WRITE(addr, val) (*(uint32_t *)addr = val)
+#define ENETC_REG_WRITE_RELAXED(addr, val) (*(uint32_t *)addr = val)
+
+/*
+ * RX/TX ENETC function prototypes
+ */
+uint16_t enetc_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+
+int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt);
+
+static inline int
+enetc_bd_unused(struct enetc_bdr *bdr)
+{
+ if (bdr->next_to_clean > bdr->next_to_use)
+ return bdr->next_to_clean - bdr->next_to_use - 1;
+
+ return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
+}
+#endif /* _ENETC_H_ */
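
The free-descriptor computation in enetc_bd_unused() above keeps one slot permanently unused so that a full ring can be distinguished from an empty one; two worked cases with illustrative values (not part of the patch):

/* bd_count = 8, next_to_clean = 2, next_to_use = 5:
 *   8 + 2 - 5 - 1 = 4 BDs free (indices 2..4 still in flight)
 * bd_count = 8, next_to_clean = 5, next_to_use = 2:
 *   5 - 2 - 1 = 2 BDs free (indices 5, 6, 7, 0, 1 still in flight)
 */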
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
new file mode 100644
index 00000000..023fe751
--- /dev/null
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <stdbool.h>
+#include <rte_ethdev_pci.h>
+
+#include "enetc_logs.h"
+#include "enetc.h"
+
+int enetc_logtype_pmd;
+
+/* Functions Prototypes */
+static int enetc_dev_configure(struct rte_eth_dev *dev);
+static int enetc_dev_start(struct rte_eth_dev *dev);
+static void enetc_dev_stop(struct rte_eth_dev *dev);
+static void enetc_dev_close(struct rte_eth_dev *dev);
+static void enetc_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static int enetc_hardware_init(struct enetc_eth_hw *hw);
+static int enetc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+static void enetc_rx_queue_release(void *rxq);
+static int enetc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+static void enetc_tx_queue_release(void *txq);
+static const uint32_t *enetc_supported_ptypes_get(struct rte_eth_dev *dev);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_enetc_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+/* Features supported by this driver */
+static const struct eth_dev_ops enetc_ops = {
+ .dev_configure = enetc_dev_configure,
+ .dev_start = enetc_dev_start,
+ .dev_stop = enetc_dev_stop,
+ .dev_close = enetc_dev_close,
+ .link_update = enetc_link_update,
+ .dev_infos_get = enetc_dev_infos_get,
+ .rx_queue_setup = enetc_rx_queue_setup,
+ .rx_queue_release = enetc_rx_queue_release,
+ .tx_queue_setup = enetc_tx_queue_setup,
+ .tx_queue_release = enetc_tx_queue_release,
+ .dev_supported_ptypes_get = enetc_supported_ptypes_get,
+};
+
+/**
+ * Initialisation of the enetc device
+ *
+ * @param eth_dev
+ * - Pointer to the structure rte_eth_dev
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static int
+enetc_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int error = 0;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ eth_dev->dev_ops = &enetc_ops;
+ eth_dev->rx_pkt_burst = &enetc_recv_pkts;
+ eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
+
+ /* Retrieving and storing the HW base address of device */
+ hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
+ hw->device_id = pci_dev->id.device_id;
+
+ error = enetc_hardware_init(hw);
+ if (error != 0) {
+ ENETC_PMD_ERR("Hardware initialization failed");
+ return -1;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * 1);
+ error = -ENOMEM;
+ return -1;
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+ return 0;
+}
+
+static int
+enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static int
+enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static int
+enetc_dev_start(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+ val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
+ ENETC_PM0_CMD_CFG));
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
+ val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+
+ /* Enable port */
+ val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
+ val | ENETC_PMR_EN);
+
+ return 0;
+}
+
+static void
+enetc_dev_stop(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+ /* Disable port */
+ val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
+ val & (~ENETC_PMR_EN));
+
+ val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
+ ENETC_PM0_CMD_CFG));
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
+ val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
+}
+
+static void
+enetc_dev_close(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ enetc_dev_stop(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ enetc_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ enetc_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+static const uint32_t *
+enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ uint32_t status;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&link, 0, sizeof(link));
+
+ status = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
+ ENETC_PM0_STATUS));
+
+ if (status & ENETC_LINK_MODE)
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ else
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+
+ if (status & ENETC_LINK_STATUS)
+ link.link_status = ETH_LINK_UP;
+ else
+ link.link_status = ETH_LINK_DOWN;
+
+ switch (status & ENETC_LINK_SPEED_MASK) {
+ case ENETC_LINK_SPEED_1G:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+
+ case ENETC_LINK_SPEED_100M:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+
+ default:
+ case ENETC_LINK_SPEED_10M:
+ link.link_speed = ETH_SPEED_NUM_10M;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetc_hardware_init(struct enetc_eth_hw *hw)
+{
+ uint32_t psipmr = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ /* Calculating and storing the base HW addresses */
+ hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
+ hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);
+
+ /* Enabling Station Interface */
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.reg, ENETC_SIMR),
+ ENETC_SIMR_EN);
+
+ /* Setting to accept broadcast packets for each interface */
+ psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0) |
+ ENETC_PSIPMR_SET_VLAN_MP(0);
+ psipmr |= ENETC_PSIPMR_SET_UP(1) | ENETC_PSIPMR_SET_MP(1) |
+ ENETC_PSIPMR_SET_VLAN_MP(1);
+ psipmr |= ENETC_PSIPMR_SET_UP(2) | ENETC_PSIPMR_SET_MP(2) |
+ ENETC_PSIPMR_SET_VLAN_MP(2);
+
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMR),
+ psipmr);
+
+ /* Enabling broadcast address */
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR0(0)),
+ 0xFFFFFFFF);
+ ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR1(0)),
+ 0xFFFF << 16);
+
+ return 0;
+}
+
+static void
+enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ PMD_INIT_FUNC_TRACE();
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = MAX_BD_COUNT,
+ .nb_min = MIN_BD_COUNT,
+ .nb_align = BD_ALIGN,
+ };
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = MAX_BD_COUNT,
+ .nb_min = MIN_BD_COUNT,
+ .nb_align = BD_ALIGN,
+ };
+ dev_info->max_rx_queues = MAX_RX_RINGS;
+ dev_info->max_tx_queues = MAX_TX_RINGS;
+ dev_info->max_rx_pktlen = 1500;
+}
+
+static int
+enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
+{
+ int size;
+
+ size = nb_desc * sizeof(struct enetc_swbd);
+ txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (txr->q_swbd == NULL)
+ return -ENOMEM;
+
+ size = nb_desc * sizeof(struct enetc_tx_bd);
+ txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (txr->bd_base == NULL) {
+ rte_free(txr->q_swbd);
+ txr->q_swbd = NULL;
+ return -ENOMEM;
+ }
+
+ txr->bd_count = nb_desc;
+ txr->next_to_clean = 0;
+ txr->next_to_use = 0;
+
+ return 0;
+}
+
+static void
+enetc_free_bdr(struct enetc_bdr *rxr)
+{
+ rte_free(rxr->q_swbd);
+ rte_free(rxr->bd_base);
+ rxr->q_swbd = NULL;
+ rxr->bd_base = NULL;
+}
+
+static void
+enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ uintptr_t base_addr;
+ uint32_t tbmr;
+
+ base_addr = (uintptr_t)tx_ring->bd_base;
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
+ lower_32_bits((uint64_t)base_addr));
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
+ upper_32_bits((uint64_t)base_addr));
+ enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
+ ENETC_RTBLENR_LEN(tx_ring->bd_count));
+
+ tbmr = ENETC_TBMR_EN;
+ /* enable ring */
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+ enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
+ enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
+ tx_ring->tcir = (void *)((size_t)hw->reg +
+ ENETC_BDR(TX, idx, ENETC_TBCIR));
+ tx_ring->tcisr = (void *)((size_t)hw->reg +
+ ENETC_BDR(TX, idx, ENETC_TBCISR));
+}
+
+static int
+enetc_alloc_tx_resources(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc)
+{
+ int err;
+ struct enetc_bdr *tx_ring;
+ struct rte_eth_dev_data *data = dev->data;
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(data->dev_private);
+
+ tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
+ if (tx_ring == NULL) {
+ ENETC_PMD_ERR("Failed to allocate TX ring memory");
+ err = -ENOMEM;
+ return -1;
+ }
+
+ err = enetc_alloc_txbdr(tx_ring, nb_desc);
+ if (err)
+ goto fail;
+
+ tx_ring->index = queue_idx;
+ tx_ring->ndev = dev;
+ enetc_setup_txbdr(&priv->hw.hw, tx_ring);
+ data->tx_queues[queue_idx] = tx_ring;
+
+ return 0;
+fail:
+ rte_free(tx_ring);
+
+ return err;
+}
+
+static int
+enetc_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ if (nb_desc > MAX_BD_COUNT)
+ return -1;
+
+ err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);
+
+ return err;
+}
+
+static void
+enetc_tx_queue_release(void *txq)
+{
+ if (txq == NULL)
+ return;
+
+ struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
+ struct enetc_eth_hw *eth_hw =
+ ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
+ struct enetc_hw *hw;
+ struct enetc_swbd *tx_swbd;
+ int i;
+ uint32_t val;
+
+ /* Disable the ring */
+ hw = &eth_hw->hw;
+ val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
+ val &= (~ENETC_TBMR_EN);
+ enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);
+
+ /* clean the ring*/
+ i = tx_ring->next_to_clean;
+ tx_swbd = &tx_ring->q_swbd[i];
+ while (tx_swbd->buffer_addr != NULL) {
+ rte_pktmbuf_free(tx_swbd->buffer_addr);
+ tx_swbd->buffer_addr = NULL;
+ tx_swbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = &tx_ring->q_swbd[i];
+ }
+ }
+
+ enetc_free_bdr(tx_ring);
+ rte_free(tx_ring);
+}
+
+static int
+enetc_alloc_rxbdr(struct enetc_bdr *rxr,
+ uint16_t nb_rx_desc)
+{
+ int size;
+
+ size = nb_rx_desc * sizeof(struct enetc_swbd);
+ rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (rxr->q_swbd == NULL)
+ return -ENOMEM;
+
+ size = nb_rx_desc * sizeof(union enetc_rx_bd);
+ rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (rxr->bd_base == NULL) {
+ rte_free(rxr->q_swbd);
+ rxr->q_swbd = NULL;
+ return -ENOMEM;
+ }
+
+ rxr->bd_count = nb_rx_desc;
+ rxr->next_to_clean = 0;
+ rxr->next_to_use = 0;
+ rxr->next_to_alloc = 0;
+
+ return 0;
+}
+
+static void
+enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
+ struct rte_mempool *mb_pool)
+{
+ int idx = rx_ring->index;
+ uintptr_t base_addr;
+ uint16_t buf_size;
+
+ base_addr = (uintptr_t)rx_ring->bd_base;
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
+ lower_32_bits((uint64_t)base_addr));
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
+ upper_32_bits((uint64_t)base_addr));
+ enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
+ ENETC_RTBLENR_LEN(rx_ring->bd_count));
+
+ rx_ring->mb_pool = mb_pool;
+ /* enable ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+ rx_ring->rcir = (void *)((size_t)hw->reg +
+ ENETC_BDR(RX, idx, ENETC_RBCIR));
+ enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
+}
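/*
 * Worked example for the RBBSR value programmed above (illustrative, assuming
 * a pool created with RTE_MBUF_DEFAULT_BUF_SIZE): the data room is 2176 B, so
 * buf_size = 2176 - RTE_PKTMBUF_HEADROOM (128) = 2048, i.e. the hardware may
 * place at most 2048 B of received data per buffer descriptor.
 */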
+
+static int
+enetc_alloc_rx_resources(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ struct rte_mempool *mb_pool)
+{
+ int err;
+ struct enetc_bdr *rx_ring;
+ struct rte_eth_dev_data *data = dev->data;
+ struct enetc_eth_adapter *adapter =
+ ENETC_DEV_PRIVATE(data->dev_private);
+
+ rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
+ if (rx_ring == NULL) {
+ ENETC_PMD_ERR("Failed to allocate RX ring memory");
+ err = -ENOMEM;
+ return err;
+ }
+
+ err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
+ if (err)
+ goto fail;
+
+ rx_ring->index = rx_queue_id;
+ rx_ring->ndev = dev;
+ enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
+ data->rx_queues[rx_queue_id] = rx_ring;
+
+ return 0;
+fail:
+ rte_free(rx_ring);
+
+ return err;
+}
+
+static int
+enetc_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ if (nb_rx_desc > MAX_BD_COUNT)
+ return -1;
+
+ err = enetc_alloc_rx_resources(dev, rx_queue_id,
+ nb_rx_desc,
+ mb_pool);
+
+ return err;
+}
+
+static void
+enetc_rx_queue_release(void *rxq)
+{
+ if (rxq == NULL)
+ return;
+
+ struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
+ struct enetc_eth_hw *eth_hw =
+ ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
+ struct enetc_swbd *q_swbd;
+ struct enetc_hw *hw;
+ uint32_t val;
+ int i;
+
+ /* Disable the ring */
+ hw = &eth_hw->hw;
+ val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
+ val &= (~ENETC_RBMR_EN);
+ enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);
+
+ /* Clean the ring */
+ i = rx_ring->next_to_clean;
+ q_swbd = &rx_ring->q_swbd[i];
+ while (i != rx_ring->next_to_use) {
+ rte_pktmbuf_free(q_swbd->buffer_addr);
+ q_swbd->buffer_addr = NULL;
+ q_swbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ q_swbd = &rx_ring->q_swbd[i];
+ }
+ }
+
+ enetc_free_bdr(rx_ring);
+ rte_free(rx_ring);
+}
+
+static int
+enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct enetc_eth_adapter),
+ enetc_dev_init);
+}
+
+static int
+enetc_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
+}
+
+static struct rte_pci_driver rte_enetc_pmd = {
+ .id_table = pci_id_enetc_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = enetc_pci_probe,
+ .remove = enetc_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
+
+RTE_INIT(enetc_pmd_init_log)
+{
+ enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
+ if (enetc_logtype_pmd >= 0)
+ rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/enetc/enetc_logs.h b/drivers/net/enetc/enetc_logs.h
new file mode 100644
index 00000000..c8a6c0cf
--- /dev/null
+++ b/drivers/net/enetc/enetc_logs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _ENETC_LOGS_H_
+#define _ENETC_LOGS_H_
+
+extern int enetc_logtype_pmd;
+
+#define ENETC_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, enetc_logtype_pmd, "enetc_net: " \
+ fmt "\n", ##args)
+
+#define ENETC_PMD_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, enetc_logtype_pmd, "enetc_net: %s(): "\
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() ENETC_PMD_DEBUG(">>")
+
+#define ENETC_PMD_CRIT(fmt, args...) \
+ ENETC_PMD_LOG(CRIT, fmt, ## args)
+#define ENETC_PMD_INFO(fmt, args...) \
+ ENETC_PMD_LOG(INFO, fmt, ## args)
+#define ENETC_PMD_ERR(fmt, args...) \
+ ENETC_PMD_LOG(ERR, fmt, ## args)
+#define ENETC_PMD_WARN(fmt, args...) \
+ ENETC_PMD_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, compiled out at build time if the level is lower than the current DP log level */
+#define ENETC_PMD_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define ENETC_PMD_DP_DEBUG(fmt, args...) \
+ ENETC_PMD_DP_LOG(DEBUG, fmt, ## args)
+#define ENETC_PMD_DP_INFO(fmt, args...) \
+ ENETC_PMD_DP_LOG(INFO, fmt, ## args)
+#define ENETC_PMD_DP_WARN(fmt, args...) \
+ ENETC_PMD_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _ENETC_LOGS_H_*/
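
The macros above route everything through the enetc_logtype_pmd logtype registered by the RTE_INIT constructor in enetc_ethdev.c, so control-path messages can be filtered at run time, while the DP variants go through RTE_LOG_DP and are compiled out entirely when built with a lower data-path log level. A minimal, hedged usage sketch (the function and queue_idx value are illustrative, not part of the patch):

#include <rte_log.h>
#include "enetc_logs.h"

/* Illustrative only: raise the PMD logtype to DEBUG at run time and emit
 * messages through the macros defined above.
 */
static void
enetc_log_usage_example(uint16_t queue_idx)
{
	rte_log_set_level(enetc_logtype_pmd, RTE_LOG_DEBUG);
	ENETC_PMD_DEBUG("setting up TX queue %u", queue_idx);
	ENETC_PMD_DP_DEBUG("data-path message, dropped if DP level is lower\n");
}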
diff --git a/drivers/net/enetc/enetc_rxtx.c b/drivers/net/enetc/enetc_rxtx.c
new file mode 100644
index 00000000..631e2430
--- /dev/null
+++ b/drivers/net/enetc/enetc_rxtx.c
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include "rte_ethdev.h"
+#include "rte_malloc.h"
+#include "rte_memzone.h"
+
+#include "base/enetc_hw.h"
+#include "enetc.h"
+#include "enetc_logs.h"
+
+#define ENETC_RXBD_BUNDLE 8 /* Number of BDs to update at once */
+
+static int
+enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int tx_frm_cnt = 0;
+ struct enetc_swbd *tx_swbd;
+ int i;
+
+ i = tx_ring->next_to_clean;
+ tx_swbd = &tx_ring->q_swbd[i];
+ while ((int)(enetc_rd_reg(tx_ring->tcisr) &
+ ENETC_TBCISR_IDX_MASK) != i) {
+ rte_pktmbuf_free(tx_swbd->buffer_addr);
+ tx_swbd->buffer_addr = NULL;
+ tx_swbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = &tx_ring->q_swbd[0];
+ }
+
+ tx_frm_cnt++;
+ }
+
+ tx_ring->next_to_clean = i;
+ return tx_frm_cnt;
+}
+
+uint16_t
+enetc_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct enetc_swbd *tx_swbd;
+ int i, start;
+ struct enetc_tx_bd *txbd;
+ struct enetc_bdr *tx_ring = (struct enetc_bdr *)tx_queue;
+
+ i = tx_ring->next_to_use;
+ start = 0;
+ while (nb_pkts--) {
+ enetc_clean_tx_ring(tx_ring);
+ tx_ring->q_swbd[i].buffer_addr = tx_pkts[start];
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->q_swbd[i];
+ txbd->frm_len = tx_pkts[start]->pkt_len;
+ txbd->buf_len = txbd->frm_len;
+ txbd->flags = rte_cpu_to_le_16(ENETC_TXBD_FLAGS_F);
+ txbd->addr = (uint64_t)(uintptr_t)
+ rte_cpu_to_le_64((size_t)tx_swbd->buffer_addr->buf_addr +
+ tx_swbd->buffer_addr->data_off);
+ i++;
+ start++;
+ if (unlikely(i == tx_ring->bd_count))
+ i = 0;
+ }
+
+ tx_ring->next_to_use = i;
+ enetc_wr_reg(tx_ring->tcir, i);
+ return start;
+}
+
+int
+enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+{
+ struct enetc_swbd *rx_swbd;
+ union enetc_rx_bd *rxbd;
+ int i, j;
+
+ i = rx_ring->next_to_use;
+ rx_swbd = &rx_ring->q_swbd[i];
+ rxbd = ENETC_RXBD(*rx_ring, i);
+ for (j = 0; j < buff_cnt; j++) {
+ rx_swbd->buffer_addr =
+ rte_cpu_to_le_64(rte_mbuf_raw_alloc(rx_ring->mb_pool));
+ rxbd->w.addr = (uint64_t)(uintptr_t)
+ rx_swbd->buffer_addr->buf_addr +
+ rx_swbd->buffer_addr->data_off;
+ /* clear 'R' as well */
+ rxbd->r.lstatus = 0;
+ rx_swbd++;
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ rx_swbd = &rx_ring->q_swbd[i];
+ }
+ }
+
+ if (likely(j)) {
+ rx_ring->next_to_alloc = i;
+ rx_ring->next_to_use = i;
+ enetc_wr_reg(rx_ring->rcir, i);
+ }
+
+ return j;
+}
+
+
+static inline void __attribute__((hot))
+enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
+{
+ ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
+
+ m->packet_type = RTE_PTYPE_UNKNOWN;
+ switch (parse_results) {
+ case ENETC_PKT_TYPE_ETHER:
+ m->packet_type = RTE_PTYPE_L2_ETHER;
+ break;
+ case ENETC_PKT_TYPE_IPV4:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4;
+ break;
+ case ENETC_PKT_TYPE_IPV6:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6;
+ break;
+ case ENETC_PKT_TYPE_IPV4_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP;
+ break;
+ case ENETC_PKT_TYPE_IPV6_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_TCP;
+ break;
+ case ENETC_PKT_TYPE_IPV4_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_UDP;
+ break;
+ case ENETC_PKT_TYPE_IPV6_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_UDP;
+ break;
+ case ENETC_PKT_TYPE_IPV4_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_SCTP;
+ break;
+ case ENETC_PKT_TYPE_IPV6_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_SCTP;
+ break;
+ case ENETC_PKT_TYPE_IPV4_ICMP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_ICMP;
+ break;
+ case ENETC_PKT_TYPE_IPV6_ICMP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_ICMP;
+ break;
+ /* More switch cases can be added */
+ default:
+ m->packet_type = RTE_PTYPE_UNKNOWN;
+ }
+}
+
+static int
+enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ struct rte_mbuf **rx_pkts,
+ int work_limit)
+{
+ int rx_frm_cnt = 0;
+ int cleaned_cnt, i;
+ struct enetc_swbd *rx_swbd;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+ rx_swbd = &rx_ring->q_swbd[i];
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd;
+ uint32_t bd_status;
+
+ if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+ int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+
+ cleaned_cnt -= count;
+ }
+
+ rxbd = ENETC_RXBD(*rx_ring, i);
+ bd_status = rte_le_to_cpu_32(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ rx_swbd->buffer_addr->pkt_len = rxbd->r.buf_len;
+ rx_swbd->buffer_addr->data_len = rxbd->r.buf_len;
+ rx_swbd->buffer_addr->hash.rss = rxbd->r.rss_hash;
+ rx_swbd->buffer_addr->ol_flags = 0;
+ enetc_dev_rx_parse(rx_swbd->buffer_addr,
+ rxbd->r.parse_summary);
+ rx_pkts[rx_frm_cnt] = rx_swbd->buffer_addr;
+ cleaned_cnt++;
+ rx_swbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rx_swbd = &rx_ring->q_swbd[i];
+ }
+
+ rx_ring->next_to_clean = i;
+ rx_frm_cnt++;
+ }
+
+ return rx_frm_cnt;
+}
+
+uint16_t
+enetc_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
+
+ return enetc_clean_rx_ring(rx_ring, rx_pkts, nb_pkts);
+}
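
enetc_recv_pkts() and enetc_xmit_pkts() are plain burst functions; assuming the PMD installs them as the port's rx_pkt_burst/tx_pkt_burst handlers during device init (not shown in this excerpt), an application drives them through the generic ethdev burst API. A hedged forwarding-loop sketch, with the port/queue ids and burst size as illustrative values:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define EX_BURST_SIZE 32	/* illustrative burst size */

/* Minimal poll loop: receive a burst on queue 0 and transmit it back out. */
static void
enetc_fwd_loop_example(uint16_t port_id)
{
	struct rte_mbuf *pkts[EX_BURST_SIZE];
	uint16_t nb_rx, nb_tx, i;

	for (;;) {
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, EX_BURST_SIZE);
		if (nb_rx == 0)
			continue;
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
		/* free anything the TX ring could not accept */
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}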
diff --git a/drivers/net/enetc/meson.build b/drivers/net/enetc/meson.build
new file mode 100644
index 00000000..733156bb
--- /dev/null
+++ b/drivers/net/enetc/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+sources = files('enetc_ethdev.c',
+ 'enetc_rxtx.c')
+
+includes += include_directories('base')
diff --git a/drivers/net/enetc/rte_pmd_enetc_version.map b/drivers/net/enetc/rte_pmd_enetc_version.map
new file mode 100644
index 00000000..521e51f4
--- /dev/null
+++ b/drivers/net/enetc/rte_pmd_enetc_version.map
@@ -0,0 +1,4 @@
+DPDK_18.11 {
+
+ local: *;
+};
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 7c6c29cc..e39e4763 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -39,4 +39,32 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_intr.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rq.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rss.c
+# The current implementation assumes 64-bit pointers
+CC_AVX2_SUPPORT=0
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+# Figure out if the compiler supports avx2. The extra check using
+# -march=core-avx2 is necessary to support users who build for the
+# 'default' machine (corei7 which has no avx2) and run the binary on
+# newer CPUs that have avx2.
+# This part is verbatim from i40e makefile.
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
+ CC_AVX2_SUPPORT=1
+else
+ CC_AVX2_SUPPORT=\
+ $(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
+ grep -q AVX2 && echo 1)
+ ifeq ($(CC_AVX2_SUPPORT), 1)
+ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+ CFLAGS_enic_rxtx_vec_avx2.o += -march=core-avx2
+ else
+ CFLAGS_enic_rxtx_vec_avx2.o += -mavx2
+ endif
+ endif
+endif
+endif
+
+ifeq ($(CC_AVX2_SUPPORT), 1)
+ SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx_vec_avx2.c
+endif
+
include $(RTE_SDK)/mk/rte.lib.mk
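
The Makefile logic above only decides whether enic_rxtx_vec_avx2.c can be built; whether the vectorized path is actually used is decided at run time by enic_use_vector_rx_handler() (a weak fallback is added in enic_main.c further down in this patch) together with the enable-avx2-rx devarg. A hedged sketch of the kind of runtime gate such a handler typically applies; this is an assumption for illustration, not the actual contents of enic_rxtx_vec_avx2.c:

#include <stdbool.h>
#include <rte_cpuflags.h>

/* Assumed shape of the runtime check: use the AVX2 Rx path only when the
 * CPU supports AVX2 and the user requested it via enable-avx2-rx=1.
 */
static bool
avx2_rx_usable_example(bool enable_avx2_rx_devarg)
{
	if (!enable_avx2_rx_devarg)
		return false;
	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) > 0;
}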
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 16e8814a..fd303fec 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -57,6 +57,9 @@ struct vnic_dev {
void (*free_consistent)(void *priv,
size_t size, void *vaddr,
dma_addr_t dma_handle);
+ struct vnic_counter_counts *flow_counters;
+ dma_addr_t flow_counters_pa;
+ u8 flow_counters_dma_active;
};
#define VNIC_MAX_RES_HDR_SIZE \
@@ -64,6 +67,8 @@ struct vnic_dev {
sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
+#define VNIC_MAX_FLOW_COUNTERS 2048
+
void *vnic_dev_priv(struct vnic_dev *vdev)
{
return vdev->priv;
@@ -611,6 +616,35 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
+/*
+ * Configure counter DMA
+ */
+int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
+ u32 num_counters)
+{
+ u64 args[3];
+ int wait = 1000;
+ int err;
+
+ if (num_counters > VNIC_MAX_FLOW_COUNTERS)
+ return -ENOMEM;
+ if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
+ num_counters == 0))
+ return -EINVAL;
+
+ args[0] = num_counters;
+ args[1] = vdev->flow_counters_pa;
+ args[2] = period;
+ err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait);
+
+ /* record if DMAs need to be stopped on close */
+ if (!err)
+ vdev->flow_counters_dma_active = (num_counters != 0 &&
+ period != 0);
+
+ return err;
+}
+
int vnic_dev_close(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
@@ -939,6 +973,24 @@ int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
return vdev->stats == NULL ? -ENOMEM : 0;
}
+/*
+ * Initialize for up to VNIC_MAX_FLOW_COUNTERS
+ */
+int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
+{
+ char name[NAME_MAX];
+ static u32 instance;
+
+ snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
+ vdev->flow_counters = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_counter_counts)
+ * VNIC_MAX_FLOW_COUNTERS,
+ &vdev->flow_counters_pa,
+ (u8 *)name);
+ vdev->flow_counters_dma_active = 0;
+ return vdev->flow_counters == NULL ? -ENOMEM : 0;
+}
+
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
@@ -951,6 +1003,16 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
vdev->free_consistent(vdev->priv,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
+ if (vdev->flow_counters) {
+ /* turn off counter DMAs before freeing memory */
+ if (vdev->flow_counters_dma_active)
+ vnic_dev_counter_dma_cfg(vdev, 0, 0);
+
+ vdev->free_consistent(vdev->priv,
+ sizeof(struct vnic_counter_counts)
+ * VNIC_MAX_FLOW_COUNTERS,
+ vdev->flow_counters, vdev->flow_counters_pa);
+ }
if (vdev->fw_info)
vdev->free_consistent(vdev->priv,
sizeof(struct vnic_devcmd_fw_info),
@@ -1094,3 +1156,46 @@ int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}
+
+bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx)
+{
+ u64 a0 = 0;
+ u64 a1 = 0;
+ int wait = 1000;
+
+ if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait))
+ return false;
+ *idx = (uint32_t)a0;
+ return true;
+}
+
+bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx)
+{
+ u64 a0 = idx;
+ u64 a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1,
+ wait) == 0;
+}
+
+bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
+ bool reset, uint64_t *packets, uint64_t *bytes)
+{
+ u64 a0 = idx;
+ u64 a1 = reset ? 1 : 0;
+ int wait = 1000;
+
+ if (reset) {
+ /* query/reset returns updated counters */
+ if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
+ return false;
+ *packets = a0;
+ *bytes = a1;
+ } else {
+ /* Get values DMA'd from the adapter */
+ *packets = vdev->flow_counters[idx].vcc_packets;
+ *bytes = vdev->flow_counters[idx].vcc_bytes;
+ }
+ return true;
+}
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 270a47bd..de2645c4 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -118,6 +118,8 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
+ u32 num_counters);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
@@ -170,6 +172,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
unsigned int num_bars);
struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
+int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev);
int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
int vnic_dev_get_size(void);
int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
@@ -187,4 +190,9 @@ int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number);
int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
+bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx);
+bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx);
+bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
+ bool reset, uint64_t *packets, uint64_t *bytes);
+
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index a22d8a76..3aad2dbd 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -598,6 +598,48 @@ enum vnic_devcmd_cmd {
* a3 = bitmask of supported actions
*/
CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
+
+ /*
+ * Allocate a counter for use with CMD_ADD_FILTER
+ * out:(u32) a0 = counter index
+ */
+ CMD_COUNTER_ALLOC = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ENET, 85),
+
+ /*
+ * Free a counter
+ * in: (u32) a0 = counter_id
+ */
+ CMD_COUNTER_FREE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 86),
+
+ /*
+ * Read a counter
+ * in: (u32) a0 = counter_id
+ * (u32) a1 = clear counter if non-zero
+ * out:(u64) a0 = packet count
+ * (u64) a1 = byte count
+ */
+ CMD_COUNTER_QUERY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 87),
+
+ /*
+ * Configure periodic counter DMA. This will trigger an immediate
+ * DMA of the counters (unless period == 0), and then schedule a DMA
+ * of the counters every <period> milliseconds until disabled.
+ * Each new COUNTER_DMA_CONFIG will override all previous commands on
+ * this vnic.
+ * Setting a2 (period) = 0 will disable periodic DMAs.
+ * If a0 (num_counters) != 0, an immediate DMA will always be done,
+ * irrespective of the value in a2.
+ * in: (u32) a0 = number of counters to DMA
+ * (u64) a1 = host target DMA address
+ * (u32) a2 = DMA period in milliseconds (0 to disable)
+ */
+ CMD_COUNTER_DMA_CONFIG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 88),
+#define VNIC_COUNTER_DMA_MIN_PERIOD 500
+
+ /*
+ * Clear all counters on a vnic
+ */
+ CMD_COUNTER_CLEAR_ALL = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ENET, 89),
};
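
Taken together, the counter commands above are exercised through the vnic_dev_counter_*() helpers added to vnic_dev.c earlier in this patch. A hedged end-to-end sketch of their intended use, with error handling trimmed and vdev assumed to be an already-probed device whose counter memory was allocated via vnic_dev_alloc_counter_mem():

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include "vnic_dev.h"

/* Allocate a flow counter, enable periodic DMA of counter values to the
 * host, read it once without clearing, then free it.
 */
static void
counter_usage_example(struct vnic_dev *vdev)
{
	uint32_t idx;
	uint64_t packets, bytes;

	if (!vnic_dev_counter_alloc(vdev, &idx))
		return;
	/* DMA counters [0, idx] every 500 ms (the documented minimum period) */
	if (vnic_dev_counter_dma_cfg(vdev, 500, idx + 1))
		goto out_free;
	/* reset=false: values come from the DMA'd vnic_counter_counts buffer */
	if (vnic_dev_counter_query(vdev, idx, false, &packets, &bytes))
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", packets, bytes);
out_free:
	vnic_dev_counter_free(vdev, idx);
}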
/* Modes for exchanging advanced filter capabilities. The modes supported by
@@ -863,9 +905,11 @@ struct filter_action {
#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
#define FILTER_ACTION_DROP_FLAG (1 << 2)
+#define FILTER_ACTION_COUNTER_FLAG (1 << 3)
#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_FILTER_ID_FLAG \
| FILTER_ACTION_DROP_FLAG \
- | FILTER_ACTION_FILTER_ID_FLAG)
+ | FILTER_ACTION_COUNTER_FLAG)
/* Version 2 of filter action must be a strict extension of struct filter_action
* where the first fields exactly match in size and meaning.
@@ -875,7 +919,8 @@ struct filter_action_v2 {
u32 rq_idx;
u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */
u16 filter_id;
- u_int8_t reserved[32]; /* for future expansion */
+ u32 counter_index;
+ uint8_t reserved[28]; /* for future expansion */
} __attribute__((packed));
/* Specifies the filter type. */
@@ -941,9 +986,9 @@ enum {
};
struct filter_tlv {
- u_int32_t type;
- u_int32_t length;
- u_int32_t val[0];
+ uint32_t type;
+ uint32_t length;
+ uint32_t val[0];
};
/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
@@ -957,10 +1002,10 @@ struct filter_tlv {
* drivers should use this instead of "sizeof (struct filter_v2)" when
* computing length for TLV.
*/
-static inline u_int32_t
+static inline uint32_t
vnic_filter_size(struct filter_v2 *fp)
{
- u_int32_t size;
+ uint32_t size;
switch (fp->type) {
case FILTER_USNIC_ID:
@@ -999,10 +1044,10 @@ enum {
* drivers should use this instead of "sizeof (struct filter_action_v2)"
* when computing length for TLV.
*/
-static inline u_int32_t
+static inline uint32_t
vnic_action_size(struct filter_action_v2 *fap)
{
- u_int32_t size;
+ uint32_t size;
switch (fap->type) {
case FILTER_ACTION_RQ_STEERING:
@@ -1122,4 +1167,13 @@ typedef enum {
GRPINTR_UPD_VECT,
} grpintr_subcmd_t;
+/*
+ * Structure for counter DMA
+ * (DMAed by CMD_COUNTER_DMA_CONFIG)
+ */
+struct vnic_counter_counts {
+ u64 vcc_packets;
+ u64 vcc_bytes;
+};
+
#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 7c27bd51..7bca3cad 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -39,6 +39,9 @@
#define PAGE_ROUND_UP(x) \
((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
+/* must be >= VNIC_COUNTER_DMA_MIN_PERIOD */
+#define VNIC_FLOW_COUNTER_UPDATE_MSECS 500
+
#define ENICPMD_VFIO_PATH "/dev/vfio/vfio"
/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
@@ -94,6 +97,7 @@ struct rte_flow {
LIST_ENTRY(rte_flow) next;
u16 enic_filter_id;
struct filter_v2 enic_filter;
+ int counter_idx; /* NIC allocated counter index (-1 = invalid) */
};
/* Per-instance private data structure */
@@ -104,6 +108,11 @@ struct enic {
struct vnic_dev_bar bar0;
struct vnic_dev *vdev;
+ /*
+ * mbuf_initializer contains 64 bits of mbuf rearm_data, used by
+ * the avx2 handler at this time.
+ */
+ uint64_t mbuf_initializer;
unsigned int port_id;
bool overlay_offload;
struct rte_eth_dev *rte_dev;
@@ -126,6 +135,7 @@ struct enic {
u8 filter_actions; /* HW supported actions */
bool vxlan;
bool disable_overlay; /* devargs disable_overlay=1 */
+ uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */
bool nic_cfg_chk; /* NIC_CFG_CHK available */
bool udp_rss_weak; /* Bodega style UDP RSS */
uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
@@ -165,6 +175,7 @@ struct enic {
rte_spinlock_t mtu_lock;
LIST_HEAD(enic_flows, rte_flow) flows;
+ int max_flow_counter;
rte_spinlock_t flows_lock;
/* RSS */
@@ -326,6 +337,7 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
int enic_link_update(struct enic *enic);
+bool enic_use_vector_rx_handler(struct enic *enic);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index b3d57771..996bb554 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -37,6 +37,7 @@ static const struct rte_pci_id pci_id_enic_map[] = {
};
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
+#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
RTE_INIT(enicpmd_init_log)
@@ -521,10 +522,34 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_NONFRAG,
RTE_PTYPE_UNKNOWN
};
+ static const uint32_t ptypes_overlay[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_UNKNOWN
+ };
- if (dev->rx_pkt_burst == enic_recv_pkts ||
- dev->rx_pkt_burst == enic_noscatter_recv_pkts)
- return ptypes;
+ if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
+ dev->rx_pkt_burst != NULL) {
+ struct enic *enic = pmd_priv(dev);
+ if (enic->overlay_offload)
+ return ptypes_overlay;
+ else
+ return ptypes;
+ }
return NULL;
}
@@ -915,22 +940,27 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
};
-static int enic_parse_disable_overlay(__rte_unused const char *key,
- const char *value,
- void *opaque)
+static int enic_parse_zero_one(const char *key,
+ const char *value,
+ void *opaque)
{
struct enic *enic;
+ bool b;
enic = (struct enic *)opaque;
if (strcmp(value, "0") == 0) {
- enic->disable_overlay = false;
+ b = false;
} else if (strcmp(value, "1") == 0) {
- enic->disable_overlay = true;
+ b = true;
} else {
- dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
- ": expected=0|1 given=%s\n", value);
+ dev_err(enic, "Invalid value for %s"
+ ": expected=0|1 given=%s\n", key, value);
return -EINVAL;
}
+ if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
+ enic->disable_overlay = b;
+ if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
+ enic->enable_avx2_rx = b;
return 0;
}
@@ -971,6 +1001,7 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
{
static const char *const valid_keys[] = {
ENIC_DEVARG_DISABLE_OVERLAY,
+ ENIC_DEVARG_ENABLE_AVX2_RX,
ENIC_DEVARG_IG_VLAN_REWRITE,
NULL};
struct enic *enic = pmd_priv(dev);
@@ -979,6 +1010,7 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
ENICPMD_FUNC_TRACE();
enic->disable_overlay = false;
+ enic->enable_avx2_rx = false;
enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
if (!dev->device->devargs)
return 0;
@@ -986,7 +1018,9 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
if (!kvlist)
return -EINVAL;
if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
- enic_parse_disable_overlay, enic) < 0 ||
+ enic_parse_zero_one, enic) < 0 ||
+ rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
+ enic_parse_zero_one, enic) < 0 ||
rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
enic_parse_ig_vlan_rewrite, enic) < 0) {
rte_kvargs_free(kvlist);
@@ -996,7 +1030,6 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
return 0;
}
-struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
*/
@@ -1044,7 +1077,8 @@ static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_enic_pmd = {
.id_table = pci_id_enic_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_enic_pci_probe,
.remove = eth_enic_pci_remove,
};
@@ -1054,4 +1088,5 @@ RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
+ ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index 0cf04aef..bb9ed037 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -289,6 +289,15 @@ static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
RTE_FLOW_ACTION_TYPE_END,
};
+static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_COUNT,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
[FILTER_ACTION_RQ_STEERING_FLAG] = {
@@ -303,6 +312,10 @@ static const struct enic_action_cap enic_action_cap[] = {
.actions = enic_supported_actions_v2_drop,
.copy_fn = enic_copy_action_v2,
},
+ [FILTER_ACTION_COUNTER_FLAG] = {
+ .actions = enic_supported_actions_v2_count,
+ .copy_fn = enic_copy_action_v2,
+ },
};
static int
@@ -1068,6 +1081,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
enic_action->flags |= FILTER_ACTION_DROP_FLAG;
break;
}
+ case RTE_FLOW_ACTION_TYPE_COUNT: {
+ enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
+ break;
+ }
case RTE_FLOW_ACTION_TYPE_VOID:
continue;
default:
@@ -1112,7 +1129,9 @@ enic_get_action_cap(struct enic *enic)
uint8_t actions;
actions = enic->filter_actions;
- if (actions & FILTER_ACTION_DROP_FLAG)
+ if (actions & FILTER_ACTION_COUNTER_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
+ else if (actions & FILTER_ACTION_DROP_FLAG)
ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
@@ -1395,8 +1414,10 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
struct rte_flow_error *error)
{
struct rte_flow *flow;
- int ret;
- u16 entry;
+ int err;
+ uint16_t entry;
+ int ctr_idx;
+ int last_max_flow_ctr;
FLOW_TRACE();
@@ -1407,20 +1428,64 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
return NULL;
}
+ flow->counter_idx = -1;
+ last_max_flow_ctr = -1;
+ if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
+ if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "cannot allocate counter");
+ goto unwind_flow_alloc;
+ }
+ flow->counter_idx = ctr_idx;
+ enic_action->counter_index = ctr_idx;
+
+ /* If index is the largest, increase the counter DMA size */
+ if (ctr_idx > enic->max_flow_counter) {
+ err = vnic_dev_counter_dma_cfg(enic->vdev,
+ VNIC_FLOW_COUNTER_UPDATE_MSECS,
+ ctr_idx + 1);
+ if (err) {
+ rte_flow_error_set(error, -err,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "counter DMA config failed");
+ goto unwind_ctr_alloc;
+ }
+ last_max_flow_ctr = enic->max_flow_counter;
+ enic->max_flow_counter = ctr_idx;
+ }
+ }
+
/* entry[in] is the queue id, entry[out] is the filter Id for delete */
entry = enic_action->rq_idx;
- ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
+ err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
enic_action);
- if (!ret) {
- flow->enic_filter_id = entry;
- flow->enic_filter = *enic_filter;
- } else {
- rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ if (err) {
+ rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "vnic_dev_classifier error");
- rte_free(flow);
- return NULL;
+ goto unwind_ctr_dma_cfg;
}
+
+ flow->enic_filter_id = entry;
+ flow->enic_filter = *enic_filter;
+
return flow;
+
+/* unwind if there are errors */
+unwind_ctr_dma_cfg:
+ if (last_max_flow_ctr != -1) {
+ /* reduce counter DMA size */
+ vnic_dev_counter_dma_cfg(enic->vdev,
+ VNIC_FLOW_COUNTER_UPDATE_MSECS,
+ last_max_flow_ctr + 1);
+ enic->max_flow_counter = last_max_flow_ctr;
+ }
+unwind_ctr_alloc:
+ if (flow->counter_idx != -1)
+ vnic_dev_counter_free(enic->vdev, ctr_idx);
+unwind_flow_alloc:
+ rte_free(flow);
+ return NULL;
}
/**
@@ -1435,18 +1500,29 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
* @param error[out]
*/
static int
-enic_flow_del_filter(struct enic *enic, u16 filter_id,
+enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
struct rte_flow_error *error)
{
- int ret;
+ u16 filter_id;
+ int err;
FLOW_TRACE();
- ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
- if (!ret)
- rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ filter_id = flow->enic_filter_id;
+ err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
+ if (err) {
+ rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "vnic_dev_classifier failed");
- return ret;
+ return -err;
+ }
+
+ if (flow->counter_idx != -1) {
+ if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
+ dev_err(enic, "counter free failed, idx: %d\n",
+ flow->counter_idx);
+ flow->counter_idx = -1;
+ }
+ return 0;
}
/*
@@ -1529,9 +1605,10 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
FLOW_TRACE();
rte_spinlock_lock(&enic->flows_lock);
- enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ enic_flow_del_filter(enic, flow, error);
LIST_REMOVE(flow, next);
rte_spinlock_unlock(&enic->flows_lock);
+ rte_free(flow);
return 0;
}
@@ -1553,13 +1630,77 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
while (!LIST_EMPTY(&enic->flows)) {
flow = LIST_FIRST(&enic->flows);
- enic_flow_del_filter(enic, flow->enic_filter_id, error);
+ enic_flow_del_filter(enic, flow, error);
LIST_REMOVE(flow, next);
+ rte_free(flow);
}
rte_spinlock_unlock(&enic->flows_lock);
return 0;
}
+static int
+enic_flow_query_count(struct rte_eth_dev *dev,
+ struct rte_flow *flow, void *data,
+ struct rte_flow_error *error)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct rte_flow_query_count *query;
+ uint64_t packets, bytes;
+
+ FLOW_TRACE();
+
+ if (flow->counter_idx == -1) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
+ }
+ query = (struct rte_flow_query_count *)data;
+ if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
+ !!query->reset, &packets, &bytes)) {
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ }
+ query->hits_set = 1;
+ query->bytes_set = 1;
+ query->hits = packets;
+ query->bytes = bytes;
+ return 0;
+}
+
+static int
+enic_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ FLOW_TRACE();
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = enic_flow_query_count(dev, flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
/**
* Flow callback registration.
*
@@ -1570,4 +1711,5 @@ const struct rte_flow_ops enic_flow_ops = {
.create = enic_flow_create,
.destroy = enic_flow_destroy,
.flush = enic_flow_flush,
+ .query = enic_flow_query,
};
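
With the .query callback wired up above, an application that created a flow containing a COUNT action can read its counters through the generic rte_flow API; enic_flow_query() walks an END-terminated action list, so the sketch below passes one. A hedged example, with the port id and flow handle assumed to come from an earlier rte_flow_create() (the rte_flow_query_count fields match those filled in by enic_flow_query_count()):

#include <stdio.h>
#include <inttypes.h>
#include <rte_flow.h>

/* Query the COUNT action of an existing flow and print its packet/byte hits. */
static int
flow_count_query_example(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count count = { .reset = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, actions, &count, &error);
	if (ret)
		return ret;
	if (count.hits_set && count.bytes_set)
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       count.hits, count.bytes);
	return 0;
}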
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index fd940c58..e81c3f3b 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -514,12 +514,29 @@ static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
}
}
+/*
+ * The 'strong' version is in enic_rxtx_vec_avx2.c. This weak version is
+ * used when that file is not compiled.
+ */
+bool __attribute__((weak))
+enic_use_vector_rx_handler(__rte_unused struct enic *enic)
+{
+ return false;
+}
+
static void pick_rx_handler(struct enic *enic)
{
struct rte_eth_dev *eth_dev;
- /* Use the non-scatter, simplified RX handler if possible. */
+ /*
+ * Preference order:
+ * 1. The vectorized handler if possible and requested.
+ * 2. The non-scatter, simplified handler if scatter Rx is not used.
+ * 3. The default handler as a fallback.
+ */
eth_dev = enic->rte_dev;
+ if (enic_use_vector_rx_handler(enic))
+ return;
if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
@@ -534,6 +551,25 @@ int enic_enable(struct enic *enic)
unsigned int index;
int err;
struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint64_t simple_tx_offloads;
+ uintptr_t p;
+
+ if (enic->enable_avx2_rx) {
+ struct rte_mbuf mb_def = { .buf_addr = 0 };
+
+ /*
+ * mbuf_initializer contains const-after-init fields of
+ * receive mbufs (i.e. 64 bits of fields from rearm_data).
+ * It is currently used by the vectorized handler.
+ */
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = enic->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ enic->mbuf_initializer = *(uint64_t *)p;
+ }
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -572,10 +608,17 @@ int enic_enable(struct enic *enic)
}
/*
- * Use the simple TX handler if possible. All offloads must be
- * disabled.
+ * Use the simple TX handler if possible. Only checksum offloads
+ * and vlan insertion are supported.
*/
- if (eth_dev->data->dev_conf.txmode.offloads == 0) {
+ simple_tx_offloads = enic->tx_offload_capa &
+ (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if ((eth_dev->data->dev_conf.txmode.offloads &
+ ~simple_tx_offloads) == 0) {
PMD_INIT_LOG(DEBUG, " use the simple tx handler");
eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
for (index = 0; index < enic->wq_count; index++)
@@ -1639,11 +1682,25 @@ static int enic_dev_init(struct enic *enic)
LIST_INIT(&enic->flows);
rte_spinlock_init(&enic->flows_lock);
+ enic->max_flow_counter = -1;
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
enic->overlay_offload = false;
+ if (enic->disable_overlay && enic->vxlan) {
+ /*
+ * Explicitly disable overlay offload as the setting is
+ * sticky, and resetting vNIC does not disable it.
+ */
+ if (vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_DISABLE)) {
+ dev_err(enic, "failed to disable overlay offload\n");
+ } else {
+ dev_info(enic, "Overlay offload is disabled\n");
+ }
+ }
if (!enic->disable_overlay && enic->vxlan &&
/* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
vnic_dev_overlay_offload_ctrl(enic->vdev,
@@ -1653,11 +1710,9 @@ static int enic_dev_init(struct enic *enic)
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
- /*
- * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
- * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
- */
enic->tx_offload_mask |=
+ PKT_TX_OUTER_IPV6 |
+ PKT_TX_OUTER_IPV4 |
PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_MASK;
enic->overlay_offload = true;
@@ -1708,14 +1763,20 @@ int enic_probe(struct enic *enic)
enic_free_consistent);
/*
- * Allocate the consistent memory for stats upfront so both primary and
- * secondary processes can dump stats.
+ * Allocate the consistent memory for stats and counters upfront so
+ * both primary and secondary processes can access them.
*/
err = vnic_dev_alloc_stats_mem(enic->vdev);
if (err) {
dev_err(enic, "Failed to allocate cmd memory, aborting\n");
goto err_out_unregister;
}
+ err = vnic_dev_alloc_counter_mem(enic->vdev);
+ if (err) {
+ dev_err(enic, "Failed to allocate counter memory, aborting\n");
+ goto err_out_unregister;
+ }
+
/* Issue device open to get device in known state */
err = enic_dev_open(enic);
if (err) {
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 8d493ffe..24b2844f 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -85,7 +85,7 @@ int enic_get_vnic_config(struct enic *enic)
vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk,
&enic->udp_rss_weak);
- dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n",
+ dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s%s\n",
((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
@@ -95,7 +95,9 @@ int enic_get_vnic_config(struct enic *enic)
((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ?
"tag " : ""),
((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ?
- "drop " : ""));
+ "drop " : ""),
+ ((enic->filter_actions & FILTER_ACTION_COUNTER_FLAG) ?
+ "count " : ""));
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -195,13 +197,14 @@ int enic_get_vnic_config(struct enic *enic)
enic->rx_offload_capa =
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
enic->tx_offload_mask =
- PKT_TX_VLAN_PKT |
+ PKT_TX_IPV6 |
+ PKT_TX_IPV4 |
+ PKT_TX_VLAN |
PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK |
PKT_TX_TCP_SEG;
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 7129e121..5189ee63 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -11,6 +11,7 @@
#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
+#include "enic_rxtx_common.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
@@ -30,266 +31,6 @@
#define rte_packet_prefetch(p) do {} while (0)
#endif
-static inline uint16_t
-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-{
- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-{
- return le16_to_cpu(crd->bytes_written_flags) &
- ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_packet_error(uint16_t bwflags)
-{
- return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_eop(uint16_t ciflags)
-{
- return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
- == CQ_ENET_RQ_DESC_FLAGS_EOP;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-{
- return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
- return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
- CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
- return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
- CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-{
- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
- CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-}
-
-static inline uint32_t
-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-{
- return le32_to_cpu(cqrd->rss_hash);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-{
- return le16_to_cpu(cqrd->vlan);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- return le16_to_cpu(cqrd->bytes_written_flags) &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-
-static inline uint8_t
-enic_cq_rx_check_err(struct cq_desc *cqd)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t bwflags;
-
- bwflags = enic_cq_rx_desc_bwflags(cqrd);
- if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
- return 1;
- return 0;
-}
-
-/* Lookup table to translate RX CQ flags to mbuf flags. */
-static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint8_t cqrd_flags = cqrd->flags;
- /*
- * Odd-numbered entries are for tunnel packets. All packet type info
- * applies to the inner packet, and there is no info on the outer
- * packet. The outer flags in these entries exist only to avoid
- * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
- * afterwards.
- *
- * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
- * RTE_PTYPE_TUNNEL_GRENAT..
- */
- static const uint32_t cq_type_table[128] __rte_cache_aligned = {
- [0x00] = RTE_PTYPE_UNKNOWN,
- [0x01] = RTE_PTYPE_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER,
- [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
- [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
- [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
- [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
- [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- /* All others reserved */
- };
- cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
- | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
- | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
- return cq_type_table[cqrd_flags + tnl];
-}
-
-static inline void
-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t bwflags, pkt_flags = 0, vlan_tci;
- bwflags = enic_cq_rx_desc_bwflags(cqrd);
- vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-
- /* VLAN STRIPPED flag. The L2 packet type updated here also */
- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
- pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
- } else {
- if (vlan_tci != 0)
- mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
- else
- mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
- }
- mbuf->vlan_tci = vlan_tci;
-
- if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
- struct cq_enet_rq_clsf_desc *clsf_cqd;
- uint16_t filter_id;
- clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
- filter_id = clsf_cqd->filter_id;
- if (filter_id) {
- pkt_flags |= PKT_RX_FDIR;
- if (filter_id != ENIC_MAGIC_FILTER_ID) {
- mbuf->hash.fdir.hi = clsf_cqd->filter_id;
- pkt_flags |= PKT_RX_FDIR_ID;
- }
- }
- } else if (enic_cq_rx_desc_rss_type(cqrd)) {
- /* RSS flag */
- pkt_flags |= PKT_RX_RSS_HASH;
- mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
- }
-
- /* checksum flags */
- if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
- if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
- uint32_t l4_flags;
- l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
-
- /*
- * When overlay offload is enabled, the NIC may
- * set ipv4_csum_ok=1 if the inner packet is IPv6..
- * So, explicitly check for IPv4 before checking
- * ipv4_csum_ok.
- */
- if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
- if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
- pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
- else
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
- }
-
- if (l4_flags == RTE_PTYPE_L4_UDP ||
- l4_flags == RTE_PTYPE_L4_TCP) {
- if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
- pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
- else
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
- }
- }
- }
-
- mbuf->ol_flags = pkt_flags;
-}
-
/* dummy receive function to replace actual function in
* order to do safe reconfiguration operations.
*/
@@ -707,7 +448,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
wq_desc_avail = vnic_wq_desc_avail(wq);
head_idx = wq->head_idx;
desc_count = wq->ring.desc_count;
- ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+ ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
tx_oversized = &enic->soft_stats.tx_oversized;
nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
@@ -735,7 +476,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
mss = 0;
vlan_id = tx_pkt->vlan_tci;
- vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
+ vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN);
bus_addr = (dma_addr_t)
(tx_pkt->buf_iova + tx_pkt->data_off);
@@ -840,12 +581,33 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
struct enic *enic)
{
struct rte_mbuf *p;
+ uint16_t mss;
while (n) {
n--;
p = *pkts++;
desc->address = p->buf_iova + p->data_off;
desc->length = p->pkt_len;
+ /* VLAN insert */
+ desc->vlan_tag = p->vlan_tci;
+ desc->header_length_flags &=
+ ((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
+ (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
+ if (p->ol_flags & PKT_TX_VLAN) {
+ desc->header_length_flags |=
+ 1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
+ }
+ /*
+ * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
+ * is 0, so no need to set offload_mode.
+ */
+ mss = 0;
+ if (p->ol_flags & PKT_TX_IP_CKSUM)
+ mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
+ if (p->ol_flags & PKT_TX_L4_MASK)
+ mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
+ desc->mss_loopback = mss;
+
/*
* The app should not send oversized
* packets. tx_pkt_prepare includes a check as
diff --git a/drivers/net/enic/enic_rxtx_common.h b/drivers/net/enic/enic_rxtx_common.h
new file mode 100644
index 00000000..bfbb4909
--- /dev/null
+++ b/drivers/net/enic/enic_rxtx_common.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _ENIC_RXTX_COMMON_H_
+#define _ENIC_RXTX_COMMON_H_
+
+static inline uint16_t
+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
+{
+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
+{
+ return le16_to_cpu(crd->bytes_written_flags) &
+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_packet_error(uint16_t bwflags)
+{
+ return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_eop(uint16_t ciflags)
+{
+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
+{
+ return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
+{
+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+}
+
+static inline uint32_t
+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
+{
+ return le32_to_cpu(cqrd->rss_hash);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
+{
+ return le16_to_cpu(cqrd->vlan);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ return le16_to_cpu(cqrd->bytes_written_flags) &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+
+static inline uint8_t
+enic_cq_rx_check_err(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t bwflags;
+
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
+ return 1;
+ return 0;
+}
+
+/* Lookup table to translate RX CQ flags to mbuf flags. */
+static uint32_t
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint8_t cqrd_flags = cqrd->flags;
+ /*
+ * Odd-numbered entries are for tunnel packets. All packet type info
+ * applies to the inner packet, and there is no info on the outer
+ * packet. The outer flags in these entries exist only to avoid
+ * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
+ * afterwards.
+ *
+ * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
+ * RTE_PTYPE_TUNNEL_GRENAT.
+ */
+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
+ [0x00] = RTE_PTYPE_UNKNOWN,
+ [0x01] = RTE_PTYPE_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ /* All others reserved */
+ };
+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
+ return cq_type_table[cqrd_flags + tnl];
+}
+
+static void
+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t bwflags, pkt_flags = 0, vlan_tci;
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+
+ /* VLAN STRIPPED flag. The L2 packet type is also updated here. */
+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
+ pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+ } else {
+ if (vlan_tci != 0) {
+ pkt_flags |= PKT_RX_VLAN;
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ } else {
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+ }
+ }
+ mbuf->vlan_tci = vlan_tci;
+
+ if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
+ struct cq_enet_rq_clsf_desc *clsf_cqd;
+ uint16_t filter_id;
+ clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
+ filter_id = clsf_cqd->filter_id;
+ if (filter_id) {
+ pkt_flags |= PKT_RX_FDIR;
+ if (filter_id != ENIC_MAGIC_FILTER_ID) {
+ mbuf->hash.fdir.hi = clsf_cqd->filter_id;
+ pkt_flags |= PKT_RX_FDIR_ID;
+ }
+ }
+ } else if (enic_cq_rx_desc_rss_type(cqrd)) {
+ /* RSS flag */
+ pkt_flags |= PKT_RX_RSS_HASH;
+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
+ }
+
+ /* checksum flags */
+ if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
+ if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
+ uint32_t l4_flags;
+ l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+
+ /*
+ * When overlay offload is enabled, the NIC may
+ * set ipv4_csum_ok=1 if the inner packet is IPv6.
+ * So, explicitly check for IPv4 before checking
+ * ipv4_csum_ok.
+ */
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ }
+
+ if (l4_flags == RTE_PTYPE_L4_UDP ||
+ l4_flags == RTE_PTYPE_L4_TCP) {
+ if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ }
+
+ mbuf->ol_flags = pkt_flags;
+}
+
+#endif /* _ENIC_RXTX_COMMON_H_ */
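For reference, a scalar sketch of how the ptype table above is indexed: the five CQ flag bits kept by the mask form the even (outer-only) entry, and adding the tunnel bit selects the adjacent odd entry that also carries the inner ptypes. The flag values used here are inferred from the table layout, not quoted from cq_enet_desc.h, so treat them as illustrative.

	/* Hypothetical lookup: a plain IPv4/UDP completion vs. the same
	 * flags on an encapsulated packet (tnl = 1). Flag values inferred
	 * from the table layout above (IPv4 = 0x20, UDP = 0x02).
	 */
	uint8_t flags = 0x20 /* IPv4 */ | 0x02 /* UDP */;
	uint32_t outer = cq_type_table[flags];     /* 0x22: IPV4 | L4_UDP */
	uint32_t inner = cq_type_table[flags + 1]; /* 0x23: adds TUNNEL_GRENAT
						    * and INNER_L2/L3/L4 */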
diff --git a/drivers/net/enic/enic_rxtx_vec_avx2.c b/drivers/net/enic/enic_rxtx_vec_avx2.c
new file mode 100644
index 00000000..d2185490
--- /dev/null
+++ b/drivers/net/enic/enic_rxtx_vec_avx2.c
@@ -0,0 +1,831 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2018 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+
+#include "enic_compat.h"
+#include "rq_enet_desc.h"
+#include "enic.h"
+#include "enic_rxtx_common.h"
+
+#include <x86intrin.h>
+
+static struct rte_mbuf *
+rx_one(struct cq_enet_rq_desc *cqd, struct rte_mbuf *mb, struct enic *enic)
+{
+ bool tnl;
+
+ *(uint64_t *)&mb->rearm_data = enic->mbuf_initializer;
+ mb->data_len = cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ mb->pkt_len = mb->data_len;
+ tnl = enic->overlay_offload && (cqd->completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+ mb->packet_type =
+ enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd, tnl);
+ enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
+ return mb;
+}
+
+static uint16_t
+enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf **rx, **rxmb;
+ uint16_t cq_idx, nb_rx, max_rx;
+ struct cq_enet_rq_desc *cqd;
+ struct rq_enet_desc *rqd;
+ struct vnic_cq *cq;
+ struct vnic_rq *rq;
+ struct enic *enic;
+ uint8_t color;
+
+ rq = rx_queue;
+ enic = vnic_dev_priv(rq->vdev);
+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ cq_idx = cq->to_clean;
+
+ /*
+ * Fill up the reserve of free mbufs. Below, we restock the receive
+ * ring with these mbufs to avoid allocation failures.
+ */
+ if (rq->num_free_mbufs == 0) {
+ if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+ ENIC_RX_BURST_MAX))
+ return 0;
+ rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+ }
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+ max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+ rxmb = rq->mbuf_ring + cq_idx;
+ color = cq->last_color;
+ cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+ rx = rx_pkts;
+ if (max_rx == 0 ||
+ (cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+ return 0;
+
+ /* Step 1: Process one packet so the 256-bit loads below are aligned */
+ if (cq_idx & 0x1) {
+ if (unlikely(cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+ rte_pktmbuf_free(*rxmb++);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ } else {
+ *rx++ = rx_one(cqd, *rxmb++, enic);
+ }
+ cqd++;
+ max_rx--;
+ }
+
+ const __m256i mask =
+ _mm256_set_epi8(/* Second descriptor */
+ 0xff, /* type_color */
+ (CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT |
+ CQ_ENET_RQ_DESC_FLAGS_IPV4 |
+ CQ_ENET_RQ_DESC_FLAGS_IPV6 |
+ CQ_ENET_RQ_DESC_FLAGS_TCP |
+ CQ_ENET_RQ_DESC_FLAGS_UDP), /* flags */
+ 0, 0, /* checksum_fcoe */
+ 0xff, 0xff, /* vlan */
+ 0x3f, 0xff, /* bytes_written_flags */
+ 0xff, 0xff, 0xff, 0xff, /* rss_hash */
+ 0xff, 0xff, /* q_number_rss_type_flags */
+ 0, 0, /* completed_index_flags */
+ /* First descriptor */
+ 0xff, /* type_color */
+ (CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT |
+ CQ_ENET_RQ_DESC_FLAGS_IPV4 |
+ CQ_ENET_RQ_DESC_FLAGS_IPV6 |
+ CQ_ENET_RQ_DESC_FLAGS_TCP |
+ CQ_ENET_RQ_DESC_FLAGS_UDP), /* flags */
+ 0, 0, /* checksum_fcoe */
+ 0xff, 0xff, /* vlan */
+ 0x3f, 0xff, /* bytes_written_flags */
+ 0xff, 0xff, 0xff, 0xff, /* rss_hash */
+ 0xff, 0xff, /* q_number_rss_type_flags */
+ 0, 0 /* completed_index_flags */
+ );
+ const __m256i shuffle_mask =
+ _mm256_set_epi8(/* Second descriptor */
+ 7, 6, 5, 4, /* rss = rss_hash */
+ 11, 10, /* vlan_tci = vlan */
+ 9, 8, /* data_len = bytes_written */
+ 0x80, 0x80, 9, 8, /* pkt_len = bytes_written */
+ 0x80, 0x80, 0x80, 0x80, /* packet_type = 0 */
+ /* First descriptor */
+ 7, 6, 5, 4, /* rss = rss_hash */
+ 11, 10, /* vlan_tci = vlan */
+ 9, 8, /* data_len = bytes_written */
+ 0x80, 0x80, 9, 8, /* pkt_len = bytes_written */
+ 0x80, 0x80, 0x80, 0x80 /* packet_type = 0 */
+ );
+ /* Used to collect 8 flags from 8 desc into one register */
+ const __m256i flags_shuffle_mask =
+ _mm256_set_epi8(/* Second descriptor */
+ 1, 3, 9, 14,
+ 1, 3, 9, 14,
+ 1, 3, 9, 14,
+ 1, 3, 9, 14,
+ /* First descriptor */
+ 1, 3, 9, 14,
+ 1, 3, 9, 14,
+ 1, 3, 9, 14,
+ /*
+ * Byte 3: upper byte of completed_index_flags
+ * bit 5 = fcoe (tunnel)
+ * Byte 2: upper byte of q_number_rss_type_flags
+ * bits 2,3,4,5 = rss type
+ * bit 6 = csum_not_calc
+ * Byte 1: upper byte of bytes_written_flags
+ * bit 6 = truncated
+ * bit 7 = vlan stripped
+ * Byte 0: flags
+ */
+ 1, 3, 9, 14
+ );
+ /* Used to collect 8 VLAN IDs from 8 desc into one register */
+ const __m256i vlan_shuffle_mask =
+ _mm256_set_epi8(/* Second descriptor */
+ 0x80, 0x80, 11, 10,
+ 0x80, 0x80, 11, 10,
+ 0x80, 0x80, 11, 10,
+ 0x80, 0x80, 11, 10,
+ /* First descriptor */
+ 0x80, 0x80, 11, 10,
+ 0x80, 0x80, 11, 10,
+ 0x80, 0x80, 11, 10,
+ 0x80, 0x80, 11, 10);
+ /* PKT_RX_RSS_HASH is 1<<1, so it fits in an 8-bit integer */
+ const __m256i rss_shuffle =
+ _mm256_set_epi8(/* second 128 bits */
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, /* rss_types = 0 */
+ /* first 128 bits */
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0 /* rss_types = 0 */);
+ /*
+ * VLAN offload flags.
+ * shuffle index:
+ * vlan_stripped => bit 0
+ * vlan_id == 0 => bit 1
+ */
+ const __m256i vlan_shuffle =
+ _mm256_set_epi32(0, 0, 0, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN);
+ /* Use the same shuffle index as vlan_shuffle */
+ const __m256i vlan_ptype_shuffle =
+ _mm256_set_epi32(0, 0, 0, 0,
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN);
+ /*
+ * CKSUM flags. Shift right so they fit in 8-bit integers.
+ * shuffle index:
+ * ipv4_csum_ok => bit 3
+ * ip4 => bit 2
+ * tcp_or_udp => bit 1
+ * tcp_udp_csum_ok => bit 0
+ */
+ const __m256i csum_shuffle =
+ _mm256_set_epi8(/* second 128 bits */
+ /* 1111 ip4+ip4_ok+l4+l4_ok */
+ ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ /* 1110 ip4_ok+ip4+l4+!l4_ok */
+ ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1),
+ (PKT_RX_IP_CKSUM_GOOD >> 1), /* 1101 ip4+ip4_ok */
+ (PKT_RX_IP_CKSUM_GOOD >> 1), /* 1100 ip4_ok+ip4 */
+ (PKT_RX_L4_CKSUM_GOOD >> 1), /* 1011 l4+l4_ok */
+ (PKT_RX_L4_CKSUM_BAD >> 1), /* 1010 l4+!l4_ok */
+ 0, /* 1001 */
+ 0, /* 1000 */
+ /* 0111 !ip4_ok+ip4+l4+l4_ok */
+ ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ /* 0110 !ip4_ok+ip4+l4+!l4_ok */
+ ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1),
+ (PKT_RX_IP_CKSUM_BAD >> 1), /* 0101 !ip4_ok+ip4 */
+ (PKT_RX_IP_CKSUM_BAD >> 1), /* 0100 !ip4_ok+ip4 */
+ (PKT_RX_L4_CKSUM_GOOD >> 1), /* 0011 l4+l4_ok */
+ (PKT_RX_L4_CKSUM_BAD >> 1), /* 0010 l4+!l4_ok */
+ 0, /* 0001 */
+ 0, /* 0000 */
+ /* first 128 bits */
+ ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1),
+ (PKT_RX_IP_CKSUM_GOOD >> 1),
+ (PKT_RX_IP_CKSUM_GOOD >> 1),
+ (PKT_RX_L4_CKSUM_GOOD >> 1),
+ (PKT_RX_L4_CKSUM_BAD >> 1),
+ 0, 0,
+ ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ ((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1),
+ (PKT_RX_IP_CKSUM_BAD >> 1),
+ (PKT_RX_IP_CKSUM_BAD >> 1),
+ (PKT_RX_L4_CKSUM_GOOD >> 1),
+ (PKT_RX_L4_CKSUM_BAD >> 1),
+ 0, 0);
+ /*
+ * Non-fragment PTYPEs.
+ * Shuffle 4-bit index:
+ * ip6 => bit 0
+ * ip4 => bit 1
+ * udp => bit 2
+ * tcp => bit 3
+ * bit
+ * 3 2 1 0
+ * -------
+ * 0 0 0 0 unknown
+ * 0 0 0 1 ip6 | nonfrag
+ * 0 0 1 0 ip4 | nonfrag
+ * 0 0 1 1 unknown
+ * 0 1 0 0 unknown
+ * 0 1 0 1 ip6 | udp
+ * 0 1 1 0 ip4 | udp
+ * 0 1 1 1 unknown
+ * 1 0 0 0 unknown
+ * 1 0 0 1 ip6 | tcp
+ * 1 0 1 0 ip4 | tcp
+ * 1 0 1 1 unknown
+ * 1 1 0 0 unknown
+ * 1 1 0 1 unknown
+ * 1 1 1 0 unknown
+ * 1 1 1 1 unknown
+ *
+ * PTYPEs do not fit in 8 bits, so shift right by 4.
+ */
+ const __m256i nonfrag_ptype_shuffle =
+ _mm256_set_epi8(/* second 128 bits */
+ RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG) >> 4,
+ RTE_PTYPE_UNKNOWN,
+ /* first 128 bits */
+ RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG) >> 4,
+ RTE_PTYPE_UNKNOWN);
+ /* Fragment PTYPEs. Use the same shuffle index as above. */
+ const __m256i frag_ptype_shuffle =
+ _mm256_set_epi8(/* second 128 bits */
+ RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ RTE_PTYPE_UNKNOWN,
+ /* first 128 bits */
+ RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ (RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ (RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG) >> 4,
+ RTE_PTYPE_UNKNOWN);
+ /*
+ * Tunnel PTYPEs. Use the same shuffle index as above.
+ * L4 types are not part of this table. They come from non-tunnel
+ * types above.
+ */
+ const __m256i tnl_l3_ptype_shuffle =
+ _mm256_set_epi8(/* second 128 bits */
+ RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_UNKNOWN,
+ /* first 128 bits */
+ RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_UNKNOWN, RTE_PTYPE_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN >> 16,
+ RTE_PTYPE_UNKNOWN);
+
+ const __m256i mbuf_init = _mm256_set_epi64x(0, enic->mbuf_initializer,
+ 0, enic->mbuf_initializer);
+
+ /*
+ * --- cq desc fields --- offset
+ * completed_index_flags - 0 use: fcoe
+ * q_number_rss_type_flags - 2 use: rss types, csum_not_calc
+ * rss_hash - 4 ==> mbuf.hash.rss
+ * bytes_written_flags - 8 ==> mbuf.pkt_len,data_len
+ * use: truncated, vlan_stripped
+ * vlan - 10 ==> mbuf.vlan_tci
+ * checksum_fcoe - 12 (unused)
+ * flags - 14 use: all bits
+ * type_color - 15 (unused)
+ *
+ * --- mbuf fields --- offset
+ * rearm_data ---- 16
+ * data_off - 0 (mbuf_init) -+
+ * refcnt - 2 (mbuf_init) |
+ * nb_segs - 4 (mbuf_init) | 16B 128b
+ * port - 6 (mbuf_init) |
+ * ol_flag - 8 (from cqd) -+
+ * rx_descriptor_fields1 ---- 32
+ * packet_type - 0 (from cqd) -+
+ * pkt_len - 4 (from cqd) |
+ * data_len - 8 (from cqd) | 16B 128b
+ * vlan_tci - 10 (from cqd) |
+ * rss - 12 (from cqd) -+
+ */
+
+ __m256i overlay_enabled =
+ _mm256_set1_epi32((uint32_t)enic->overlay_offload);
+
+ /* Step 2: Process 8 packets per loop using SIMD */
+ while (max_rx > 7 && (((cqd + 7)->type_color &
+ CQ_DESC_COLOR_MASK_NOSHIFT) != color)) {
+ /* Load 8 16B CQ descriptors */
+ __m256i cqd01 = _mm256_load_si256((void *)cqd);
+ __m256i cqd23 = _mm256_load_si256((void *)(cqd + 2));
+ __m256i cqd45 = _mm256_load_si256((void *)(cqd + 4));
+ __m256i cqd67 = _mm256_load_si256((void *)(cqd + 6));
+ /* Copy 8 mbuf pointers to rx_pkts */
+ _mm256_storeu_si256((void *)rx,
+ _mm256_loadu_si256((void *)rxmb));
+ _mm256_storeu_si256((void *)(rx + 4),
+ _mm256_loadu_si256((void *)(rxmb + 4)));
+
+ /*
+ * Collect 8 flags (each 32 bits) into one register.
+ * 4 shuffles, 3 blends, 1 permute for 8 desc: 1 inst/desc
+ */
+ __m256i flags01 =
+ _mm256_shuffle_epi8(cqd01, flags_shuffle_mask);
+ /*
+ * Shuffle above produces 8 x 32-bit flags for 8 descriptors
+ * in this order: 0, 0, 0, 0, 1, 1, 1, 1
+ * The duplicates in each 128-bit lane simplify blending
+ * below.
+ */
+ __m256i flags23 =
+ _mm256_shuffle_epi8(cqd23, flags_shuffle_mask);
+ __m256i flags45 =
+ _mm256_shuffle_epi8(cqd45, flags_shuffle_mask);
+ __m256i flags67 =
+ _mm256_shuffle_epi8(cqd67, flags_shuffle_mask);
+ /* 1st blend produces flags for desc: 0, 2, 0, 0, 1, 3, 1, 1 */
+ __m256i flags0_3 = _mm256_blend_epi32(flags01, flags23, 0x22);
+ /* 2nd blend produces flags for desc: 4, 4, 4, 6, 5, 5, 5, 7 */
+ __m256i flags4_7 = _mm256_blend_epi32(flags45, flags67, 0x88);
+ /* 3rd blend produces flags for desc: 0, 2, 4, 6, 1, 3, 5, 7 */
+ __m256i flags0_7 = _mm256_blend_epi32(flags0_3, flags4_7, 0xcc);
+ /*
+ * Swap to reorder flags in this order: 1, 3, 5, 7, 0, 2, 4, 6
+ * This order simplifies the blend operations further below that
+ * produce 'rearm' data for each mbuf.
+ */
+ flags0_7 = _mm256_permute4x64_epi64(flags0_7,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2);
+
+ /*
+ * Check the truncated bits and bail out early.
+ * 6 avx inst, 1 or, 1 if-then-else for 8 desc: 1 inst/desc
+ */
+ __m256i trunc =
+ _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 17), 31);
+ trunc = _mm256_add_epi64(trunc, _mm256_permute4x64_epi64(trunc,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2));
+ /* Bits 0:63 contain 1+3+0+2 and bits 64:127 contain 5+7+4+6 */
+ if (_mm256_extract_epi64(trunc, 0) ||
+ _mm256_extract_epi64(trunc, 1))
+ break;
+
+ /*
+ * Compute PKT_RX_RSS_HASH.
+ * Use 2 shifts and 1 shuffle for 8 desc: 0.375 inst/desc
+ * RSS types in byte 0, 4, 8, 12, 16, 20, 24, 28
+ * Everything else is zero.
+ */
+ __m256i rss_types =
+ _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 10), 28);
+ /*
+ * RSS flags (PKT_RX_RSS_HASH) are in
+ * byte 0, 4, 8, 12, 16, 20, 24, 28
+ * Everything else is zero.
+ */
+ __m256i rss_flags = _mm256_shuffle_epi8(rss_shuffle, rss_types);
+
+ /*
+ * Compute CKSUM flags. First build the index and then
+ * use it to shuffle csum_shuffle.
+ * 20 instructions including const loads: 2.5 inst/desc
+ */
+ /*
+ * csum_not_calc (bit 22)
+ * csum_not_calc (0) => 0xffffffff
+ * csum_not_calc (1) => 0x0
+ */
+ const __m256i zero4 = _mm256_setzero_si256();
+ const __m256i mask22 = _mm256_set1_epi32(0x400000);
+ __m256i csum_not_calc = _mm256_cmpeq_epi32(zero4,
+ _mm256_and_si256(flags0_7, mask22));
+ /*
+ * (tcp|udp) && !fragment => bit 1
+ * tcp = bit 2, udp = bit 1, frag = bit 6
+ */
+ const __m256i mask1 = _mm256_set1_epi32(0x2);
+ __m256i tcp_udp =
+ _mm256_andnot_si256(_mm256_srli_epi32(flags0_7, 5),
+ _mm256_or_si256(flags0_7,
+ _mm256_srli_epi32(flags0_7, 1)));
+ tcp_udp = _mm256_and_si256(tcp_udp, mask1);
+ /* ipv4 (bit 5) => bit 2 */
+ const __m256i mask2 = _mm256_set1_epi32(0x4);
+ __m256i ipv4 = _mm256_and_si256(mask2,
+ _mm256_srli_epi32(flags0_7, 3));
+ /*
+ * ipv4_csum_ok (bit 3) => bit 3
+ * tcp_udp_csum_ok (bit 0) => bit 0
+ * 0x9
+ */
+ const __m256i mask0_3 = _mm256_set1_epi32(0x9);
+ __m256i csum_idx = _mm256_and_si256(flags0_7, mask0_3);
+ csum_idx = _mm256_and_si256(csum_not_calc,
+ _mm256_or_si256(_mm256_or_si256(csum_idx, ipv4),
+ tcp_udp));
+ __m256i csum_flags =
+ _mm256_shuffle_epi8(csum_shuffle, csum_idx);
+ /* Shift left to restore CKSUM flags. See csum_shuffle. */
+ csum_flags = _mm256_slli_epi32(csum_flags, 1);
+ /* Combine csum flags and offload flags: 0.125 inst/desc */
+ rss_flags = _mm256_or_si256(rss_flags, csum_flags);
+
+ /*
+ * Collect 8 VLAN IDs and compute vlan_id != 0 on each.
+ * 4 shuffles, 3 blends, 1 permute, 1 cmp, 1 sub for 8 desc:
+ * 1.25 inst/desc
+ */
+ __m256i vlan01 = _mm256_shuffle_epi8(cqd01, vlan_shuffle_mask);
+ __m256i vlan23 = _mm256_shuffle_epi8(cqd23, vlan_shuffle_mask);
+ __m256i vlan45 = _mm256_shuffle_epi8(cqd45, vlan_shuffle_mask);
+ __m256i vlan67 = _mm256_shuffle_epi8(cqd67, vlan_shuffle_mask);
+ __m256i vlan0_3 = _mm256_blend_epi32(vlan01, vlan23, 0x22);
+ __m256i vlan4_7 = _mm256_blend_epi32(vlan45, vlan67, 0x88);
+ /* desc: 0, 2, 4, 6, 1, 3, 5, 7 */
+ __m256i vlan0_7 = _mm256_blend_epi32(vlan0_3, vlan4_7, 0xcc);
+ /* desc: 1, 3, 5, 7, 0, 2, 4, 6 */
+ vlan0_7 = _mm256_permute4x64_epi64(vlan0_7,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2);
+ /*
+ * Compare 0 == vlan_id produces 0xffffffff (-1) if
+ * vlan 0 and 0 if vlan non-0. Then subtracting the
+ * result from 0 produces 0 - (-1) = 1 for vlan 0, and
+ * 0 - 0 = 0 for vlan non-0.
+ */
+ vlan0_7 = _mm256_cmpeq_epi32(zero4, vlan0_7);
+ /* vlan_id != 0 => 0, vlan_id == 0 => 1 */
+ vlan0_7 = _mm256_sub_epi32(zero4, vlan0_7);
+
+ /*
+ * Compute PKT_RX_VLAN and PKT_RX_VLAN_STRIPPED.
+ * Use 3 shifts, 1 or, 1 shuffle for 8 desc: 0.625 inst/desc
+ * VLAN offload flags in byte 0, 4, 8, 12, 16, 20, 24, 28
+ * Everything else is zero.
+ */
+ __m256i vlan_idx =
+ _mm256_or_si256(/* vlan_stripped => bit 0 */
+ _mm256_srli_epi32(_mm256_slli_epi32(flags0_7,
+ 16), 31),
+ /* (vlan_id == 0) => bit 1 */
+ _mm256_slli_epi32(vlan0_7, 1));
+ /*
+ * The index captures 4 cases.
+ * stripped, id = 0 ==> 11b = 3
+ * stripped, id != 0 ==> 01b = 1
+ * not strip, id == 0 ==> 10b = 2
+ * not strip, id != 0 ==> 00b = 0
+ */
+ __m256i vlan_flags = _mm256_permutevar8x32_epi32(vlan_shuffle,
+ vlan_idx);
+ /* Combine vlan and offload flags: 0.125 inst/desc */
+ rss_flags = _mm256_or_si256(rss_flags, vlan_flags);
+
+ /*
+ * Compute non-tunnel PTYPEs.
+ * 17 inst / 8 desc = 2.125 inst/desc
+ */
+ /* ETHER and ETHER_VLAN */
+ __m256i vlan_ptype =
+ _mm256_permutevar8x32_epi32(vlan_ptype_shuffle,
+ vlan_idx);
+ /* Build the ptype index from flags */
+ tcp_udp = _mm256_slli_epi32(flags0_7, 29);
+ tcp_udp = _mm256_slli_epi32(_mm256_srli_epi32(tcp_udp, 30), 2);
+ __m256i ip4_ip6 =
+ _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 26), 30);
+ __m256i ptype_idx = _mm256_or_si256(tcp_udp, ip4_ip6);
+ __m256i frag_bit =
+ _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 25), 31);
+ __m256i nonfrag_ptype =
+ _mm256_shuffle_epi8(nonfrag_ptype_shuffle, ptype_idx);
+ __m256i frag_ptype =
+ _mm256_shuffle_epi8(frag_ptype_shuffle, ptype_idx);
+ /*
+ * Zero out the unwanted types and combine the remaining bits.
+ * The effect is the same as selecting non-frag or frag types
+ * depending on the frag bit.
+ */
+ nonfrag_ptype = _mm256_and_si256(nonfrag_ptype,
+ _mm256_cmpeq_epi32(zero4, frag_bit));
+ frag_ptype = _mm256_and_si256(frag_ptype,
+ _mm256_cmpgt_epi32(frag_bit, zero4));
+ __m256i ptype = _mm256_or_si256(nonfrag_ptype, frag_ptype);
+ ptype = _mm256_slli_epi32(ptype, 4);
+ /*
+ * Compute tunnel PTYPEs.
+ * 15 inst / 8 desc = 1.875 inst/desc
+ */
+ __m256i tnl_l3_ptype =
+ _mm256_shuffle_epi8(tnl_l3_ptype_shuffle, ptype_idx);
+ tnl_l3_ptype = _mm256_slli_epi32(tnl_l3_ptype, 16);
+ /*
+ * Shift non-tunnel L4 types to make them tunnel types.
+ * RTE_PTYPE_L4_TCP << 16 == RTE_PTYPE_INNER_L4_TCP
+ */
+ __m256i tnl_l4_ptype =
+ _mm256_slli_epi32(_mm256_and_si256(ptype,
+ _mm256_set1_epi32(RTE_PTYPE_L4_MASK)), 16);
+ __m256i tnl_ptype =
+ _mm256_or_si256(tnl_l3_ptype, tnl_l4_ptype);
+ tnl_ptype = _mm256_or_si256(tnl_ptype,
+ _mm256_set1_epi32(RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER));
+ /*
+ * Select non-tunnel or tunnel types by zeroing out the
+ * unwanted ones.
+ */
+ __m256i tnl_flags = _mm256_and_si256(overlay_enabled,
+ _mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 2), 31));
+ tnl_ptype = _mm256_and_si256(tnl_ptype,
+ _mm256_sub_epi32(zero4, tnl_flags));
+ ptype = _mm256_and_si256(ptype,
+ _mm256_cmpeq_epi32(zero4, tnl_flags));
+ /*
+ * Combine types and swap to have ptypes in the same order
+ * as desc.
+ * desc: 0 2 4 6 1 3 5 7
+ * 3 inst / 8 desc = 0.375 inst/desc
+ */
+ ptype = _mm256_or_si256(ptype, tnl_ptype);
+ ptype = _mm256_or_si256(ptype, vlan_ptype);
+ ptype = _mm256_permute4x64_epi64(ptype,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2);
+
+ /*
+ * Mask packet length.
+ * Use 4 ands: 0.5 instructions/desc
+ */
+ cqd01 = _mm256_and_si256(cqd01, mask);
+ cqd23 = _mm256_and_si256(cqd23, mask);
+ cqd45 = _mm256_and_si256(cqd45, mask);
+ cqd67 = _mm256_and_si256(cqd67, mask);
+ /*
+ * Shuffle. Two 16B sets of the mbuf fields.
+ * packet_type, pkt_len, data_len, vlan_tci, rss
+ */
+ __m256i rearm01 = _mm256_shuffle_epi8(cqd01, shuffle_mask);
+ __m256i rearm23 = _mm256_shuffle_epi8(cqd23, shuffle_mask);
+ __m256i rearm45 = _mm256_shuffle_epi8(cqd45, shuffle_mask);
+ __m256i rearm67 = _mm256_shuffle_epi8(cqd67, shuffle_mask);
+
+ /*
+ * Blend in ptypes
+ * 4 blends and 3 shuffles for 8 desc: 0.875 inst/desc
+ */
+ rearm01 = _mm256_blend_epi32(rearm01, ptype, 0x11);
+ rearm23 = _mm256_blend_epi32(rearm23,
+ _mm256_shuffle_epi32(ptype, 1), 0x11);
+ rearm45 = _mm256_blend_epi32(rearm45,
+ _mm256_shuffle_epi32(ptype, 2), 0x11);
+ rearm67 = _mm256_blend_epi32(rearm67,
+ _mm256_shuffle_epi32(ptype, 3), 0x11);
+
+ /*
+ * Move rss_flags into ol_flags in mbuf_init.
+ * Use 1 shift and 1 blend for each desc: 2 inst/desc
+ */
+ __m256i mbuf_init4_5 = _mm256_blend_epi32(mbuf_init,
+ rss_flags, 0x44);
+ __m256i mbuf_init2_3 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(rss_flags, 4), 0x44);
+ __m256i mbuf_init0_1 = _mm256_blend_epi32(mbuf_init,
+ _mm256_slli_si256(rss_flags, 8), 0x44);
+ __m256i mbuf_init6_7 = _mm256_blend_epi32(mbuf_init,
+ _mm256_srli_si256(rss_flags, 4), 0x44);
+
+ /*
+ * Build rearm, one per desc.
+ * 8 blends and 4 permutes: 1.5 inst/desc
+ */
+ __m256i rearm0 = _mm256_blend_epi32(rearm01,
+ mbuf_init0_1, 0xf0);
+ __m256i rearm1 = _mm256_blend_epi32(mbuf_init0_1,
+ rearm01, 0xf0);
+ __m256i rearm2 = _mm256_blend_epi32(rearm23,
+ mbuf_init2_3, 0xf0);
+ __m256i rearm3 = _mm256_blend_epi32(mbuf_init2_3,
+ rearm23, 0xf0);
+ /* Swap upper and lower 64 bits */
+ rearm0 = _mm256_permute4x64_epi64(rearm0,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2);
+ rearm2 = _mm256_permute4x64_epi64(rearm2,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2);
+ /* Second set of 4 descriptors */
+ __m256i rearm4 = _mm256_blend_epi32(rearm45,
+ mbuf_init4_5, 0xf0);
+ __m256i rearm5 = _mm256_blend_epi32(mbuf_init4_5,
+ rearm45, 0xf0);
+ __m256i rearm6 = _mm256_blend_epi32(rearm67,
+ mbuf_init6_7, 0xf0);
+ __m256i rearm7 = _mm256_blend_epi32(mbuf_init6_7,
+ rearm67, 0xf0);
+ rearm4 = _mm256_permute4x64_epi64(rearm4,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2);
+ rearm6 = _mm256_permute4x64_epi64(rearm6,
+ (1 << 6) + (0 << 4) + (3 << 2) + 2);
+
+ /*
+ * Write out 32B of mbuf fields.
+ * data_off - off 0 (mbuf_init)
+ * refcnt - 2 (mbuf_init)
+ * nb_segs - 4 (mbuf_init)
+ * port - 6 (mbuf_init)
+ * ol_flag - 8 (from cqd)
+ * packet_type - 16 (from cqd)
+ * pkt_len - 20 (from cqd)
+ * data_len - 24 (from cqd)
+ * vlan_tci - 26 (from cqd)
+ * rss - 28 (from cqd)
+ */
+ _mm256_storeu_si256((__m256i *)&rxmb[0]->rearm_data, rearm0);
+ _mm256_storeu_si256((__m256i *)&rxmb[1]->rearm_data, rearm1);
+ _mm256_storeu_si256((__m256i *)&rxmb[2]->rearm_data, rearm2);
+ _mm256_storeu_si256((__m256i *)&rxmb[3]->rearm_data, rearm3);
+ _mm256_storeu_si256((__m256i *)&rxmb[4]->rearm_data, rearm4);
+ _mm256_storeu_si256((__m256i *)&rxmb[5]->rearm_data, rearm5);
+ _mm256_storeu_si256((__m256i *)&rxmb[6]->rearm_data, rearm6);
+ _mm256_storeu_si256((__m256i *)&rxmb[7]->rearm_data, rearm7);
+
+ max_rx -= 8;
+ cqd += 8;
+ rx += 8;
+ rxmb += 8;
+ }
+
+ /*
+ * Step 3: Slow path to handle a small (<8) number of packets and
+ * occasional truncated packets.
+ */
+ while (max_rx && ((cqd->type_color &
+ CQ_DESC_COLOR_MASK_NOSHIFT) != color)) {
+ if (unlikely(cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+ rte_pktmbuf_free(*rxmb++);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ } else {
+ *rx++ = rx_one(cqd, *rxmb++, enic);
+ }
+ cqd++;
+ max_rx--;
+ }
+
+ /* Number of descriptors visited */
+ nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+ if (nb_rx == 0)
+ return 0;
+ rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+ rxmb = rq->mbuf_ring + cq_idx;
+ cq_idx += nb_rx;
+ rq->rx_nb_hold += nb_rx;
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
+ cq->to_clean = cq_idx;
+
+ /* Step 4: Restock RQ with new mbufs */
+ memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+ sizeof(struct rte_mbuf *) * nb_rx);
+ rq->num_free_mbufs -= nb_rx;
+ while (nb_rx) {
+ rqd->address = (*rxmb)->buf_iova + RTE_PKTMBUF_HEADROOM;
+ nb_rx--;
+ rqd++;
+ rxmb++;
+ }
+ if (rq->rx_nb_hold > rq->rx_free_thresh) {
+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
+ rq->posted_index,
+ rq->rx_nb_hold);
+ rq->rx_nb_hold = 0;
+ rte_wmb();
+ iowrite32_relaxed(rq->posted_index,
+ &rq->ctrl->posted_index);
+ }
+
+ return rx - rx_pkts;
+}
+
+bool
+enic_use_vector_rx_handler(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev;
+ struct rte_fdir_conf *fconf;
+
+ eth_dev = enic->rte_dev;
+ /* The user needs to request the avx2 handler */
+ if (!enic->enable_avx2_rx)
+ return false;
+ /* Do not support scatter Rx */
+ if (!(enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0))
+ return false;
+ /* Do not support fdir/flow */
+ fconf = &eth_dev->data->dev_conf.fdir_conf;
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return false;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
+ PMD_INIT_LOG(DEBUG, " use the non-scatter avx2 Rx handler");
+ eth_dev->rx_pkt_burst = &enic_noscatter_vec_recv_pkts;
+ return true;
+ }
+ return false;
+}
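The shuffle-table indices built in the vector loop above can be restated in scalar form. The sketch below simply mirrors the bit assignments documented in the comments (vlan index: stripped => bit 0, vlan_id == 0 => bit 1; checksum index: tcp_udp_csum_ok => bit 0, tcp-or-udp => bit 1, ipv4 => bit 2, ipv4_csum_ok => bit 3, all forced to 0 when csum_not_calc is set); it is an illustration, not code from the driver.

	#include <stdint.h>

	/* 2-bit VLAN index: 3 = stripped, id 0; 1 = stripped, id != 0;
	 * 2 = not stripped, id 0; 0 = not stripped, id != 0.
	 */
	static inline unsigned int
	vlan_flag_index(uint16_t vlan_id, int stripped)
	{
		return (stripped ? 1u : 0u) | ((vlan_id == 0) ? 2u : 0u);
	}

	/* 4-bit checksum index; 0 means "report no checksum flags". */
	static inline unsigned int
	csum_flag_index(int csum_not_calc, int ipv4, int ipv4_csum_ok,
			int tcp_or_udp, int tcp_udp_csum_ok)
	{
		if (csum_not_calc)
			return 0;
		return (tcp_udp_csum_ok ? 1u : 0u) | (tcp_or_udp ? 2u : 0u) |
		       (ipv4 ? 4u : 0u) | (ipv4_csum_ok ? 8u : 0u);
	}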
diff --git a/drivers/net/enic/meson.build b/drivers/net/enic/meson.build
index bfd4e237..06448711 100644
--- a/drivers/net/enic/meson.build
+++ b/drivers/net/enic/meson.build
@@ -17,3 +17,19 @@ sources = files(
)
deps += ['hash']
includes += include_directories('base')
+
+# The current implementation assumes 64-bit pointers
+if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') and cc.sizeof('void *') == 8
+ sources += files('enic_rxtx_vec_avx2.c')
+# Build the avx2 handler if the compiler supports it, even though 'machine'
+# does not. This is to support users who build for the min supported machine
+# and need to run the binary on newer CPUs too.
+# This part is from i40e meson.build
+elif cc.has_argument('-mavx2') and cc.sizeof('void *') == 8
+ enic_avx2_lib = static_library('enic_avx2_lib',
+ 'enic_rxtx_vec_avx2.c',
+ dependencies: [static_rte_ethdev, static_rte_bus_pci],
+ include_directories: includes,
+ c_args: [cflags, '-mavx2'])
+ objs += enic_avx2_lib.extract_objects('enic_rxtx_vec_avx2.c')
+endif
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index 657919f9..06e859e9 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -71,7 +71,7 @@ failsafe_hotplug_alarm_install(struct rte_eth_dev *dev)
return -EINVAL;
if (PRIV(dev)->pending_alarm)
return 0;
- ret = rte_eal_alarm_set(hotplug_poll * 1000,
+ ret = rte_eal_alarm_set(failsafe_hotplug_poll * 1000,
fs_hotplug_alarm,
dev);
if (ret) {
@@ -225,7 +225,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
goto unregister_new_callback;
}
mac = &dev->data->mac_addrs[0];
- if (mac_from_arg) {
+ if (failsafe_mac_from_arg) {
/*
* If MAC address was provided as a parameter,
* apply to all probed slaves.
@@ -280,7 +280,8 @@ free_args:
free_subs:
fs_sub_device_free(dev);
free_dev:
- rte_free(PRIV(dev));
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(dev);
return -1;
}
@@ -304,7 +305,9 @@ fs_rte_eth_free(const char *name)
ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
if (ret)
ERROR("Error while destroying hotplug mutex");
- rte_free(PRIV(dev));
+ rte_free(PRIV(dev)->mcast_addrs);
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(dev);
return ret;
}
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
index 626883ce..c4b220c4 100644
--- a/drivers/net/failsafe/failsafe_args.c
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -22,10 +22,10 @@
typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params,
uint8_t head);
-uint64_t hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
-int mac_from_arg = 0;
+uint64_t failsafe_hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
+int failsafe_mac_from_arg;
-const char *pmd_failsafe_init_parameters[] = {
+static const char * const pmd_failsafe_init_parameters[] = {
PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
PMD_FAILSAFE_MAC_KVARG,
NULL,
@@ -420,7 +420,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
if (arg_count == 1) {
ret = rte_kvargs_process(kvlist,
PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
- &fs_get_u64_arg, &hotplug_poll);
+ &fs_get_u64_arg, &failsafe_hotplug_poll);
if (ret < 0)
goto free_kvlist;
}
@@ -435,7 +435,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
if (ret < 0)
goto free_kvlist;
- mac_from_arg = 1;
+ failsafe_mac_from_arg = 1;
}
}
PRIV(dev)->state = DEV_PARSED;
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
index ce1633f1..8a888b1f 100644
--- a/drivers/net/failsafe/failsafe_eal.c
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -144,8 +144,7 @@ fs_bus_uninit(struct rte_eth_dev *dev)
int ret = 0;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
- sdev_ret = rte_eal_hotplug_remove(sdev->bus->name,
- sdev->dev->name);
+ sdev_ret = rte_dev_remove(sdev->dev);
if (sdev_ret) {
ERROR("Failed to remove requested device %s (err: %d)",
sdev->dev->name, sdev_ret);
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 5b5cb3b4..17831652 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -179,6 +179,23 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
return ret;
}
}
+ /*
+ * Propagate multicast MAC addresses to sub-devices
+ * if a non-zero number of addresses is set.
+ * The condition avoids breaking failsafe for sub-devices
+ * which do not support the operation when the feature
+ * is not actually used.
+ */
+ if (PRIV(dev)->nb_mcast_addr > 0) {
+ DEBUG("Configuring multicast MAC addresses");
+ ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ PRIV(dev)->mcast_addrs,
+ PRIV(dev)->nb_mcast_addr);
+ if (ret) {
+ ERROR("Failed to apply multicast MAC addresses");
+ return ret;
+ }
+ }
/* VLAN filter */
vfc1 = &dev->data->vlan_filter_conf;
vfc2 = &edev->data->vlan_filter_conf;
@@ -230,9 +247,9 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
DEBUG("Creating flow #%" PRIu32, i++);
flow->flows[SUB_ID(sdev)] =
rte_flow_create(PORT_ID(sdev),
- &flow->fd->attr,
- flow->fd->items,
- flow->fd->actions,
+ flow->rule.attr,
+ flow->rule.pattern,
+ flow->rule.actions,
&ferror);
ret = rte_errno;
if (ret)
@@ -265,8 +282,7 @@ fs_dev_remove(struct sub_device *sdev)
sdev->state = DEV_PROBED;
/* fallthrough */
case DEV_PROBED:
- ret = rte_eal_hotplug_remove(sdev->bus->name,
- sdev->dev->name);
+ ret = rte_dev_remove(sdev->dev);
if (ret) {
ERROR("Bus detach failed for sub_device %u",
SUB_ID(sdev));
@@ -366,6 +382,88 @@ failsafe_dev_remove(struct rte_eth_dev *dev)
}
}
+static int
+failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
+{
+ struct rxq *rxq;
+ int ret;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->info.conf.rx_deferred_start &&
+ dev->data->rx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ /*
+ * The subdevice Rx queue is not started on device
+ * start if the deferred start flag is set. It must be
+ * started manually when the corresponding failsafe Rx
+ * queue was started earlier.
+ */
+ ret = dev->dev_ops->rx_queue_start(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Rx queue %d", i);
+ return ret;
+ }
+ } else if (dev->data->rx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ /*
+ * The subdevice Rx queue must be stopped manually
+ * when the corresponding failsafe Rx queue was
+ * stopped earlier.
+ */
+ ret = dev->dev_ops->rx_queue_stop(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Rx queue %d", i);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+ struct txq *txq;
+ int ret;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ if (txq->info.conf.tx_deferred_start &&
+ dev->data->tx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ /*
+ * The subdevice Tx queue is not started on device
+ * start if the deferred start flag is set. It must be
+ * started manually when the corresponding failsafe Tx
+ * queue was started earlier.
+ */
+ ret = dev->dev_ops->tx_queue_start(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Tx queue %d", i);
+ return ret;
+ }
+ } else if (dev->data->tx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ /*
+ * The subdevice Tx queue needs to be stopped manually
+ * in case an appropriate failsafe Tx queue has been
+ * stopped earlier.
+ */
+ ret = dev->dev_ops->tx_queue_stop(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Tx queue %d", i);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
int
failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
{
@@ -424,6 +522,12 @@ failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
ret = dev->dev_ops->dev_start(dev);
if (ret)
goto err_remove;
+ ret = failsafe_eth_dev_rx_queues_sync(dev);
+ if (ret)
+ goto err_remove;
+ ret = failsafe_eth_dev_tx_queues_sync(dev);
+ if (ret)
+ goto err_remove;
return 0;
err_remove:
FOREACH_SUBDEV(sdev, i, dev)
@@ -466,7 +570,7 @@ failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
/* Switch as soon as possible tx_dev. */
fs_switch_dev(sdev->fs_dev, sdev);
/* Use safe bursts in any case. */
- set_burst_fn(sdev->fs_dev, 1);
+ failsafe_set_burst_fn(sdev->fs_dev, 1);
/*
* Async removal, the sub-PMD will try to unregister
* the callback at the source of the current thread context.
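The queue-state synchronization added in this file matters for applications that use deferred queue start. A minimal application-side sketch follows; port_id, the descriptor count, mb_pool, and a dev_info previously filled by rte_eth_dev_info_get() are assumed placeholders, not values taken from this patch.

	/* Defer Rx queue 0 at setup time, then start it by hand after
	 * dev_start; failsafe records the queue state so it can replay it
	 * on sub-devices that appear later.
	 */
	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;

	rxconf.rx_deferred_start = 1;
	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
			       &rxconf, mb_pool);
	rte_eth_dev_start(port_id);             /* queue 0 remains stopped */
	rte_eth_dev_rx_queue_start(port_id, 0); /* now synced to sub-devices */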
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
index bfe42fce..5e2b5f7c 100644
--- a/drivers/net/failsafe/failsafe_flow.c
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -3,8 +3,11 @@
* Copyright 2017 Mellanox Technologies, Ltd
*/
+#include <stddef.h>
+#include <string.h>
#include <sys/queue.h>
+#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
@@ -18,19 +21,33 @@ fs_flow_allocate(const struct rte_flow_attr *attr,
const struct rte_flow_action *actions)
{
struct rte_flow *flow;
- size_t fdsz;
+ const struct rte_flow_conv_rule rule = {
+ .attr_ro = attr,
+ .pattern_ro = items,
+ .actions_ro = actions,
+ };
+ struct rte_flow_error error;
+ int ret;
- fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
- flow = rte_zmalloc(NULL,
- sizeof(struct rte_flow) + fdsz,
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
+ if (ret < 0) {
+ ERROR("Unable to process flow rule (%s): %s",
+ error.message ? error.message : "unspecified",
+ strerror(rte_errno));
+ return NULL;
+ }
+ flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret,
RTE_CACHE_LINE_SIZE);
if (flow == NULL) {
ERROR("Could not allocate new flow");
return NULL;
}
- flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
- if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
- ERROR("Failed to copy flow description");
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
+ &error);
+ if (ret < 0) {
+ ERROR("Failed to copy flow rule (%s): %s",
+ error.message ? error.message : "unspecified",
+ strerror(rte_errno));
rte_free(flow);
return NULL;
}
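Condensed, the two-pass rte_flow_conv() idiom used above is: query the required size with a NULL destination, allocate, then convert into the allocated buffer. This is only a trimmed restatement of the code in the hunk, with error reporting omitted.

	int size = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
	if (size < 0)
		return NULL;                      /* first pass: size query */
	flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + size,
			   RTE_CACHE_LINE_SIZE);
	if (flow != NULL &&
	    rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, size, &rule,
			  &error) < 0) {
		rte_free(flow);                   /* second pass failed */
		flow = NULL;
	}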
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index fc6ec37f..1c2cb71c 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -372,7 +372,7 @@ void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev)
for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
if (qid < fsdev->data->nb_rx_queues) {
fsrxq = fsdev->data->rx_queues[qid];
- if (fsrxq->enable_events)
+ if (fsrxq != NULL && fsrxq->enable_events)
rte_eth_dev_rx_intr_disable(PORT_ID(sdev),
qid);
}
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 24e91c93..7f8bcd4c 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -57,7 +57,6 @@ static struct rte_eth_dev_info default_infos = {
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY,
@@ -74,7 +73,6 @@ static struct rte_eth_dev_info default_infos = {
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY,
@@ -88,6 +86,9 @@ static struct rte_eth_dev_info default_infos = {
ETH_RSS_IP |
ETH_RSS_UDP |
ETH_RSS_TCP,
+ .dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP,
};
static int
@@ -170,6 +171,27 @@ fs_dev_configure(struct rte_eth_dev *dev)
return 0;
}
+static void
+fs_set_queues_state_start(struct rte_eth_dev *dev)
+{
+ struct rxq *rxq;
+ struct txq *txq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
+ dev->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq != NULL && !txq->info.conf.tx_deferred_start)
+ dev->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+}
+
static int
fs_dev_start(struct rte_eth_dev *dev)
{
@@ -204,14 +226,31 @@ fs_dev_start(struct rte_eth_dev *dev)
}
sdev->state = DEV_STARTED;
}
- if (PRIV(dev)->state < DEV_STARTED)
+ if (PRIV(dev)->state < DEV_STARTED) {
PRIV(dev)->state = DEV_STARTED;
+ fs_set_queues_state_start(dev);
+ }
fs_switch_dev(dev, NULL);
fs_unlock(dev, 0);
return 0;
}
static void
+fs_set_queues_state_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ if (dev->data->rx_queues[i] != NULL)
+ dev->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ if (dev->data->tx_queues[i] != NULL)
+ dev->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+static void
fs_dev_stop(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
@@ -225,6 +264,7 @@ fs_dev_stop(struct rte_eth_dev *dev)
sdev->state = DEV_STARTED - 1;
}
failsafe_rx_intr_uninstall(dev);
+ fs_set_queues_state_stop(dev);
fs_unlock(dev, 0);
}
@@ -294,6 +334,112 @@ fs_dev_close(struct rte_eth_dev *dev)
fs_unlock(dev, 0);
}
+static int
+fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ int err = 0;
+ bool failure = true;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Rx queue stop failed for subdevice %d", i);
+ err = ret;
+ } else {
+ failure = false;
+ }
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ fs_unlock(dev, 0);
+ /* Return 0 if at least one queue stop succeeded */
+ return (failure) ? err : 0;
+}
+
+static int
+fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Rx queue start failed for subdevice %d", i);
+ fs_rx_queue_stop(dev, rx_queue_id);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ int err = 0;
+ bool failure = true;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Tx queue stop failed for subdevice %d", i);
+ err = ret;
+ } else {
+ failure = false;
+ }
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ fs_unlock(dev, 0);
+ /* Return 0 if at least one queue stop succeeded */
+ return (failure) ? err : 0;
+}
+
+static int
+fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Tx queue start failed for subdevice %d", i);
+ fs_tx_queue_stop(dev, tx_queue_id);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
static void
fs_rx_queue_release(void *queue)
{
@@ -309,9 +455,13 @@ fs_rx_queue_release(void *queue)
fs_lock(dev, 0);
if (rxq->event_fd > 0)
close(rxq->event_fd);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, rx_queue_release)
- (ETH(sdev)->data->rx_queues[rxq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ if (ETH(sdev)->data->rx_queues != NULL &&
+ ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
+ SUBOPS(sdev, rx_queue_release)
+ (ETH(sdev)->data->rx_queues[rxq->qid]);
+ }
+ }
dev->data->rx_queues[rxq->qid] = NULL;
rte_free(rxq);
fs_unlock(dev, 0);
@@ -341,6 +491,16 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
int ret;
fs_lock(dev, 0);
+ if (rx_conf->rx_deferred_start) {
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ if (SUBOPS(sdev, rx_queue_start) == NULL) {
+ ERROR("Rx queue deferred start is not "
+ "supported for subdevice %d", i);
+ fs_unlock(dev, 0);
+ return -EINVAL;
+ }
+ }
+ }
rxq = dev->data->rx_queues[rx_queue_id];
if (rxq != NULL) {
fs_rx_queue_release(rxq);
@@ -477,9 +637,13 @@ fs_tx_queue_release(void *queue)
txq = queue;
dev = txq->priv->dev;
fs_lock(dev, 0);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, tx_queue_release)
- (ETH(sdev)->data->tx_queues[txq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ if (ETH(sdev)->data->tx_queues != NULL &&
+ ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
+ SUBOPS(sdev, tx_queue_release)
+ (ETH(sdev)->data->tx_queues[txq->qid]);
+ }
+ }
dev->data->tx_queues[txq->qid] = NULL;
rte_free(txq);
fs_unlock(dev, 0);
@@ -498,6 +662,16 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
int ret;
fs_lock(dev, 0);
+ if (tx_conf->tx_deferred_start) {
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ if (SUBOPS(sdev, tx_queue_start) == NULL) {
+ ERROR("Tx queue deferred start is not "
+ "supported for subdevice %d", i);
+ fs_unlock(dev, 0);
+ return -EINVAL;
+ }
+ }
+ }
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
fs_tx_queue_release(txq);
@@ -716,6 +890,8 @@ fs_stats_reset(struct rte_eth_dev *dev)
* all sub_devices and the default capabilities.
* Uses a logical AND of TX capabilities among
* the active probed sub_device and the default capabilities.
+ * Uses a logical AND of device capabilities among
+ * all sub_devices and the default capabilities.
*
*/
static void
@@ -734,10 +910,12 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
uint64_t rx_offload_capa;
uint64_t rxq_offload_capa;
uint64_t rss_hf_offload_capa;
+ uint64_t dev_capa;
rx_offload_capa = default_infos.rx_offload_capa;
rxq_offload_capa = default_infos.rx_queue_offload_capa;
rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
+ dev_capa = default_infos.dev_capa;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
rte_eth_dev_info_get(PORT_ID(sdev),
&PRIV(dev)->infos);
@@ -746,12 +924,14 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
PRIV(dev)->infos.rx_queue_offload_capa;
rss_hf_offload_capa &=
PRIV(dev)->infos.flow_type_rss_offloads;
+ dev_capa &= PRIV(dev)->infos.dev_capa;
}
sdev = TX_SUBDEV(dev);
rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
+ PRIV(dev)->infos.dev_capa = dev_capa;
PRIV(dev)->infos.tx_offload_capa &=
default_infos.tx_offload_capa;
PRIV(dev)->infos.tx_queue_offload_capa &=
@@ -953,6 +1133,55 @@ fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
}
static int
+fs_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ void *mcast_addrs;
+
+ fs_lock(dev, 0);
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ mc_addr_set, nb_mc_addr);
+ if (ret != 0) {
+ ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
+ i, ret);
+ goto rollback;
+ }
+ }
+
+ mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
+ nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
+ if (mcast_addrs == NULL && nb_mc_addr > 0) {
+ ret = -ENOMEM;
+ goto rollback;
+ }
+ rte_memcpy(mcast_addrs, mc_addr_set,
+ nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
+ PRIV(dev)->nb_mcast_addr = nb_mc_addr;
+ PRIV(dev)->mcast_addrs = mcast_addrs;
+
+ fs_unlock(dev, 0);
+ return 0;
+
+rollback:
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
+ if (rc != 0) {
+ ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
+ i, rc);
+ }
+ }
+
+ fs_unlock(dev, 0);
+ return ret;
+}
+
+static int
fs_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
@@ -1025,6 +1254,10 @@ const struct eth_dev_ops failsafe_ops = {
.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
.mtu_set = fs_mtu_set,
.vlan_filter_set = fs_vlan_filter_set,
+ .rx_queue_start = fs_rx_queue_start,
+ .rx_queue_stop = fs_rx_queue_stop,
+ .tx_queue_start = fs_tx_queue_start,
+ .tx_queue_stop = fs_tx_queue_stop,
.rx_queue_setup = fs_rx_queue_setup,
.tx_queue_setup = fs_tx_queue_setup,
.rx_queue_release = fs_rx_queue_release,
@@ -1036,6 +1269,7 @@ const struct eth_dev_ops failsafe_ops = {
.mac_addr_remove = fs_mac_addr_remove,
.mac_addr_add = fs_mac_addr_add,
.mac_addr_set = fs_mac_addr_set,
+ .set_mc_addr_list = fs_set_mc_addr_list,
.rss_hash_update = fs_rss_hash_update,
.filter_ctrl = fs_filter_ctrl,
};
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index 886af861..7e318968 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -6,6 +6,7 @@
#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
#define _RTE_ETH_FAILSAFE_PRIVATE_H_
+#include <stdint.h>
#include <sys/queue.h>
#include <pthread.h>
@@ -13,6 +14,7 @@
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_devargs.h>
+#include <rte_flow.h>
#include <rte_interrupts.h>
#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
@@ -81,7 +83,8 @@ struct rte_flow {
/* sub_flows */
struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
/* flow description for synchronization */
- struct rte_flow_desc *fd;
+ struct rte_flow_conv_rule rule;
+ uint8_t rule_data[];
};
enum dev_state {
@@ -143,6 +146,8 @@ struct fs_priv {
uint32_t nb_mac_addr;
struct ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
+ uint32_t nb_mcast_addr;
+ struct ether_addr *mcast_addrs;
/* current capabilities */
struct rte_eth_dev_info infos;
struct rte_eth_dev_owner my_owner; /* Unique owner. */
@@ -188,7 +193,7 @@ int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);
/* RX / TX */
-void set_burst_fn(struct rte_eth_dev *dev, int force_safe);
+void failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe);
uint16_t failsafe_rx_burst(void *rxq,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
@@ -234,8 +239,8 @@ int failsafe_eth_new_event_callback(uint16_t port_id,
extern const char pmd_failsafe_driver_name[];
extern const struct eth_dev_ops failsafe_ops;
extern const struct rte_flow_ops fs_flow_ops;
-extern uint64_t hotplug_poll;
-extern int mac_from_arg;
+extern uint64_t failsafe_hotplug_poll;
+extern int failsafe_mac_from_arg;
/* HELPERS */
@@ -468,7 +473,7 @@ fs_switch_dev(struct rte_eth_dev *dev,
} else {
return;
}
- set_burst_fn(dev, 0);
+ failsafe_set_burst_fn(dev, 0);
rte_wmb();
}
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
index 7bd0f963..034f47b8 100644
--- a/drivers/net/failsafe/failsafe_rxtx.c
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -29,7 +29,7 @@ fs_tx_unsafe(struct sub_device *sdev)
}
void
-set_burst_fn(struct rte_eth_dev *dev, int force_safe)
+failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
struct sub_device *sdev;
uint8_t i;
diff --git a/drivers/net/fm10k/base/meson.build b/drivers/net/fm10k/base/meson.build
index a8fc5fa8..5525cdc8 100644
--- a/drivers/net/fm10k/base/meson.build
+++ b/drivers/net/fm10k/base/meson.build
@@ -15,6 +15,9 @@ error_cflags = ['-Wno-unused-parameter', '-Wno-unused-value',
'-Wno-unused-variable', '-Wno-missing-field-initializers'
]
c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
foreach flag: error_cflags
if cc.has_argument(flag)
c_args += flag
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 541a49b7..c852022d 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -72,7 +72,7 @@ struct fm10k_xstats_name_off {
unsigned offset;
};
-struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
+static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
{"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
{"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
{"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
@@ -87,7 +87,7 @@ struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
sizeof(fm10k_hw_stats_strings[0]))
-struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
+static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
{"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
{"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
{"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
@@ -96,7 +96,7 @@ struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
sizeof(fm10k_hw_stats_rx_q_strings[0]))
-struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
+static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
{"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
{"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};
@@ -129,13 +129,13 @@ fm10k_mbx_unlock(struct fm10k_hw *hw)
}
/* Stubs needed for linkage when vPMD is disabled */
-int __attribute__((weak))
+__rte_weak int
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
fm10k_recv_pkts_vec(
__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
@@ -144,7 +144,7 @@ fm10k_recv_pkts_vec(
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
fm10k_recv_scattered_pkts_vec(
__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
@@ -153,33 +153,33 @@ fm10k_recv_scattered_pkts_vec(
return 0;
}
-int __attribute__((weak))
+__rte_weak int
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
return -1;
}
-void __attribute__((weak))
+__rte_weak void
fm10k_rx_queue_release_mbufs_vec(
__rte_unused struct fm10k_rx_queue *rxq)
{
return;
}
-void __attribute__((weak))
+__rte_weak void
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
return;
}
-int __attribute__((weak))
+__rte_weak int
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
__rte_unused struct rte_mbuf **tx_pkts,
__rte_unused uint16_t nb_pkts)
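The vPMD stubs above rely on __rte_weak, DPDK's wrapper around GCC/Clang weak linkage: when the vector Rx/Tx objects are not built, the weak definitions satisfy the linker, and when they are built, the strong definitions take precedence. A minimal, standalone sketch of that pattern, using an illustrative function name rather than the real fm10k symbols:

#include <stdio.h>

/* Weak default, used only when no strong definition is linked in. */
__attribute__((weak)) int
rx_vec_condition_check(void)
{
	return -1;	/* vector path unavailable */
}

/*
 * A vector implementation built into the same binary would provide a
 * strong definition, e.g.: int rx_vec_condition_check(void) { return 0; }
 */
int
main(void)
{
	printf("condition check: %d\n", rx_vec_condition_check()); /* -1 here */
	return 0;
}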
@@ -451,12 +451,6 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- /* KEEP_CRC offload flag is not supported by PMD
- * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
- */
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
- PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
-
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
@@ -1325,7 +1319,7 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
static int
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- uint64_t ipackets, opackets, ibytes, obytes;
+ uint64_t ipackets, opackets, ibytes, obytes, imissed;
struct fm10k_hw *hw =
FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_hw_stats *hw_stats =
@@ -1336,22 +1330,25 @@ fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
fm10k_update_hw_stats(hw, hw_stats);
- ipackets = opackets = ibytes = obytes = 0;
+ ipackets = opackets = ibytes = obytes = imissed = 0;
for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
(i < hw->mac.max_queues); ++i) {
stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
+ stats->q_errors[i] = hw_stats->q[i].rx_drops.count;
ipackets += stats->q_ipackets[i];
opackets += stats->q_opackets[i];
ibytes += stats->q_ibytes[i];
obytes += stats->q_obytes[i];
+ imissed += stats->q_errors[i];
}
stats->ipackets = ipackets;
stats->opackets = opackets;
stats->ibytes = ibytes;
stats->obytes = obytes;
+ stats->imissed = imissed;
return 0;
}
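The reworked fm10k_stats_get() above now also records per-queue RX drops in stats->q_errors[] and sums them into the port-level imissed counter. A standalone sketch of that aggregation, using a hypothetical q_stats struct in place of struct fm10k_hw_stats_q:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NB_QUEUES 4

struct q_stats {
	uint64_t rx_packets;
	uint64_t rx_bytes;
	uint64_t rx_drops;
};

int
main(void)
{
	struct q_stats q[NB_QUEUES] = {
		{100, 6400, 1}, {200, 12800, 0}, {50, 3200, 2}, {0, 0, 0},
	};
	uint64_t ipackets = 0, ibytes = 0, imissed = 0;
	unsigned int i;

	for (i = 0; i < NB_QUEUES; i++) {
		ipackets += q[i].rx_packets;
		ibytes += q[i].rx_bytes;
		imissed += q[i].rx_drops;	/* per-queue drops feed imissed */
	}
	printf("ipackets=%" PRIu64 " ibytes=%" PRIu64 " imissed=%" PRIu64 "\n",
	       ipackets, ibytes, imissed);
	return 0;
}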
@@ -1796,7 +1793,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_HEADER_SPLIT);
}
@@ -1982,6 +1978,7 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
RTE_SET_USED(dev);
return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
@@ -3237,14 +3234,6 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
fm10k_dev_interrupt_handler_vf, (void *)dev);
}
- /* free mac memory */
- if (dev->data->mac_addrs) {
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
- }
-
- memset(hw, 0, sizeof(*hw));
-
return 0;
}
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
index 247ba11d..84f191fa 100644
--- a/drivers/net/i40e/base/README
+++ b/drivers/net/i40e/base/README
@@ -34,7 +34,7 @@ Intel® I40E driver
==================
This directory contains source code of FreeBSD i40e driver of version
-cid-i40e.2018.01.02.tar.gz released by the team which develops
+cid-i40e.2018.09.13.tar.gz released by the team which develops
basic drivers for any i40e NIC. The directory of base/ contains the
original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 612be883..38214a37 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "i40e_status.h"
#include "i40e_type.h"
@@ -126,6 +97,7 @@ enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
@@ -433,7 +405,7 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
/* initialize base registers */
ret_code = i40e_config_asq_regs(hw);
if (ret_code != I40E_SUCCESS)
- goto init_adminq_free_rings;
+ goto init_config_regs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
@@ -441,6 +413,10 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
init_adminq_free_rings:
i40e_free_adminq_asq(hw);
+ return ret_code;
+
+init_config_regs:
+ i40e_free_asq_bufs(hw);
init_adminq_exit:
return ret_code;
@@ -692,6 +668,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ }
+ if (hw->mac.type == I40E_MAC_X722 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
/* Newer versions of firmware require lock when reading the NVM */
@@ -987,6 +969,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
cmd_completed = true;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = I40E_SUCCESS;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
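With the change above, an admin-queue return code of I40E_AQ_RC_EBUSY is reported as I40E_ERR_NOT_READY instead of a generic I40E_ERR_ADMIN_QUEUE_ERROR, giving callers a distinct signal that the command can simply be retried. A standalone sketch of such a caller-side retry loop; the status codes and the send_cmd() stub are illustrative, not part of the i40e base code:

#include <stdio.h>

/* Illustrative status codes; the real driver uses enum i40e_status_code. */
enum status { ST_OK = 0, ST_NOT_READY = -1, ST_ADMINQ_ERR = -2 };

/* Simulated admin-queue send: busy (NOT_READY) for the first two attempts. */
static enum status
send_cmd(void)
{
	static int calls;
	return (calls++ < 2) ? ST_NOT_READY : ST_OK;
}

static enum status
send_cmd_retry(unsigned int max_tries)
{
	enum status st = ST_ADMINQ_ERR;
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		st = send_cmd();
		if (st != ST_NOT_READY)
			break;	/* success or hard failure */
		/* real callers would back off here, e.g. i40e_msec_delay() */
	}
	return st;
}

int
main(void)
{
	printf("final status: %d\n", send_cmd_retry(5)); /* 0 after retries */
	return 0;
}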
diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h
index de4ab3f3..769d8480 100644
--- a/drivers/net/i40e/base/i40e_adminq.h
+++ b/drivers/net/i40e/base/i40e_adminq.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 801c0ff1..83062602 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
@@ -41,7 +12,7 @@ POSSIBILITY OF SUCH DAMAGE.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0006
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
@@ -50,6 +21,8 @@ POSSIBILITY OF SUCH DAMAGE.
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
struct i40e_aq_desc {
__le16 flags;
@@ -804,7 +777,35 @@ struct i40e_aqc_set_switch_config {
*/
__le16 first_tag;
__le16 second_tag;
- u8 reserved[6];
+ /* Next byte is split into following:
+ * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
+ * Bit 6 : 0 : Destination Port, 1: source port
+ * Bit 5..4 : L4 type
+ * 0: rsvd
+ * 1: TCP
+ * 2: UDP
+ * 3: Both TCP and UDP
+ * Bits 3:0 Mode
+ * 0: default mode
+ * 1: L4 port only mode
+ * 2: non-tunneled mode
+ * 3: tunneled mode
+ */
+#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
+
+#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
+
+#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
+#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
+#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
+#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
+
+#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
+#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
+#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
+#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
+ u8 mode;
+ u8 rsvd5[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
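The former reserved byte of i40e_aqc_set_switch_config is now a mode byte whose bit layout is documented in the comment above. A standalone sketch of composing it from the new defines; the macro values are copied from this header, and the chosen scenario (destination UDP port matching in L4-port-only mode) is just an example:

#include <stdint.h>
#include <stdio.h>

#define I40E_AQ_SET_SWITCH_BIT7_VALID	0x80
#define I40E_AQ_SET_SWITCH_L4_SRC_PORT	0x40
#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP	0x20
#define I40E_AQ_SET_SWITCH_MODE_L4_PORT	0x01

int
main(void)
{
	/* valid bit + destination port (bit 6 clear) + UDP + L4-port-only mode */
	uint8_t mode = I40E_AQ_SET_SWITCH_BIT7_VALID |
		       I40E_AQ_SET_SWITCH_L4_TYPE_UDP |
		       I40E_AQ_SET_SWITCH_MODE_L4_PORT;

	printf("switch config mode byte = 0x%02x\n", mode); /* 0xa1 */
	return 0;
}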
@@ -1359,6 +1360,7 @@ struct i40e_aqc_add_remove_cloud_filters {
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
u8 big_buffer_flag;
#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+#define I40E_AQC_ADD_CLOUD_CMD_BB 1
u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
@@ -1366,7 +1368,7 @@ struct i40e_aqc_add_remove_cloud_filters {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-struct i40e_aqc_add_remove_cloud_filters_element_data {
+struct i40e_aqc_cloud_filters_element_data {
u8 outer_mac[6];
u8 inner_mac[6];
__le16 inner_vlan;
@@ -1378,6 +1380,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
struct {
u8 data[16];
} v6;
+ struct {
+ __le16 data[8];
+ } raw_v6;
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
@@ -1397,6 +1402,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
/* 0x0010 to 0x0017 is for custom filters */
+#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
@@ -1436,7 +1444,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
* DCR288
*/
struct i40e_aqc_add_rm_cloud_filt_elem_ext {
- struct i40e_aqc_add_remove_cloud_filters_element_data element;
+ struct i40e_aqc_cloud_filters_element_data element;
u16 general_fields[32];
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
@@ -1471,6 +1479,49 @@ struct i40e_aqc_add_rm_cloud_filt_elem_ext {
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
+
+/* i40e_aqc_cloud_filters_element_bb is used when
+ * I40E_AQC_CLOUD_CMD_BB flag is set.
+ */
+struct i40e_aqc_cloud_filters_element_bb {
+ struct i40e_aqc_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
+
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
@@ -1491,6 +1542,8 @@ struct i40e_filter_data {
u8 input[3];
};
+I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
+
struct i40e_aqc_replace_cloud_filters_cmd {
u8 valid_flags;
#define I40E_AQC_REPLACE_L1_FILTER 0x0
@@ -1501,11 +1554,14 @@ struct i40e_aqc_replace_cloud_filters_cmd {
u8 old_filter_type;
u8 new_filter_type;
u8 tr_bit;
- u8 reserved[4];
+ u8 tr_bit2;
+ u8 reserved[3];
__le32 addr_high;
__le32 addr_low;
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
+
struct i40e_aqc_replace_cloud_filters_cmd_buf {
u8 data[32];
/* Filter type INPUT codes*/
@@ -1530,6 +1586,8 @@ struct i40e_aqc_replace_cloud_filters_cmd_buf {
struct i40e_filter_data filters[8];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
+
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
@@ -1878,23 +1936,115 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+#ifdef CARLSVILLE_HW
+ I40E_PHY_TYPE_2_5GBASE_T = 0x30,
+ I40E_PHY_TYPE_5GBASE_T = 0x31,
+#endif
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
I40E_PHY_TYPE_DEFAULT = 0xFF,
};
+#ifdef CARLSVILLE_HW
+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
+ BIT_ULL(I40E_PHY_TYPE_XAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XFI) | \
+ BIT_ULL(I40E_PHY_TYPE_SFI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
+ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
+ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
+ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
+ BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
+#else
+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
+ BIT_ULL(I40E_PHY_TYPE_XAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XFI) | \
+ BIT_ULL(I40E_PHY_TYPE_SFI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
+ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
+ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
+ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
+#endif
+
+#ifdef CARLSVILLE_HW
+#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0
+#endif
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+#ifdef CARLSVILLE_HW
+#define I40E_LINK_SPEED_5GB_SHIFT 0x7
+#endif
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+#ifdef CARLSVILLE_HW
+ I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT),
+ I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT),
+#endif
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
@@ -1940,6 +2090,10 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+#ifdef CARLSVILLE_HW
+#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40
+#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80
+#endif
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2184,7 +2338,9 @@ struct i40e_aqc_phy_register_access {
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_addres;
- u8 reserved1[2];
+ u8 cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 1
+ u8 reserved1;
__le32 reg_address;
__le32 reg_value;
u8 reserved2[4];
@@ -2199,6 +2355,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
diff --git a/drivers/net/i40e/base/i40e_alloc.h b/drivers/net/i40e/base/i40e_alloc.h
index 38c2f655..4fc18601 100644
--- a/drivers/net/i40e/base/i40e_alloc.h
+++ b/drivers/net/i40e/base/i40e_alloc.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index e0a5be14..8a98afff 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -1,42 +1,12 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include "virtchnl.h"
-
/**
* i40e_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -65,6 +35,9 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+#ifdef CARLSVILLE_HW
+ case I40E_DEV_ID_10G_BASE_T_BC:
+#endif
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
case I40E_DEV_ID_25G_B:
@@ -1290,6 +1263,10 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
break;
case I40E_PHY_TYPE_100BASE_TX:
case I40E_PHY_TYPE_1000BASE_T:
+#ifdef CARLSVILLE_HW
+ case I40E_PHY_TYPE_2_5GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
+#endif
case I40E_PHY_TYPE_10GBASE_T:
media = I40E_MEDIA_TYPE_BASET;
break;
@@ -1326,6 +1303,29 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
return media;
}
+/**
+ * i40e_poll_globr - Poll for Global Reset completion
+ * @hw: pointer to the hardware structure
+ * @retry_limit: how many times to retry before failure
+ **/
+STATIC enum i40e_status_code i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
+{
+ u32 cnt, reg = 0;
+
+ for (cnt = 0; cnt < retry_limit; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ return I40E_SUCCESS;
+ i40e_msec_delay(100);
+ }
+
+ DEBUGOUT("Global reset failed.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg);
+
+ return I40E_ERR_RESET_FAILED;
+}
+
#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
@@ -1349,7 +1349,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- grst_del = grst_del * 20;
+ grst_del = min(grst_del * 20, 160U);
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
@@ -1395,14 +1395,14 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
reg2 = rd32(hw, I40E_GLGEN_RSTAT);
- if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
- DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
- DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
- return I40E_ERR_NOT_READY;
- }
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
+ break;
i40e_msec_delay(1);
}
- if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS)
+ return I40E_ERR_RESET_FAILED;
+ } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
DEBUGOUT("PF reset polling failed to complete.\n");
return I40E_ERR_RESET_FAILED;
}
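With this hunk, a pending global reset observed while waiting for PF software reset completion no longer aborts with I40E_ERR_NOT_READY; the new i40e_poll_globr() helper instead polls I40E_GLGEN_RSTAT until the device-state bits clear. A standalone sketch of that poll-until-clear pattern, with the register read simulated and the mask value purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define DEVSTATE_MASK 0x3u	/* stands in for I40E_GLGEN_RSTAT_DEVSTATE_MASK */

/* Simulated register: reports "in reset" for the first few reads. */
static uint32_t
read_rstat(void)
{
	static int reads;
	return (reads++ < 3) ? DEVSTATE_MASK : 0;
}

static int
poll_globr(uint32_t retry_limit)
{
	uint32_t cnt;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		if (!(read_rstat() & DEVSTATE_MASK))
			return 0;	/* global reset completed */
		/* the real code delays ~100 ms between polls */
	}
	return -1;			/* reset did not complete in time */
}

int
main(void)
{
	printf("globr poll result: %d\n", poll_globr(10)); /* 0 */
	return 0;
}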
@@ -1883,6 +1883,10 @@ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
if (crc_en)
cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
+ cmd->fc_refresh_threshold =
+ CPU_TO_LE16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -2711,13 +2715,14 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
* i40e_aq_set_switch_config
* @hw: pointer to the hardware structure
* @flags: bit flag values to set
+ * @mode: cloud filter mode
* @valid_flags: which bit flags to set
* @cmd_details: pointer to command details structure or NULL
*
* Set switch configuration bits
**/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags, u16 valid_flags,
+ u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2729,6 +2734,7 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = CPU_TO_LE16(flags);
scfg->valid_flags = CPU_TO_LE16(valid_flags);
+ scfg->mode = mode;
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
scfg->first_tag = CPU_TO_LE16(hw->first_tag);
@@ -3708,9 +3714,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ enum i40e_status_code status;
+ u16 id, ocp_cfg_word0;
u8 major_rev;
u32 i = 0;
- u16 id;
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@@ -4002,6 +4009,26 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
hw->num_ports++;
}
+ /* OCP cards case: if a mezz is removed the ethernet port is at
+ * disabled state in PRTGEN_CNF register. Additional NVM read is
+ * needed in order to check if we are dealing with OCP card.
+ * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
+ * physical ports results in wrong partition id calculation and thus
+ * not supporting WoL.
+ */
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+ status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
+ 2 * I40E_SR_OCP_CFG_WORD0,
+ sizeof(ocp_cfg_word0),
+ &ocp_cfg_word0, true, NULL);
+ if (status == I40E_SUCCESS &&
+ (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
+ hw->num_ports = 4;
+ i40e_release_nvm(hw);
+ }
+ }
+
valid_functions = p->valid_functions;
num_functions = 0;
while (valid_functions) {
@@ -4132,6 +4159,43 @@ i40e_aq_update_nvm_exit:
}
/**
+ * i40e_aq_rearrange_nvm
+ * @hw: pointer to the hw struct
+ * @rearrange_nvm: defines direction of rearrangement
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Rearrange NVM structure, available only for transition FW
+ **/
+enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_nvm_update *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ DEBUGFUNC("i40e_aq_rearrange_nvm");
+
+ cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
+ I40E_AQ_NVM_REARRANGE_TO_STRUCT);
+
+ if (!rearrange_nvm) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_rearrange_nvm_exit;
+ }
+
+ cmd->command_flags |= rearrange_nvm;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_rearrange_nvm_exit:
+ return status;
+}
+
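The new i40e_aq_rearrange_nvm() masks the caller's argument against the two direction flags added to i40e_aqc_nvm_update and rejects a request that carries neither. A standalone sketch of that flag validation; the flag values are copied from i40e_adminq_cmd.h above, while the helper itself is illustrative:

#include <stdint.h>
#include <stdio.h>

#define I40E_AQ_NVM_REARRANGE_TO_FLAT	0x20
#define I40E_AQ_NVM_REARRANGE_TO_STRUCT	0x40

static int
rearrange_flag_is_valid(uint8_t rearrange_nvm)
{
	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
			  I40E_AQ_NVM_REARRANGE_TO_STRUCT);
	return rearrange_nvm != 0;	/* at least one direction bit required */
}

int
main(void)
{
	printf("TO_FLAT valid:    %d\n", rearrange_flag_is_valid(0x20)); /* 1 */
	printf("no flag valid:    %d\n", rearrange_flag_is_valid(0x00)); /* 0 */
	printf("other bit valid:  %d\n", rearrange_flag_is_valid(0x80)); /* 0 */
	return 0;
}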
+/**
* i40e_aq_nvm_progress
* @hw: pointer to the hw struct
* @progress: pointer to progress returned from AQ
@@ -4488,6 +4552,9 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
enum i40e_status_code status;
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
@@ -5693,10 +5760,10 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
* to be shifted 1 byte over from the VxLAN VNI
**/
STATIC void i40e_fix_up_geneve_vni(
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+ struct i40e_aqc_cloud_filters_element_data *f = filters;
int i;
for (i = 0; i < filter_count; i++) {
@@ -5721,13 +5788,13 @@ STATIC void i40e_fix_up_geneve_vni(
* @filter_count: number of filters contained in the buffer
*
* Set the cloud filters for a given VSI. The contents of the
- * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * i40e_aqc_cloud_filters_element_data are filled
* in by the caller of the function.
*
**/
enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
struct i40e_aq_desc desc;
@@ -5753,21 +5820,21 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
}
/**
- * i40e_aq_add_cloud_filters_big_buffer
+ * i40e_aq_add_cloud_filters_bb
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
* @filters: Buffer which contains the filters in big buffer to be added
* @filter_count: number of filters contained in the buffer
*
* Set the cloud filters for a given VSI. The contents of the
- * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of
* the function.
*
**/
-enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
- u8 filter_count)
+enum i40e_status_code
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
@@ -5784,9 +5851,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
- cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
- /* adjust Geneve VNI for HW issue */
for (i = 0; i < filter_count; i++) {
u16 tnl_type;
u32 ti;
@@ -5794,6 +5860,11 @@ enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
+ * one more byte further than normally used for Tenant ID in
+ * other tunnel types.
+ */
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
ti = LE32_TO_CPU(filters[i].element.tenant_id);
filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
@@ -5806,21 +5877,21 @@ enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
}
/**
- * i40e_aq_remove_cloud_filters
+ * i40e_aq_rem_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
* @filters: Buffer which contains the filters to be removed
* @filter_count: number of filters contained in the buffer
*
* Remove the cloud filters for a given VSI. The contents of the
- * i40e_aqc_add_remove_cloud_filters_element_data are filled
- * in by the caller of the function.
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
*
**/
-enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count)
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
@@ -5845,22 +5916,21 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
}
/**
- * i40e_aq_remove_cloud_filters_big_buffer
+ * i40e_aq_rem_cloud_filters_bb
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
* @filters: Buffer which contains the filters in big buffer to be removed
* @filter_count: number of filters contained in the buffer
*
- * Remove the cloud filters for a given VSI. The contents of the
- * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
- * the function.
+ * Remove the big buffer cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
*
**/
-enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
- struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
- u8 filter_count)
+enum i40e_status_code
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
@@ -5877,9 +5947,8 @@ enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
- cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
- /* adjust Geneve VNI for HW issue */
for (i = 0; i < filter_count; i++) {
u16 tnl_type;
u32 ti;
@@ -5887,6 +5956,11 @@ enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
+ * one more byte further than normally used for Tenant ID in
+ * other tunnel types.
+ */
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
ti = LE32_TO_CPU(filters[i].element.tenant_id);
filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
@@ -5916,6 +5990,14 @@ i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
enum i40e_status_code status = I40E_SUCCESS;
int i = 0;
+ /* X722 doesn't support this command */
+ if (hw->mac.type == I40E_MAC_X722)
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+ /* need FW version greater than 6.00 */
+ if (hw->aq.fw_maj_ver < 6)
+ return I40E_NOT_SUPPORTED;
+
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_replace_cloud_filters);
@@ -5925,6 +6007,7 @@ i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
cmd->new_filter_type = filters->new_filter_type;
cmd->valid_flags = filters->valid_flags;
cmd->tr_bit = filters->tr_bit;
+ cmd->tr_bit2 = filters->tr_bit2;
status = i40e_asq_send_command(hw, &desc, cmd_buf,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
@@ -6618,6 +6701,9 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+#ifdef CARLSVILLE_HW
+ case I40E_DEV_ID_10G_BASE_T_BC:
+#endif
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -6773,7 +6859,7 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -6801,7 +6887,7 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -6835,7 +6921,7 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
if (status == I40E_SUCCESS)
@@ -7036,11 +7122,13 @@ do_retry:
wr32(hw, reg_addr, reg_val);
}
+#ifdef PF_DRIVER
/**
* i40e_aq_set_phy_register
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: enable auto page change
* @reg_addr: PHY register address
* @reg_val: new register value
* @cmd_details: pointer to command details structure or NULL
@@ -7048,7 +7136,7 @@ do_retry:
* Write the external PHY register.
**/
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -7065,6 +7153,9 @@ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
cmd->reg_address = CPU_TO_LE32(reg_addr);
cmd->reg_value = CPU_TO_LE32(reg_val);
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -7075,6 +7166,7 @@ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: enable auto page change
* @reg_addr: PHY register address
* @reg_val: read register value
* @cmd_details: pointer to command details structure or NULL
@@ -7082,7 +7174,7 @@ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
* Read the external PHY register.
**/
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -7098,6 +7190,9 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
cmd->dev_addres = dev_addr;
cmd->reg_address = CPU_TO_LE32(reg_addr);
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
*reg_val = LE32_TO_CPU(cmd->reg_value);
@@ -7105,6 +7200,7 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
return status;
}
+#endif /* PF_DRIVER */
#ifdef VF_DRIVER
/**
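i40e_aq_get_phy_register() and i40e_aq_set_phy_register() now take a page_change argument: passing false makes the command carry I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE so firmware leaves the QSFP page selection untouched, while the in-tree callers shown earlier pass true to keep the automatic page change. A standalone sketch of how that command flag is derived; the flag value comes from this patch, the helper and main() are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 1

static uint8_t
phy_access_cmd_flags(bool page_change)
{
	/* auto page change is the default; only set the flag to suppress it */
	return page_change ? 0 : I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
}

int
main(void)
{
	printf("page_change=true  -> flags 0x%x\n", phy_access_cmd_flags(true));
	printf("page_change=false -> flags 0x%x\n", phy_access_cmd_flags(false));
	return 0;
}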
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 7600c922..a26f82b3 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "i40e_adminq.h"
#include "i40e_prototype.h"
@@ -1291,18 +1262,20 @@ static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw,
{
u32 address, offset = (2 * word_offset);
enum i40e_status_code ret;
+ __le16 raw_mem;
u16 mem;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret != I40E_SUCCESS)
return ret;
- ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(mem), &mem, true,
- NULL);
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
+ true, NULL);
i40e_release_nvm(hw);
if (ret != I40E_SUCCESS)
return ret;
+ mem = LE16_TO_CPU(raw_mem);
/* Check if this pointer needs to be read in word size or 4K sector
* units.
*/
@@ -1315,12 +1288,13 @@ static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw,
if (ret != I40E_SUCCESS)
goto err_lldp_cfg;
- ret = i40e_aq_read_nvm(hw, module, offset, sizeof(mem), &mem, true,
- NULL);
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
+ true, NULL);
i40e_release_nvm(hw);
if (ret != I40E_SUCCESS)
return ret;
+ mem = LE16_TO_CPU(raw_mem);
offset = mem + word_offset;
offset *= 2;
diff --git a/drivers/net/i40e/base/i40e_dcb.h b/drivers/net/i40e/base/i40e_dcb.h
index 3b709efd..85b0eed3 100644
--- a/drivers/net/i40e/base/i40e_dcb.h
+++ b/drivers/net/i40e/base/i40e_dcb.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_DCB_H_
#define _I40E_DCB_H_
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 66ff1ccf..8b667c2a 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_DEVIDS_H_
#define _I40E_DEVIDS_H_
@@ -51,6 +22,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
+#ifdef CARLSVILLE_HW
+#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
+#endif
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
diff --git a/drivers/net/i40e/base/i40e_diag.c b/drivers/net/i40e/base/i40e_diag.c
index c3c76a0c..3ccbea48 100644
--- a/drivers/net/i40e/base/i40e_diag.c
+++ b/drivers/net/i40e/base/i40e_diag.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "i40e_diag.h"
#include "i40e_prototype.h"
@@ -84,7 +55,7 @@ static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw,
return I40E_SUCCESS;
}
-struct i40e_diag_reg_test_info i40e_reg_list[] = {
+static struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
{I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
{I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
diff --git a/drivers/net/i40e/base/i40e_diag.h b/drivers/net/i40e/base/i40e_diag.h
index 105b1191..4434fc96 100644
--- a/drivers/net/i40e/base/i40e_diag.h
+++ b/drivers/net/i40e/base/i40e_diag.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
@@ -50,8 +21,6 @@ struct i40e_diag_reg_test_info {
u32 stride; /* bytes between each element */
};
-extern struct i40e_diag_reg_test_info i40e_reg_list[];
-
enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
enum i40e_lb_mode mode);
enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw);
diff --git a/drivers/net/i40e/base/i40e_hmc.c b/drivers/net/i40e/base/i40e_hmc.c
index 502407bd..11c9ae20 100644
--- a/drivers/net/i40e/base/i40e_hmc.c
+++ b/drivers/net/i40e/base/i40e_hmc.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "i40e_osdep.h"
#include "i40e_register.h"
diff --git a/drivers/net/i40e/base/i40e_hmc.h b/drivers/net/i40e/base/i40e_hmc.h
index 343b251f..289264ed 100644
--- a/drivers/net/i40e/base/i40e_hmc.h
+++ b/drivers/net/i40e/base/i40e_hmc.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.c b/drivers/net/i40e/base/i40e_lan_hmc.c
index f03f3813..0afee49b 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.c
+++ b/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "i40e_osdep.h"
#include "i40e_register.h"
@@ -143,7 +114,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
txq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -166,7 +137,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
rxq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -189,7 +160,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_cntx_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -212,7 +183,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_filt_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -233,7 +204,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
(sizeof(struct i40e_hmc_sd_entry) *
hw->hmc.sd_table.sd_cnt));
if (ret_code)
- goto init_lan_hmc_out;
+ goto free_hmc_out;
hw->hmc.sd_table.sd_entry =
(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
}
@@ -242,6 +213,11 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
init_lan_hmc_out:
return ret_code;
+free_hmc_out:
+ if (hw->hmc.hmc_obj_virt_mem.va)
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+
+ return ret_code;
}
/**
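The i40e_lan_hmc.c hunk above reworks the error paths of i40e_init_lan_hmc(): every failed object-count check now jumps to the new free_hmc_out label, which releases the virtual memory already allocated for hw->hmc.hmc_obj_virt_mem instead of leaking it through the plain init_lan_hmc_out exit. Below is a minimal, self-contained sketch of that goto-cleanup pattern; the function and buffer names are placeholders for illustration, not driver APIs.

#include <stdio.h>
#include <stdlib.h>

/* Allocate first, validate afterwards, and free the allocation on any
 * failed check instead of returning through the plain exit label --
 * the same shape as the new free_hmc_out path in i40e_init_lan_hmc().
 */
static int init_object_table(size_t requested, size_t max_allowed)
{
	int ret = 0;
	unsigned char *buf = malloc(requested);	/* stands in for the HMC object memory */

	if (buf == NULL)
		return -1;

	if (requested > max_allowed) {	/* stands in for the obj->max_cnt checks */
		ret = -1;
		goto free_out;		/* mirrors the new free_hmc_out label */
	}

	/* ...further setup would happen here in real code... */
	free(buf);			/* sketch only: nothing retains the buffer */
	return ret;

free_out:
	free(buf);			/* cleanup that the old error path skipped */
	return ret;
}

int main(void)
{
	printf("ok=%d too-big=%d\n",
	       init_object_table(64, 128), init_object_table(256, 128));
	return 0;
}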
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.h b/drivers/net/i40e/base/i40e_lan_hmc.h
index b2a43104..e531ec49 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.h
+++ b/drivers/net/i40e/base/i40e_lan_hmc.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index c77dac02..6c8ca877 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "i40e_prototype.h"
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index 8e5c593c..8a2d82a8 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -1,34 +1,6 @@
-/******************************************************************************
-
- Copyright (c) 2001-2015, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_
@@ -233,9 +205,9 @@ struct i40e_spinlock {
#define i40e_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define DELAY(x) rte_delay_us(x)
-#define i40e_usec_delay(x) rte_delay_us(x)
-#define i40e_msec_delay(x) rte_delay_us(1000*(x))
+#define DELAY(x) rte_delay_us_sleep(x)
+#define i40e_usec_delay(x) DELAY(x)
+#define i40e_msec_delay(x) DELAY(1000 * (x))
#define udelay(x) DELAY(x)
#define msleep(x) DELAY(1000*(x))
#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index c6ec2d76..0cf006da 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
@@ -228,7 +199,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags, u16 valid_flags,
+ u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
@@ -265,6 +236,9 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
@@ -404,24 +378,24 @@ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
-enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
- u16 vsi,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count);
-enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
- u8 filter_count);
-enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 vsi,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count);
-enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
- struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
- u8 filter_count);
enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
struct i40e_aqc_replace_cloud_filters_cmd *filters,
struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
@@ -574,11 +548,11 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index df66e76a..e93ec3f5 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
diff --git a/drivers/net/i40e/base/i40e_status.h b/drivers/net/i40e/base/i40e_status.h
index 49af2d9f..1dad4f4b 100644
--- a/drivers/net/i40e/base/i40e_status.h
+++ b/drivers/net/i40e/base/i40e_status.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 006a11a8..77562f24 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
@@ -358,6 +329,14 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
I40E_PHY_TYPE_OFFSET)
+#ifdef CARLSVILLE_HW
+/* Offset for 2.5G/5G PHY Types value to bit number conversion */
+#define I40E_PHY_TYPE_OFFSET2 (-10)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
+#endif
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@@ -378,6 +357,16 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define I40E_CLOUD_FILTER_MODE1 0x6
+#define I40E_CLOUD_FILTER_MODE2 0x7
+#define I40E_CLOUD_FILTER_MODE3 0x8
+#define I40E_SWITCH_MODE_MASK 0xF
+
u32 management_mode;
u32 mng_protocols_over_mctp;
#define I40E_MNG_PROTOCOL_PLDM 0x2
@@ -731,6 +720,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
u64 flags;
/* Used in set switch config AQ command */
@@ -1541,7 +1531,9 @@ struct i40e_hw_port_stats {
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
-#define I40E_PTR_TYPE BIT(15)
+#define I40E_PTR_TYPE BIT(15)
+#define I40E_SR_OCP_CFG_WORD0 0x2B
+#define I40E_SR_OCP_ENABLED BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
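The i40e_type.h hunk above introduces the cloud filter mode values (Mode1-Mode3) together with I40E_SWITCH_MODE_MASK, matching the extra u8 mode argument that the i40e_prototype.h hunk earlier adds to i40e_aq_set_switch_config(); the in-tree caller updated by this patch simply passes 0 for it. The short sketch below only illustrates how a mode value fits in the nibble selected by the mask; treating I40E_CLOUD_FILTER_MODE3 as an input to that argument is an assumption for illustration, and the constants are copied locally so the snippet stands alone.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the i40e_type.h hunk above. */
#define I40E_CLOUD_FILTER_MODE1 0x6
#define I40E_CLOUD_FILTER_MODE2 0x7
#define I40E_CLOUD_FILTER_MODE3 0x8
#define I40E_SWITCH_MODE_MASK   0xF

int main(void)
{
	/* The mode occupies the low nibble that I40E_SWITCH_MODE_MASK selects. */
	uint8_t mode = I40E_CLOUD_FILTER_MODE3 & I40E_SWITCH_MODE_MASK;

	printf("switch-config mode nibble: 0x%x\n", mode);
	return 0;
}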
diff --git a/drivers/net/i40e/base/meson.build b/drivers/net/i40e/base/meson.build
index 401a1477..d4c8f872 100644
--- a/drivers/net/i40e/base/meson.build
+++ b/drivers/net/i40e/base/meson.build
@@ -12,10 +12,13 @@ sources = [
]
error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
- '-Wno-format', '-Wno-unused-but-set-variable',
- '-Wno-strict-aliasing'
+ '-Wno-format', '-Wno-error=format-security',
+ '-Wno-strict-aliasing', '-Wno-unused-but-set-variable'
]
c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
foreach flag: error_cflags
if cc.has_argument(flag)
c_args += flag
diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h
index b2d5fe73..88096cb4 100644
--- a/drivers/net/i40e/base/virtchnl.h
+++ b/drivers/net/i40e/base/virtchnl.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2013 - 2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 85a6a867..1c779068 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -44,6 +44,7 @@
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
+#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec"
#define I40E_CLEAR_PXE_WAIT_MS 200
@@ -292,6 +293,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
+static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -389,7 +391,7 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_tunnel_filter_convert(
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter,
struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
@@ -408,6 +410,7 @@ static const char *const valid_keys[] = {
ETH_I40E_FLOATING_VEB_LIST_ARG,
ETH_I40E_SUPPORT_MULTI_DRIVER,
ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ ETH_I40E_USE_LATEST_VEC,
NULL};
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -1202,6 +1205,66 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
}
static int
+i40e_parse_latest_vec_handler(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_adapter *ad;
+ int use_latest_vec;
+
+ ad = (struct i40e_adapter *)opaque;
+
+ use_latest_vec = atoi(value);
+
+ if (use_latest_vec != 0 && use_latest_vec != 1)
+ PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
+
+ ad->use_latest_vec = (uint8_t)use_latest_vec;
+
+ return 0;
+}
+
+static int
+i40e_use_latest_vec(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+
+ ad->use_latest_vec = false;
+
+ if (!dev->device->devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
+		PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
+			    "the first invalid or last valid one is used!",
+ ETH_I40E_USE_LATEST_VEC);
+
+ if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
+ i40e_parse_latest_vec_handler, ad) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
+#define I40E_ALARM_INTERVAL 50000 /* us */
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev;
@@ -1263,13 +1326,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
+ /* Check if users want the latest supported vec path */
+ i40e_use_latest_vec(dev);
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
- /* Initialize the hardware */
- i40e_hw_init(dev);
-
/* Reset here to make sure all is clean for each PF */
ret = i40e_pf_reset(hw);
if (ret) {
@@ -1284,6 +1346,23 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
return ret;
}
+ /* Initialize the parameters for adminq */
+ i40e_init_adminq_parameter(hw);
+ ret = i40e_init_adminq(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
+ return -EIO;
+ }
+ PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack);
+
+ /* Initialize the hardware */
+ i40e_hw_init(dev);
+
i40e_config_automask(pf);
i40e_set_default_pctype_table(dev);
@@ -1299,20 +1378,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
/* Initialize the input set for filters (hash and fd) to default value */
i40e_filter_input_set_init(pf);
- /* Initialize the parameters for adminq */
- i40e_init_adminq_parameter(hw);
- ret = i40e_init_adminq(hw);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
- return -EIO;
- }
- PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
- hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
- hw->aq.api_maj_ver, hw->aq.api_min_ver,
- ((hw->nvm.version >> 12) & 0xf),
- ((hw->nvm.version >> 4) & 0xff),
- (hw->nvm.version & 0xf), hw->nvm.eetrack);
-
/* initialise the L3_MAP register */
if (!pf->support_multi_driver) {
ret = i40e_aq_debug_write_global_register(hw,
@@ -1663,9 +1728,6 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
/* uninitialize pf host driver */
i40e_pf_host_uninit(dev);
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
@@ -1722,6 +1784,10 @@ i40e_dev_configure(struct rte_eth_dev *dev)
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
+ /* Only legacy filter API needs the following fdir config. So when the
+	 * legacy filter API is deprecated, the following code should also be
+ * removed.
+ */
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
@@ -1779,7 +1845,11 @@ err_dcb:
rte_free(pf->vmdq);
pf->vmdq = NULL;
err:
- /* need to release fdir resource if exists */
+ /* Need to release fdir resource if exists.
+ * Only legacy filter API needs the following fdir config. So when the
+ * legacy filter API is deprecated, the following code should also be
+ * removed.
+ */
i40e_fdir_teardown(pf);
return ret;
}
@@ -2293,8 +2363,13 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_dev_link_update(dev, 0);
}
- /* enable uio intr after callback register */
- rte_intr_enable(intr_handle);
+ if (dev->data->dev_conf.intr_conf.rxq == 0) {
+ rte_eal_alarm_set(I40E_ALARM_INTERVAL,
+ i40e_dev_alarm_handler, dev);
+ } else {
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+ }
i40e_filter_restore(pf);
@@ -2324,6 +2399,12 @@ i40e_dev_stop(struct rte_eth_dev *dev)
if (hw->adapter_stopped == 1)
return;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0) {
+ rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
+ rte_intr_enable(intr_handle);
+ }
+
/* Disable all queues */
i40e_dev_switch_queues(pf, FALSE);
@@ -2406,6 +2487,11 @@ i40e_dev_close(struct rte_eth_dev *dev)
i40e_pf_disable_irq0(hw);
rte_intr_disable(intr_handle);
+ /*
+ * Only legacy filter API needs the following fdir config. So when the
+ * legacy filter API is deprecated, the following code should also be
+ * removed.
+ */
i40e_fdir_teardown(pf);
/* shutdown and destroy the HMC */
@@ -2498,6 +2584,10 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
if (status != I40E_SUCCESS)
PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
+ /* must remain in all_multicast mode */
+ if (dev->data->all_multicast == 1)
+ return;
+
status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
false, NULL);
if (status != I40E_SUCCESS)
@@ -3363,8 +3453,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -3577,7 +3667,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
if (vlan_type == ETH_VLAN_TYPE_OUTER)
hw->second_tag = rte_cpu_to_le_16(tpid);
}
- ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
+ ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
"Set switch config failed aq_err: %d",
@@ -5282,7 +5372,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
int ret;
/* Use the FW API if FW >= v5.0 */
- if (hw->aq.fw_maj_ver < 5) {
+ if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
return;
}
@@ -5553,7 +5643,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
/* Use the VEB configuration if FW >= v5.0 */
- if (hw->aq.fw_maj_ver >= 5) {
+ if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
/* Configure switch ID */
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -6549,7 +6639,53 @@ i40e_dev_interrupt_handler(void *param)
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
- rte_intr_enable(dev->intr_handle);
+}
+
+static void
+i40e_dev_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+ /* No interrupt event indicated */
+ if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
+ goto done;
+ if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: global reset requested");
+ if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
+ if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: HMC error");
+ if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
+ i40e_dev_handle_vfr_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: adminq event");
+ i40e_dev_handle_aq_msg(dev);
+ }
+
+done:
+ /* Enable interrupt */
+ i40e_pf_enable_irq0(hw);
+ rte_eal_alarm_set(I40E_ALARM_INTERVAL,
+ i40e_dev_alarm_handler, dev);
}
int
@@ -7370,7 +7506,7 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
/* Convert tunnel filter structure */
static int
i40e_tunnel_filter_convert(
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter,
struct i40e_tunnel_filter *tunnel_filter)
{
ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
@@ -7468,8 +7604,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
int val, ret = 0;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi = pf->main_vsi;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb *pfilter;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
struct i40e_tunnel_filter *tunnel, *node;
struct i40e_tunnel_filter check_filter; /* Check if filter exists */
@@ -7577,7 +7713,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
if (ret < 0)
rte_free(tunnel);
} else {
- ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
&cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
@@ -7910,8 +8046,8 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_pf_vf *vf = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_aqc_cloud_filters_element_bb *cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb *pfilter;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
struct i40e_tunnel_filter *tunnel, *node;
struct i40e_tunnel_filter check_filter; /* Check if filter exists */
@@ -8114,7 +8250,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
if (add) {
if (big_buffer)
- ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+ ret = i40e_aq_add_cloud_filters_bb(hw,
vsi->seid, cld_filter, 1);
else
ret = i40e_aq_add_cloud_filters(hw,
@@ -8137,11 +8273,11 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
rte_free(tunnel);
} else {
if (big_buffer)
- ret = i40e_aq_remove_cloud_filters_big_buffer(
+ ret = i40e_aq_rem_cloud_filters_bb(
hw, vsi->seid, cld_filter, 1);
else
- ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- &cld_filter->element, 1);
+ ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
+ &cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
rte_free(cld_filter);
@@ -11249,6 +11385,16 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
* LLDP MIB change event.
*/
if (sw_dcb == TRUE) {
+ /* When using NVM 6.01 or later, the RX data path does
+ * not hang if the FW LLDP is stopped.
+ */
+ if (((hw->nvm.version >> 12) & 0xf) >= 6 &&
+ ((hw->nvm.version >> 4) & 0xff) >= 1) {
+ ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
+ }
+
ret = i40e_init_dcb(hw);
/* If lldp agent is stopped, the return value from
* i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
@@ -11584,7 +11730,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev,
case I40E_MODULE_TYPE_SFP:
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, 1,
I40E_MODULE_SFF_8472_COMP,
&sff8472_comp, NULL);
if (status)
@@ -11592,7 +11738,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, 1,
I40E_MODULE_SFF_8472_SWAP,
&sff8472_swap, NULL);
if (status)
@@ -11620,7 +11766,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev,
/* Read from memory page 0. */
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- 0,
+ 0, 1,
I40E_MODULE_REVISION_ADDR,
&sff8636_rev, NULL);
if (status)
@@ -11681,7 +11827,7 @@ static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
}
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- addr, offset, &value, NULL);
+ addr, offset, 1, &value, NULL);
if (status)
return -EIO;
data[i] = (uint8_t)value;
@@ -11812,7 +11958,7 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
struct i40e_tunnel_filter_list
*tunnel_list = &pf->tunnel.tunnel_list;
struct i40e_tunnel_filter *f;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
bool big_buffer = 0;
TAILQ_FOREACH(f, tunnel_list, rules) {
@@ -11847,8 +11993,8 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
big_buffer = 1;
if (big_buffer)
- i40e_aq_add_cloud_filters_big_buffer(hw,
- vsi->seid, &cld_filter, 1);
+ i40e_aq_add_cloud_filters_bb(hw,
+ vsi->seid, &cld_filter, 1);
else
i40e_aq_add_cloud_filters(hw, vsi->seid,
&cld_filter.element, 1);
@@ -12527,4 +12673,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
ETH_I40E_FLOATING_VEB_ARG "=1"
ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
- ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
+ ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
+ ETH_I40E_USE_LATEST_VEC "=0|1");
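The i40e_ethdev.c changes above register a new device argument, use-latest-supported-vec, parsed by i40e_use_latest_vec() at init time and stored in the adapter's new use_latest_vec field (added in i40e_ethdev.h below); the i40e_rxtx.c changes further down use it to pick between the "latest" and "recommended" vector Rx/Tx paths. A minimal usage sketch follows; the application name, PCI address and argument list are placeholders, not part of the patch.

#include <stdio.h>
#include <rte_eal.h>
#include <rte_errno.h>

/* Pass the new i40e devarg on the EAL command line: everything after the
 * comma in the -w (PCI whitelist) argument is handed to the PMD, so
 * use-latest-supported-vec=1 reaches i40e_use_latest_vec() during probe.
 */
int main(void)
{
	char *argv[] = {
		"i40e-devargs-sketch",
		"-w", "0000:02:00.0,use-latest-supported-vec=1",	/* placeholder address */
	};
	int argc = sizeof(argv) / sizeof(argv[0]);

	if (rte_eal_init(argc, argv) < 0) {
		printf("EAL init failed: %d\n", rte_errno);
		return 1;
	}
	return 0;
}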
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3fffe5a5..11ecfc30 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1078,6 +1078,9 @@ struct i40e_adapter {
uint64_t pctypes_tbl[I40E_FLOW_TYPE_MAX] __rte_cache_min_aligned;
uint64_t flow_types_mask;
uint64_t pctypes_mask;
+
+ /* For devargs */
+ uint8_t use_latest_vec;
};
/**
@@ -1393,6 +1396,8 @@ i40e_calc_itr_interval(bool is_pf, bool is_multi_drv)
(((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \
((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \
((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \
- ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR))
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_AOC) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_ACC))
#endif /* _I40E_ETHDEV_H_ */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 001c301b..ae55b9b1 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1384,10 +1384,8 @@ i40evf_dev_alarm_handler(void *param)
icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
/* No interrupt event indicated */
- if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
+ if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK))
goto done;
- }
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
@@ -1485,9 +1483,6 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
return -1;
}
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
return 0;
}
@@ -1522,8 +1517,6 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- struct rte_eth_conf *conf = &dev->data->dev_conf;
- struct i40e_vf *vf;
/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
* allocation or vector Rx preconditions we will reset it.
@@ -1533,19 +1526,6 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
- /* For non-DPDK PF drivers, VF has no ability to disable HW
- * CRC strip, and is implicitly enabled by the PF.
- */
- if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
- vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
- (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
- /* Peer is running non-DPDK PF driver. */
- PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
- return -EINVAL;
- }
- }
-
return i40evf_init_vlan(dev);
}
@@ -2180,8 +2160,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -2268,7 +2246,6 @@ i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
/*
@@ -2282,6 +2259,7 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
i40evf_disable_irq0(hw);
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
}
/*
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index c67b264d..3694df25 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -133,8 +133,8 @@ const struct rte_flow_ops i40e_flow_ops = {
.flush = i40e_flow_flush,
};
-union i40e_filter_t cons_filter;
-enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+static union i40e_filter_t cons_filter;
+static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
@@ -3127,6 +3127,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
struct rte_flow_error *error,
union i40e_filter_t *filter)
{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_fdir_filter_conf *fdir_filter =
&filter->fdir_filter;
int ret;
@@ -3148,14 +3149,29 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "Check the mode in fdir_conf.");
- return -rte_errno;
+		/* Enable fdir when the first fdir flow is added. */
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to setup fdir.");
+ return -rte_errno;
+ }
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to configure fdir.");
+ goto err;
+ }
+
+ dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
}
return 0;
+err:
+ i40e_fdir_teardown(pf);
+ return -rte_errno;
}
/* Parse to get the action info of a tunnel filter
@@ -4708,6 +4724,13 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+
+ /* If the last flow is destroyed, disable fdir. */
+ if (!ret && !TAILQ_EMPTY(&pf->fdir.fdir_list)) {
+ i40e_fdir_teardown(pf);
+ dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_NONE;
+ }
break;
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_del(dev,
@@ -4773,7 +4796,7 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi;
struct i40e_pf_vf *vf;
- struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
struct i40e_tunnel_filter *node;
bool big_buffer = 0;
@@ -4808,11 +4831,11 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
big_buffer = 1;
if (big_buffer)
- ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
- &cld_filter, 1);
+ ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
+ &cld_filter, 1);
else
- ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- &cld_filter.element, 1);
+ ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
if (ret < 0)
return -ENOTSUP;
@@ -4900,6 +4923,8 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
pf->fdir.inset_flag[pctype] = 0;
}
+ i40e_fdir_teardown(pf);
+
return ret;
}
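The i40e_flow.c hunks above make the flow director set-up dynamic: i40e_fdir_setup()/i40e_fdir_configure() now run when the first FDIR flow is created even if fdir_conf.mode was not set to RTE_FDIR_MODE_PERFECT beforehand, and FDIR is torn down again when the last flow is destroyed or the flows are flushed. The sketch below shows one way such a rule might be created through the generic rte_flow API; it is illustrative only -- port 0, the addresses, ports and queue index are placeholders, and real use still needs EAL and port initialization around it.

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Create a single IPv4/UDP flow-director rule steering matches to queue 1. */
static struct rte_flow *
add_fdir_rule_sketch(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = RTE_BE32(0xc0a80001),	/* 192.168.0.1, placeholder */
			.dst_addr = RTE_BE32(0xc0a80002),	/* 192.168.0.2, placeholder */
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .src_addr = RTE_BE32(0xffffffff),
			 .dst_addr = RTE_BE32(0xffffffff) },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .src_port = RTE_BE16(1024), .dst_port = RTE_BE16(4096) },
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = RTE_BE16(0xffff), .dst_port = RTE_BE16(0xffff) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* With this patch the PMD enables FDIR here on the first rule. */
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}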
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 2a28ee34..8bfa2517 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -55,6 +55,10 @@
PKT_TX_OUTER_IP_CKSUM)
#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_IPV4 | \
+ PKT_TX_IPV6 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM | \
@@ -83,7 +87,8 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+ PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
@@ -1828,7 +1833,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -2909,6 +2914,35 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.offloads = txq->offloads;
}
+static eth_rx_burst_t
+i40e_get_latest_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
+static eth_rx_burst_t
+i40e_get_recommend_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base frequency, limit
+	 * use of AVX2 version to later platforms, not all those that could
+ * theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
void __attribute__((cold))
i40e_set_rx_function(struct rte_eth_dev *dev)
{
@@ -2940,57 +2974,17 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
}
}
- if (dev->data->scattered_rx) {
- /* Set the non-LRO scattered callback: there are Vector and
- * single allocation versions.
- */
- if (ad->rx_vec_allowed) {
- PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
- "callback (port=%d).",
- dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->rx_pkt_burst =
- i40e_recv_scattered_pkts_vec_avx2;
-#endif
- } else {
- PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
- "allocation callback (port=%d).",
- dev->data->port_id);
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
- }
- /* If parameters allow we are going to choose between the following
- * callbacks:
- * - Vector
- * - Bulk Allocation
- * - Single buffer allocation (the simplest one)
- */
- } else if (ad->rx_vec_allowed) {
- PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
- "burst size no less than %d (port=%d).",
- RTE_I40E_DESCS_PER_LOOP,
- dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
-#endif
- } else if (ad->rx_bulk_alloc_allowed) {
+ if (ad->rx_vec_allowed) {
+ /* Vec Rx path */
+ PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
+ dev->data->port_id);
+ if (ad->use_latest_vec)
+ dev->rx_pkt_burst =
+ i40e_get_latest_rx_vec(dev->data->scattered_rx);
+ else
+ dev->rx_pkt_burst =
+ i40e_get_recommend_rx_vec(dev->data->scattered_rx);
+ } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
"will be used on port=%d.",
@@ -2998,12 +2992,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
} else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
- "satisfied, or Scattered Rx is requested "
- "(port=%d).",
+ /* Simple Rx Path. */
+ PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_pkts;
+ dev->rx_pkt_burst = dev->data->scattered_rx ?
+ i40e_recv_scattered_pkts :
+ i40e_recv_pkts;
}
/* Propagate information about RX function choice through all queues. */
@@ -3049,6 +3043,31 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
txq->queue_id);
}
+static eth_tx_burst_t
+i40e_get_latest_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
+static eth_tx_burst_t
+i40e_get_recommend_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base frequency, limit
+	 * use of AVX2 version to later platforms, not all those that could
+ * theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
void __attribute__((cold))
i40e_set_tx_function(struct rte_eth_dev *dev)
{
@@ -3073,17 +3092,12 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
if (ad->tx_simple_allowed) {
if (ad->tx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
- dev->tx_pkt_burst = i40e_xmit_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
-#endif
+ if (ad->use_latest_vec)
+ dev->tx_pkt_burst =
+ i40e_get_latest_tx_vec();
+ else
+ dev->tx_pkt_burst =
+ i40e_get_recommend_tx_vec();
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
@@ -3166,13 +3180,13 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
}
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
@@ -3181,7 +3195,7 @@ i40e_recv_pkts_vec(
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
@@ -3190,7 +3204,7 @@ i40e_recv_scattered_pkts_vec(
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
@@ -3198,7 +3212,7 @@ i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
@@ -3206,25 +3220,25 @@ i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
return 0;
}
-int __attribute__((weak))
+__rte_weak int
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
return -1;
}
-int __attribute__((weak))
+__rte_weak int
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return -1;
}
-void __attribute__((weak))
+__rte_weak void
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
{
return;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)
@@ -3232,7 +3246,7 @@ i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)
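
The stubs above exist so the driver still links when CONFIG_RTE_I40E_INC_VECTOR is disabled: the weak definitions return an error or zero packets, and the real vector implementations, when built, override them at link time. A minimal sketch of the weak-symbol pattern, assuming GCC/clang attribute semantics and a stand-in macro rather than DPDK's own __rte_weak:

    #include <stdio.h>

    /* Stand-in for a weak-symbol shorthand such as __rte_weak. */
    #define my_weak __attribute__((__weak__))

    /* Weak default: used only if no strong definition is linked in. */
    my_weak int
    burst_recv(void)
    {
        return 0;   /* stub: vector path not built */
    }

    int
    main(void)
    {
        /* Prints 0 unless another object file provides a strong burst_recv(). */
        printf("%d\n", burst_recv());
        return 0;
    }
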
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 63cb1774..f00f6d64 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -199,9 +199,7 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
- /* - no csum error report support
- * - no header split support
- */
+ /* no header split support */
if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index f9f13161..45a15d3a 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -48,6 +48,7 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -419,7 +420,7 @@ i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
representor->vf_id, vlan_id);
}
-struct eth_dev_ops i40e_representor_dev_ops = {
+static const struct eth_dev_ops i40e_representor_dev_ops = {
.dev_infos_get = i40e_vf_representor_dev_infos_get,
.dev_start = i40e_vf_representor_dev_start,
@@ -486,9 +487,6 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
if (representor->vf_id >= pf->vf_num)
return -ENODEV;
- /** representor shares the same driver as it's PF device */
- ethdev->device->driver = representor->adapter->eth_dev->device->driver;
-
/* Set representor device ops */
ethdev->dev_ops = &i40e_representor_dev_ops;
@@ -506,6 +504,7 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
}
ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ ethdev->data->representor_id = representor->vf_id;
/* Setting the number queues allocated to the VF */
ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
@@ -525,7 +524,10 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
}
int
-i40e_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+i40e_vf_representor_uninit(struct rte_eth_dev *ethdev)
{
+ /* mac_addrs must not be freed because part of i40e_pf_vf */
+ ethdev->data->mac_addrs = NULL;
+
return 0;
}
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index bba62b1c..7ce5d02f 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -338,7 +338,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
hw = I40E_VSI_TO_HW(vsi);
/* Use the FW API if FW >= v5.0 */
- if (hw->aq.fw_maj_ver < 5) {
+ if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
return -ENOTSUP;
}
diff --git a/drivers/net/ifc/base/ifcvf.c b/drivers/net/ifc/base/ifcvf.c
index 4b22d9ed..3c0b2dff 100644
--- a/drivers/net/ifc/base/ifcvf.c
+++ b/drivers/net/ifc/base/ifcvf.c
@@ -249,7 +249,7 @@ ifcvf_hw_disable(struct ifcvf_hw *hw)
IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector);
ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
(i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4);
- hw->vring[i].last_avail_idx = (u16)ring_state;
+ hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
}
}
@@ -279,6 +279,37 @@ ifcvf_stop_hw(struct ifcvf_hw *hw)
}
void
+ifcvf_enable_logging(struct ifcvf_hw *hw, u64 log_base, u64 log_size)
+{
+ u8 *lm_cfg;
+
+ lm_cfg = hw->lm_cfg;
+
+ *(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_LOW) =
+ log_base & IFCVF_32_BIT_MASK;
+
+ *(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_HIGH) =
+ (log_base >> 32) & IFCVF_32_BIT_MASK;
+
+ *(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_LOW) =
+ (log_base + log_size) & IFCVF_32_BIT_MASK;
+
+ *(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_HIGH) =
+ ((log_base + log_size) >> 32) & IFCVF_32_BIT_MASK;
+
+ *(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_ENABLE_VF;
+}
+
+void
+ifcvf_disable_logging(struct ifcvf_hw *hw)
+{
+ u8 *lm_cfg;
+
+ lm_cfg = hw->lm_cfg;
+ *(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_DISABLE;
+}
+
+void
ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]);
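
ifcvf_enable_logging() above splits the 64-bit start and end of the dirty-page log window into four 32-bit register writes. A small arithmetic sketch of that split, using hypothetical base and size values and plain prints instead of MMIO:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* Hypothetical example values; the driver uses the real log buffer. */
        uint64_t log_base = 0x100000000000ULL; /* e.g. IFCVF_LOG_BASE */
        uint64_t log_size = 0x10000ULL;        /* 64 KiB dirty-page log */
        uint64_t log_end  = log_base + log_size;
        uint32_t mask = 0xffffffff;

        printf("BASE_LOW  = 0x%08x\n", (uint32_t)(log_base & mask));
        printf("BASE_HIGH = 0x%08x\n", (uint32_t)((log_base >> 32) & mask));
        printf("END_LOW   = 0x%08x\n", (uint32_t)(log_end & mask));
        printf("END_HIGH  = 0x%08x\n", (uint32_t)((log_end >> 32) & mask));
        return 0;
    }
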
diff --git a/drivers/net/ifc/base/ifcvf.h b/drivers/net/ifc/base/ifcvf.h
index badacb61..f026c70a 100644
--- a/drivers/net/ifc/base/ifcvf.h
+++ b/drivers/net/ifc/base/ifcvf.h
@@ -49,6 +49,7 @@
#define IFCVF_LM_DISABLE 0x0
#define IFCVF_LM_ENABLE_VF 0x1
#define IFCVF_LM_ENABLE_PF 0x3
+#define IFCVF_LOG_BASE 0x100000000000
#define IFCVF_32_BIT_MASK 0xffffffff
@@ -143,6 +144,12 @@ void
ifcvf_stop_hw(struct ifcvf_hw *hw);
void
+ifcvf_enable_logging(struct ifcvf_hw *hw, u64 log_base, u64 log_size);
+
+void
+ifcvf_disable_logging(struct ifcvf_hw *hw);
+
+void
ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
u8
diff --git a/drivers/net/ifc/base/ifcvf_osdep.h b/drivers/net/ifc/base/ifcvf_osdep.h
index cf151ef5..6aef25ea 100644
--- a/drivers/net/ifc/base/ifcvf_osdep.h
+++ b/drivers/net/ifc/base/ifcvf_osdep.h
@@ -17,7 +17,7 @@
#define DEBUGOUT(S, args...) RTE_LOG(DEBUG, PMD, S, ##args)
#define STATIC static
-#define msec_delay rte_delay_ms
+#define msec_delay(x) rte_delay_us_sleep(1000 * (x))
#define IFCVF_READ_REG8(reg) rte_read8(reg)
#define IFCVF_WRITE_REG8(val, reg) rte_write8((val), (reg))
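
The new msec_delay() sleeps instead of busy-polling, so millisecond waits during device reset no longer burn a core; the macro only converts milliseconds to microseconds before sleeping. A rough standard-library equivalent, for illustration only (the driver itself goes through rte_delay_us_sleep()):

    #include <time.h>

    /* Sleep for `ms` milliseconds, mirroring msec_delay(x) -> sleep of 1000 * x us. */
    static void
    msec_sleep(unsigned int ms)
    {
        struct timespec ts = {
            .tv_sec  = ms / 1000,
            .tv_nsec = (long)(ms % 1000) * 1000000L,
        };
        nanosleep(&ts, NULL);
    }

    int
    main(void)
    {
        msec_sleep(10); /* e.g. a short wait while polling device state */
        return 0;
    }
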
diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c
index 88d81403..97a57f18 100644
--- a/drivers/net/ifc/ifcvf_vdpa.c
+++ b/drivers/net/ifc/ifcvf_vdpa.c
@@ -7,6 +7,7 @@
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
+#include <linux/virtio_net.h>
#include <rte_malloc.h>
#include <rte_memory.h>
@@ -111,7 +112,6 @@ ifcvf_vfio_setup(struct ifcvf_internal *internal)
struct rte_pci_device *dev = internal->pdev;
char devname[RTE_DEV_NAME_MAX_LEN] = {0};
int iommu_group_num;
- int ret = 0;
int i;
internal->vfio_dev_fd = -1;
@@ -145,9 +145,8 @@ ifcvf_vfio_setup(struct ifcvf_internal *internal)
internal->hw.mem_resource[i].len =
internal->pdev->mem_resource[i].len;
}
- ret = ifcvf_init_hw(&internal->hw, internal->pdev);
- return ret;
+ return 0;
err:
rte_vfio_container_destroy(internal->vfio_container_fd);
@@ -205,7 +204,7 @@ exit:
}
static uint64_t
-qva_to_gpa(int vid, uint64_t qva)
+hva_to_gpa(int vid, uint64_t hva)
{
struct rte_vhost_memory *mem = NULL;
struct rte_vhost_mem_region *reg;
@@ -218,9 +217,9 @@ qva_to_gpa(int vid, uint64_t qva)
for (i = 0; i < mem->nregions; i++) {
reg = &mem->regions[i];
- if (qva >= reg->host_user_addr &&
- qva < reg->host_user_addr + reg->size) {
- gpa = qva - reg->host_user_addr + reg->guest_phys_addr;
+ if (hva >= reg->host_user_addr &&
+ hva < reg->host_user_addr + reg->size) {
+ gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
break;
}
}
@@ -246,21 +245,21 @@ vdpa_ifcvf_start(struct ifcvf_internal *internal)
for (i = 0; i < nr_vring; i++) {
rte_vhost_get_vhost_vring(vid, i, &vq);
- gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+ gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
if (gpa == 0) {
DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
return -1;
}
hw->vring[i].desc = gpa;
- gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+ gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
if (gpa == 0) {
DRV_LOG(ERR, "Fail to get GPA for available ring.");
return -1;
}
hw->vring[i].avail = gpa;
- gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+ gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
if (gpa == 0) {
DRV_LOG(ERR, "Fail to get GPA for used ring.");
return -1;
@@ -277,11 +276,29 @@ vdpa_ifcvf_start(struct ifcvf_internal *internal)
}
static void
+ifcvf_used_ring_log(struct ifcvf_hw *hw, uint32_t queue, uint8_t *log_buf)
+{
+ uint32_t i, size;
+ uint64_t pfn;
+
+ pfn = hw->vring[queue].used / PAGE_SIZE;
+ size = hw->vring[queue].size * sizeof(struct vring_used_elem) +
+ sizeof(uint16_t) * 3;
+
+ for (i = 0; i <= size / PAGE_SIZE; i++)
+ __sync_fetch_and_or_8(&log_buf[(pfn + i) / 8],
+ 1 << ((pfn + i) % 8));
+}
+
+static void
vdpa_ifcvf_stop(struct ifcvf_internal *internal)
{
struct ifcvf_hw *hw = &internal->hw;
uint32_t i;
int vid;
+ uint64_t features;
+ uint64_t log_base, log_size;
+ uint8_t *log_buf;
vid = internal->vid;
ifcvf_stop_hw(hw);
@@ -289,6 +306,21 @@ vdpa_ifcvf_stop(struct ifcvf_internal *internal)
for (i = 0; i < hw->nr_vring; i++)
rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
hw->vring[i].last_used_idx);
+
+ rte_vhost_get_negotiated_features(vid, &features);
+ if (RTE_VHOST_NEED_LOG(features)) {
+ ifcvf_disable_logging(hw);
+ rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
+ rte_vfio_container_dma_unmap(internal->vfio_container_fd,
+ log_base, IFCVF_LOG_BASE, log_size);
+ /*
+	 * IFCVF marks dirty memory pages only for the packet buffers, so
+	 * software marks the used ring pages dirty after the device stops.
+ */
+ log_buf = (uint8_t *)(uintptr_t)log_base;
+ for (i = 0; i < hw->nr_vring; i++)
+ ifcvf_used_ring_log(hw, i, log_buf);
+ }
}
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
@@ -469,11 +501,11 @@ update_datapath(struct ifcvf_internal *internal)
if (ret)
goto err;
- ret = setup_notify_relay(internal);
+ ret = vdpa_ifcvf_start(internal);
if (ret)
goto err;
- ret = vdpa_ifcvf_start(internal);
+ ret = setup_notify_relay(internal);
if (ret)
goto err;
@@ -481,12 +513,12 @@ update_datapath(struct ifcvf_internal *internal)
} else if (rte_atomic32_read(&internal->running) &&
(!rte_atomic32_read(&internal->started) ||
!rte_atomic32_read(&internal->dev_attached))) {
- vdpa_ifcvf_stop(internal);
-
ret = unset_notify_relay(internal);
if (ret)
goto err;
+ vdpa_ifcvf_stop(internal);
+
ret = vdpa_disable_vfio_intr(internal);
if (ret)
goto err;
@@ -549,6 +581,35 @@ ifcvf_dev_close(int vid)
}
static int
+ifcvf_set_features(int vid)
+{
+ uint64_t features;
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+ uint64_t log_base, log_size;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_vhost_get_negotiated_features(vid, &features);
+
+ if (RTE_VHOST_NEED_LOG(features)) {
+ rte_vhost_get_log_base(vid, &log_base, &log_size);
+ rte_vfio_container_dma_map(internal->vfio_container_fd,
+ log_base, IFCVF_LOG_BASE, log_size);
+ ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
+ }
+
+ return 0;
+}
+
+static int
ifcvf_get_vfio_group_fd(int vid)
{
int did;
@@ -657,14 +718,14 @@ ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
return 0;
}
-struct rte_vdpa_dev_ops ifcvf_ops = {
+static struct rte_vdpa_dev_ops ifcvf_ops = {
.get_queue_num = ifcvf_get_queue_num,
.get_features = ifcvf_get_vdpa_features,
.get_protocol_features = ifcvf_get_protocol_features,
.dev_conf = ifcvf_dev_config,
.dev_close = ifcvf_dev_close,
.set_vring_state = NULL,
- .set_features = NULL,
+ .set_features = ifcvf_set_features,
.migration_done = NULL,
.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
@@ -695,11 +756,18 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (ifcvf_vfio_setup(internal) < 0)
return -1;
+ if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0)
+ return -1;
+
internal->max_queues = IFCVF_MAX_QUEUES;
features = ifcvf_get_features(&internal->hw);
internal->features = (features &
~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
- (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+ (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
+ (1ULL << VIRTIO_NET_F_CTRL_VQ) |
+ (1ULL << VIRTIO_NET_F_STATUS) |
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
+ (1ULL << VHOST_F_LOG_ALL);
internal->dev_addr.pci_addr = pci_dev->addr;
internal->dev_addr.type = PCI_ADDR;
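
ifcvf_used_ring_log() above marks the guest pages backing a used ring as dirty in the vhost log: one bit per 4 KiB page, indexed by guest page frame number, with byte pfn/8 and bit pfn%8. A simplified single-threaded sketch of that marking with hypothetical addresses (the driver uses an atomic OR because the log can be written concurrently):

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096

    /* Mark one guest-physical page dirty in a vhost-style log bitmap. */
    static void
    mark_page_dirty(uint8_t *log, uint64_t gpa)
    {
        uint64_t pfn = gpa / SKETCH_PAGE_SIZE;

        log[pfn / 8] |= (uint8_t)(1u << (pfn % 8));
    }

    int
    main(void)
    {
        uint8_t log[16] = { 0 };

        /* Hypothetical used ring at GPA 0x6000, spanning 0x120 bytes. */
        uint64_t used_gpa = 0x6000, used_len = 0x120;
        uint64_t off;

        for (off = 0; off <= used_len; off += SKETCH_PAGE_SIZE)
            mark_page_dirty(log, used_gpa + off);

        printf("log[0] = 0x%02x\n", log[0]);  /* page 6 dirty -> bit 6 -> 0x40 */
        return 0;
    }
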
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 70fdfe7c..431be026 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-ixgbe.2018.01.02.tar.gz released by the team which develop
+cid-ixgbe.2018.08.28.tar.gz released by the team which develop
basic drivers for any ixgbe NIC. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index ee7ce2e9..245ff75d 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_type.h"
#include "ixgbe_82598.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h
index 20aab9fc..8013f495 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.h
+++ b/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_82598_H_
#define _IXGBE_82598_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 26217212..7de753fd 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_type.h"
#include "ixgbe_82599.h"
@@ -87,9 +58,6 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
mac->ops.set_rate_select_speed =
ixgbe_set_hard_rate_select_speed;
- if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
- mac->ops.set_rate_select_speed =
- ixgbe_set_soft_rate_select_speed;
} else {
if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -561,16 +529,9 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
media_type = ixgbe_media_type_copper;
break;
- case IXGBE_DEV_ID_82599_LS:
- media_type = ixgbe_media_type_fiber_lco;
- break;
case IXGBE_DEV_ID_82599_QSFP_SF_QP:
media_type = ixgbe_media_type_fiber_qsfp;
break;
- case IXGBE_DEV_ID_82599_BYPASS:
- media_type = ixgbe_media_type_fiber_fixed;
- hw->phy.multispeed_fiber = true;
- break;
default:
media_type = ixgbe_media_type_unknown;
break;
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.h b/drivers/net/ixgbe/base/ixgbe_82599.h
index d555dbce..a32eb1f5 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.h
+++ b/drivers/net/ixgbe/base/ixgbe_82599.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_82599_H_
#define _IXGBE_82599_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index e50c1045..873c0799 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_api.h"
#include "ixgbe_common.h"
@@ -177,8 +148,6 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_QSFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
- case IXGBE_DEV_ID_82599_LS:
- case IXGBE_DEV_ID_82599_BYPASS:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
@@ -193,7 +162,6 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
hw->mvals = ixgbe_mvals_X540;
break;
@@ -1360,6 +1328,18 @@ void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
}
/**
+ * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ **/
+bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.fw_recovery_mode)
+ return hw->mac.ops.fw_recovery_mode(hw);
+ return false;
+}
+
+/**
* ixgbe_enter_lplu - Transition to low power states
* @hw: pointer to hardware structure
*
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index 2f532aa8..ff8f7b26 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_API_H_
#define _IXGBE_API_H_
@@ -214,6 +185,7 @@ void ixgbe_disable_mdd(struct ixgbe_hw *hw);
void ixgbe_enable_mdd(struct ixgbe_hw *hw);
void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
+bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw);
s32 ixgbe_enter_lplu(struct ixgbe_hw *hw);
s32 ixgbe_handle_lasi(struct ixgbe_hw *hw);
void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed);
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index e7e9256e..21f973e5 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
@@ -167,7 +138,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
/* flow control autoneg black list */
@@ -201,7 +171,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_X540_BYPASS:
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -267,7 +236,6 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
goto out;
/* fall through - only backplane uses autoc */
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -3127,7 +3095,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
@@ -5267,7 +5234,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
IXGBE_LINK_SPEED_10GB_FULL);
@@ -5296,7 +5262,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
* Section 73.10.2, we may have to wait up to 500ms if KR is
* attempted. 82599 uses the same timing for 10g SFI.
*/
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 10; i++) {
/* Wait for the link partner to also set speed */
msec_delay(100);
@@ -5318,7 +5284,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
IXGBE_LINK_SPEED_1GB_FULL);
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index fd35dcc4..3bb24751 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_COMMON_H_
#define _IXGBE_COMMON_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb.c b/drivers/net/ixgbe/base/ixgbe_dcb.c
index 2877f22b..a590e0e0 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/base/ixgbe_dcb.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_type.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb.h b/drivers/net/ixgbe/base/ixgbe_dcb.h
index 41208049..503d0601 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/base/ixgbe_dcb.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_DCB_H_
#define _IXGBE_DCB_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82598.c b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
index 3ed8337b..d87cb588 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_type.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82598.h b/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
index eb88b3d3..1a147444 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_DCB_82598_H_
#define _IXGBE_DCB_82598_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82599.c b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
index 8f9e1590..f4f0ff01 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_type.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82599.h b/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
index dc0fb284..085ada27 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_DCB_82599_H_
#define _IXGBE_DCB_82599_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
index 40dad775..67a124d8 100644
--- a/drivers/net/ixgbe/base/ixgbe_hv_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_vf.h"
#include "ixgbe_hv_vf.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.h b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
index 9119f29f..9664f3bd 100644
--- a/drivers/net/ixgbe/base/ixgbe_hv_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2016, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_HV_VF_H_
#define _IXGBE_HV_VF_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.c b/drivers/net/ixgbe/base/ixgbe_mbx.c
index 2785bbad..cb82942d 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_type.h"
#include "ixgbe_mbx.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index bde50a51..5d32cbc0 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index bb5dfd2a..ea8dc1cb 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -1,36 +1,6 @@
-/******************************************************************************
-
- Copyright (c) 2001-2015, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_OS_H_
#define _IXGBE_OS_H_
@@ -51,7 +21,7 @@
#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
-#define DELAY(x) rte_delay_us(x)
+#define DELAY(x) rte_delay_us_sleep(x)
#define usec_delay(x) DELAY(x)
#define msec_delay(x) DELAY(1000*(x))
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 2df068ee..6cdd8fba 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_api.h"
#include "ixgbe_common.h"
@@ -2594,7 +2565,6 @@ STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
bool data;
- UNREFERENCED_1PARAMETER(hw);
DEBUGFUNC("ixgbe_get_i2c_data");
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
index cf8cadd9..132fa542 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.h
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_PHY_H_
#define _IXGBE_PHY_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 6e03089e..cee6ba2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
@@ -46,8 +17,8 @@ POSSIBILITY OF SUCH DAMAGE.
*
* - IXGBE_ERROR_POLLING
* This category is for errors related to polling/timeout issues and should be
- * used in any case where the timeout occured, or a failure to obtain a lock, or
- * failure to receive data within the time limit.
+ * used in any case where the timeout occurred, or a failure to obtain a lock,
+ * or failure to receive data within the time limit.
*
* - IXGBE_ERROR_CAUTION
* This category should be used for reporting issues that may be the cause of
@@ -122,12 +93,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_82599_VF_HV 0x152E
-#define IXGBE_DEV_ID_82599_LS 0x154F
-#define IXGBE_DEV_ID_82599_BYPASS 0x155D
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
-#define IXGBE_DEV_ID_X540_BYPASS 0x155C
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
#define IXGBE_DEV_ID_X550T1 0x15D1
@@ -882,6 +850,10 @@ struct ixgbe_dmac_config {
#define IXGBE_RTTDQSEL 0x04904
#define IXGBE_RTTDT1C 0x04908
#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTQCNCR 0x08B00
+#define IXGBE_RTTQCNTG 0x04A90
+#define IXGBE_RTTBCNRD 0x0498C
+#define IXGBE_RTTQCNRR 0x0498C
#define IXGBE_RTTDTECC 0x04990
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
@@ -892,6 +864,7 @@ struct ixgbe_dmac_config {
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
#define IXGBE_RTTBCNRM 0x04980
+#define IXGBE_RTTQCNRM 0x04980
/* BCN (for DCB) Registers */
#define IXGBE_RTTBCNRS 0x04988
@@ -1099,6 +1072,9 @@ struct ixgbe_dmac_config {
#define IXGBE_FWSM_MODE_MASK 0xE
#define IXGBE_FWSM_TS_ENABLED 0x1
#define IXGBE_FWSM_FW_MODE_PT 0x4
+#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE (1 << 5)
+#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000
+#define IXGBE_FWSM_FW_VAL_BIT (1 << 15)
/* ARC Subsystem registers */
#define IXGBE_HICR 0x15F00
@@ -3755,9 +3731,7 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
- ixgbe_media_type_fiber_fixed,
ixgbe_media_type_fiber_qsfp,
- ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
@@ -4050,6 +4024,7 @@ struct ixgbe_mac_operations {
void (*enable_mdd)(struct ixgbe_hw *hw);
void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+ bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index 5b25a6b4..aac37822 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_api.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
index 3efffe82..dba643fc 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_VF_H_
#define _IXGBE_VF_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 716664bb..f00f0eae 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_x540.h"
#include "ixgbe_type.h"
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h
index 8a19ae2e..231dfe56 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.h
+++ b/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_X540_H_
#define _IXGBE_X540_H_
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index f66f5407..f7b98af5 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#include "ixgbe_x550.h"
#include "ixgbe_x540.h"
@@ -82,6 +53,7 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
mac->ops.mdd_event = ixgbe_mdd_event_X550;
mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
+ mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
mac->ops.disable_rx = ixgbe_disable_rx_x550;
/* Manageability interface */
mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
@@ -349,7 +321,7 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
- return ixgbe_identify_module_generic(hw);
+ return ixgbe_identify_sfp_module_X550em(hw);
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
ixgbe_setup_mux_ctl(hw);
@@ -357,7 +329,7 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
/* Fallthrough */
case IXGBE_DEV_ID_X550EM_A_SFP_N:
- return ixgbe_identify_module_generic(hw);
+ return ixgbe_identify_sfp_module_X550em(hw);
break;
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
@@ -2808,9 +2780,9 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
(IXGBE_CS4227_EDC_MODE_SR << 1));
if (setup_linear)
- reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
else
- reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
ret_val = hw->phy.ops.write_reg(hw, reg_slice,
IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
@@ -4661,3 +4633,18 @@ s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
return ret_val;
}
+
+/**
+ * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if in FW NVM recovery mode.
+ **/
+bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
+
+ return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
+}
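
Note: the helper above only reads the FWSM register; the enforcement happens in eth_ixgbe_dev_init() further down in this patch. A minimal sketch of the intended call pattern, assuming only the hooks added here (example_probe_guard() is a hypothetical wrapper, not part of the patch):

/* Hypothetical guard built on the fw_recovery_mode hook added by this
 * patch; the hook is only installed for X550-class MACs in this patch,
 * so the NULL check keeps other devices on their existing init path.
 */
static int
example_probe_guard(struct ixgbe_hw *hw)
{
	if (hw->mac.ops.fw_recovery_mode != NULL &&
	    hw->mac.ops.fw_recovery_mode(hw))
		return -EIO; /* NVM recovery in progress, refuse to init */
	return 0;
}
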
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
index 6d188741..3bd98f24 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.h
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -1,35 +1,6 @@
-/*******************************************************************************
-
-Copyright (c) 2001-2015, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2018
+ */
#ifndef _IXGBE_X550_H_
#define _IXGBE_X550_H_
@@ -121,4 +92,5 @@ s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw);
s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx);
+bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw);
#endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ixgbe/base/meson.build b/drivers/net/ixgbe/base/meson.build
index 3147e110..21ac64bf 100644
--- a/drivers/net/ixgbe/base/meson.build
+++ b/drivers/net/ixgbe/base/meson.build
@@ -20,6 +20,9 @@ sources = [
error_cflags = ['-Wno-unused-value',
'-Wno-unused-but-set-variable']
c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
foreach flag: error_cflags
if cc.has_argument(flag)
c_args += flag
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 26b19273..269595b7 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -217,8 +217,7 @@ static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
- struct rte_intr_handle *handle);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
@@ -437,7 +436,6 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
@@ -1119,6 +1117,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
return -EIO;
}
+ if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
+ PMD_INIT_LOG(ERR, "\nERROR: "
+ "Firmware recovery mode detected. Limiting functionality.\n"
+ "Refer to the Intel(R) Ethernet Adapters and Devices "
+ "User Guide for details on firmware recovery mode.");
+ return -EIO;
+ }
+
/* pick up the PCI bus settings for reporting later */
ixgbe_get_bus_info(hw);
@@ -1331,12 +1337,6 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
- rte_free(eth_dev->data->hash_mac_addrs);
- eth_dev->data->hash_mac_addrs = NULL;
-
/* remove all the fdir filters & hash */
ixgbe_fdir_filter_uninit(eth_dev);
@@ -1619,7 +1619,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
*/
if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return diag;
+ /*
+ * This error code will be propagated to the app by
+ * rte_eth_dev_reset, so use a public error code rather than
+ * the internal-only IXGBE_ERR_RESET_FAILED
+ */
+ return -EAGAIN;
}
/* negotiate mailbox API version to use with the PF. */
@@ -1711,9 +1716,6 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
/* Disable the interrupts for VF */
ixgbevf_intr_disable(eth_dev);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
rte_intr_disable(intr_handle);
rte_intr_callback_unregister(intr_handle,
ixgbevf_dev_interrupt_handler, eth_dev);
@@ -4282,8 +4284,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
- struct rte_intr_handle *intr_handle)
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -4334,7 +4335,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "enable intr immediately");
ixgbe_enable_intr(dev);
- rte_intr_enable(intr_handle);
return 0;
}
@@ -4417,7 +4417,7 @@ ixgbe_dev_interrupt_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
ixgbe_dev_interrupt_get_status(dev);
- ixgbe_dev_interrupt_action(dev, dev->intr_handle);
+ ixgbe_dev_interrupt_action(dev);
}
static int
@@ -5008,14 +5008,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
}
#endif
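
Note on the CRC changes above: the transitional rte_eth_dev_must_keep_crc() helper and DEV_RX_OFFLOAD_CRC_STRIP are being retired; CRC stripping is the default and keeping the CRC is requested explicitly through DEV_RX_OFFLOAD_KEEP_CRC. A minimal sketch of how an application would ask for it (illustrative only; the port id and queue counts are assumptions):

/* Request that the PMD leave the Ethernet CRC on received packets.
 * With this patch the ixgbe PF honours the flag, while the VF clears
 * or forces it to match the host PF behaviour, as logged above.
 */
uint16_t port_id = 0;	/* example port */
struct rte_eth_conf conf = {
	.rxmode = { .offloads = DEV_RX_OFFLOAD_KEEP_CRC, },
};

if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
	rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
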
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 1adf1b80..f0fafebc 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -363,6 +363,17 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
item, "Not supported by ntuple filter");
return -rte_errno;
}
+ if ((ipv4_mask->hdr.src_addr != 0 &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr != 0 &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+ ipv4_mask->hdr.next_proto_id != 0)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
@@ -432,6 +443,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
item, "Not supported by ntuple filter");
return -rte_errno;
}
+ if ((tcp_mask->hdr.src_port != 0 &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port != 0 &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = tcp_mask->hdr.dst_port;
filter->src_port_mask = tcp_mask->hdr.src_port;
@@ -467,6 +487,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
item, "Not supported by ntuple filter");
return -rte_errno;
}
+ if ((udp_mask->hdr.src_port != 0 &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port != 0 &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
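
The three mask checks added above tighten the ntuple parsing path: the hardware 5-tuple filter can only do exact-match or wildcard on each field, so any partial mask is now rejected up front with RTE_FLOW_ERROR_TYPE_ITEM. A sketch of masks that still pass the new validation (illustrative values, not taken from this patch):

/* Each field is either all-ones (exact match) or zero (wildcard),
 * which is what the new checks require; all other header fields are
 * left zero, as the existing checks already demand.
 */
static const struct rte_flow_item_ipv4 ipv4_mask = {
	.hdr = {
		.src_addr = RTE_BE32(UINT32_MAX),	/* match source IP exactly */
		.dst_addr = 0,				/* any destination IP */
	},
};
static const struct rte_flow_item_tcp tcp_mask = {
	.hdr = {
		.src_port = 0,				/* any source port */
		.dst_port = RTE_BE16(UINT16_MAX),	/* match destination port */
	},
};
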
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 08405f1e..5a416885 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -364,6 +364,7 @@ ixgbe_crypto_create_session(void *device,
conf->crypto_xform->aead.algo !=
RTE_CRYPTO_AEAD_AES_GCM) {
PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+ rte_mempool_put(mempool, (void *)ic_session);
return -ENOTSUP;
}
aead_xform = &conf->crypto_xform->aead;
@@ -373,6 +374,7 @@ ixgbe_crypto_create_session(void *device,
ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+ rte_mempool_put(mempool, (void *)ic_session);
return -ENOTSUP;
}
} else {
@@ -380,6 +382,7 @@ ixgbe_crypto_create_session(void *device,
ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
} else {
PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+ rte_mempool_put(mempool, (void *)ic_session);
return -ENOTSUP;
}
}
@@ -395,6 +398,7 @@ ixgbe_crypto_create_session(void *device,
if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
if (ixgbe_crypto_add_sa(ic_session)) {
PMD_DRV_LOG(ERR, "Failed to add SA\n");
+ rte_mempool_put(mempool, (void *)ic_session);
return -EPERM;
}
}
@@ -609,7 +613,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (rte_eth_dev_must_keep_crc(rx_offloads)) {
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f82b74a9..2f0262ae 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -58,6 +58,10 @@
#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
@@ -2057,8 +2061,7 @@ next_desc:
* of the ixgbe PMD.
*
* TODO:
- * - Get rid of "volatile" crap and let the compiler do its
- * job.
+ * - Get rid of "volatile" and let the compiler do its job.
* - Use the proper memory barrier (rte_rmb()) to ensure the
* memory ordering below.
*/
@@ -2848,7 +2851,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER;
@@ -2936,7 +2938,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@@ -4705,7 +4707,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (rte_eth_dev_must_keep_crc(rx_conf->offloads) &&
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter of 4.6.7.2.1 of the Spec Rev.
@@ -4854,7 +4856,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -4895,8 +4897,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ?
- ETHER_CRC_LEN : 0;
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -4965,7 +4969,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5702,7 +5706,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
*/
if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
ixgbe_rss_disable(dev);
- return -EINVAL;
+ return 0;
}
if (rss_conf.rss_key == NULL)
rss_conf.rss_key = rss_intel_key; /* Default hash key */
@@ -5715,13 +5719,13 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
}
/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
ixgbe_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
@@ -5730,7 +5734,7 @@ ixgbe_recv_pkts_vec(
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
ixgbe_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
@@ -5739,7 +5743,7 @@ ixgbe_recv_scattered_pkts_vec(
return 0;
}
-int __attribute__((weak))
+__rte_weak int
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
{
return -1;
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index db516d99..5d2e3e02 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -65,7 +65,7 @@ ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
/**< Device TX offload capabilities. */
dev_info->speed_capa =
@@ -135,7 +135,7 @@ ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
representor->vf_id, on);
}
-struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
+static const struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
.dev_infos_get = ixgbe_vf_representor_dev_infos_get,
.dev_start = ixgbe_vf_representor_dev_start,
@@ -192,6 +192,7 @@ ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
return -ENODEV;
ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ ethdev->data->representor_id = representor->vf_id;
/* Set representor device ops */
ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
@@ -225,7 +226,10 @@ ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
}
int
-ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev)
{
+ /* mac_addrs must not be freed because it is part of ixgbe_vf_info */
+ ethdev->data->mac_addrs = NULL;
+
return 0;
}
diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build
index 02d5ef5e..544a1414 100644
--- a/drivers/net/ixgbe/meson.build
+++ b/drivers/net/ixgbe/meson.build
@@ -5,10 +5,11 @@ version = 2
cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS']
+allow_experimental_apis = true
+
subdir('base')
objs = [base_objs]
-allow_experimental_apis = true
sources = files(
'ixgbe_82599_bypass.c',
'ixgbe_bypass.c',
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index 085bb845..a1e9970d 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -207,7 +207,6 @@ eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->min_rx_bufsize = 0;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -411,8 +410,7 @@ eth_kni_probe(struct rte_vdev_device *vdev)
params = rte_vdev_device_args(vdev);
PMD_LOG(INFO, "Initializing eth_kni for %s", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(params) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
PMD_LOG(ERR, "Failed to probe %s", name);
@@ -465,13 +463,17 @@ eth_kni_remove(struct rte_vdev_device *vdev)
if (eth_dev == NULL)
return -1;
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
+
eth_kni_dev_stop(eth_dev);
internals = eth_dev->data->dev_private;
rte_kni_release(internals->kni);
- rte_free(internals);
-
rte_eth_dev_release_port(eth_dev);
is_kni_initialized--;
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 93e89007..d13ab06c 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -2038,14 +2038,11 @@ lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ return 0;
/* lio_free_sc_buffer_pool */
lio_free_sc_buffer_pool(lio_dev);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index 9c28ed4d..980eec23 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -3,6 +3,8 @@
drivers = ['af_packet',
'ark',
+ 'atlantic',
+ 'avf',
'avp',
'axgbe', 'bonding',
'bnx2x',
@@ -11,6 +13,7 @@ drivers = ['af_packet',
'dpaa', 'dpaa2',
'e1000',
'ena',
+ 'enetc',
'enic',
'failsafe',
'fm10k', 'i40e',
@@ -18,16 +21,23 @@ drivers = ['af_packet',
'ixgbe',
'kni',
'liquidio',
+ 'mlx4',
+ 'mlx5',
+ 'mvneta',
'mvpp2',
'netvsc',
'nfp',
- 'null', 'octeontx', 'pcap', 'ring',
+ 'null', 'octeontx', 'pcap', 'qede', 'ring',
'sfc',
'softnic',
'szedata2',
+ 'tap',
'thunderx',
+ 'vdev_netvsc',
'vhost',
- 'virtio']
+ 'virtio',
+ 'vmxnet3',
+]
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
std_deps += ['bus_vdev'] # same with vdev bus
diff --git a/drivers/net/mlx4/meson.build b/drivers/net/mlx4/meson.build
new file mode 100644
index 00000000..7de571e2
--- /dev/null
+++ b/drivers/net/mlx4/meson.build
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 6WIND S.A.
+# Copyright 2018 Mellanox Technologies, Ltd
+
+pmd_dlopen = get_option('enable_driver_mlx_glue')
+LIB_GLUE_BASE = 'librte_pmd_mlx4_glue.so'
+LIB_GLUE_VERSION = '18.02.0'
+LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION
+if pmd_dlopen
+ dpdk_conf.set('RTE_LIBRTE_MLX4_DLOPEN_DEPS', 1)
+ cflags += [
+ '-DMLX4_GLUE="@0@"'.format(LIB_GLUE),
+ '-DMLX4_GLUE_VERSION="@0@"'.format(LIB_GLUE_VERSION),
+ ]
+endif
+libs = [
+ cc.find_library('mnl', required:false),
+ cc.find_library('mlx4', required:false),
+ cc.find_library('ibverbs', required:false),
+]
+build = true
+foreach lib:libs
+ if not lib.found()
+ build = false
+ endif
+endforeach
+# Compile PMD
+if build
+ allow_experimental_apis = true
+ ext_deps += libs
+ sources = files(
+ 'mlx4.c',
+ 'mlx4_ethdev.c',
+ 'mlx4_flow.c',
+ 'mlx4_intr.c',
+ 'mlx4_mr.c',
+ 'mlx4_rxq.c',
+ 'mlx4_rxtx.c',
+ 'mlx4_txq.c',
+ 'mlx4_utils.c',
+ )
+ if not pmd_dlopen
+ sources += files('mlx4_glue.c')
+ endif
+ cflags_options = [
+ '-Wextra',
+ '-std=c11',
+ '-Wno-strict-prototypes',
+ '-D_BSD_SOURCE',
+ '-D_DEFAULT_SOURCE',
+ '-D_XOPEN_SOURCE=600'
+ ]
+ foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+ endforeach
+ if get_option('buildtype').contains('debug')
+ cflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]
+ else
+ cflags += [ '-DNDEBUG', '-UPEDANTIC' ]
+ endif
+ # To maintain the compatibility with the make build system
+ # mlx4_autoconf.h file is still generated.
+ # input array for meson member search:
+ # [ "MACRO to define if found", "header for the search",
+ # "symbol to search","struct member to search" ]
+ #
+ has_member_args = [
+ [ 'HAVE_IBV_MLX4_WQE_LSO_SEG', 'infiniband/mlx4dv.h',
+ 'struct mlx4_wqe_lso_seg', 'mss_hdr_size' ],
+ ]
+ config = configuration_data()
+ foreach arg:has_member_args
+ file_prefix = '#include<' + arg[1] + '>'
+ config.set(arg[0], cc.has_member(arg[2], arg[3],
+ prefix : file_prefix))
+ endforeach
+ configure_file(output : 'mlx4_autoconf.h', configuration : config)
+endif
+# Build Glue Library
+if pmd_dlopen and build
+ dlopen_name = 'mlx4_glue'
+ dlopen_lib_name = driver_name_fmt.format(dlopen_name)
+ dlopen_so_version = LIB_GLUE_VERSION
+ dlopen_sources = files('mlx4_glue.c')
+ dlopen_install_dir = [ eal_pmd_path + '-glue' ]
+ shared_lib = shared_library(
+ dlopen_lib_name,
+ dlopen_sources,
+ include_directories: global_inc,
+ c_args: cflags,
+ dependencies: libs,
+ link_args: [
+ '-Wl,-export-dynamic',
+ '-Wl,-h,@0@'.format(LIB_GLUE),
+ ],
+ soversion: dlopen_so_version,
+ install: true,
+ install_dir: dlopen_install_dir,
+ )
+endif
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index defc0d4b..7f07b8dc 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -734,7 +734,6 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->device->driver = &mlx4_driver.driver;
/* Initialize local interrupt handle for current port. */
priv->intr_handle = (struct rte_intr_handle){
.fd = -1,
@@ -782,12 +781,17 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
continue;
port_error:
rte_free(priv);
+ if (eth_dev != NULL)
+ eth_dev->data->dev_private = NULL;
if (pd)
claim_zero(mlx4_glue->dealloc_pd(pd));
if (ctx)
claim_zero(mlx4_glue->close_device(ctx));
- if (eth_dev)
+ if (eth_dev != NULL) {
+ /* mac_addrs must not be freed because it is part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(eth_dev);
+ }
break;
}
/*
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index d23d3c61..bee85864 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -289,6 +289,23 @@ mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
uintptr_t end = 0;
uint32_t idx = 0;
+ /* MR for external memory doesn't have memseg list. */
+ if (mr->msl == NULL) {
+ struct ibv_mr *ibv_mr = mr->ibv_mr;
+
+ assert(mr->ms_bmp_n == 1);
+ assert(mr->ms_n == 1);
+ assert(base_idx == 0);
+ /*
+ * Can't search it from memseg list but get it directly from
+ * verbs MR as there's only one chunk.
+ */
+ entry->start = (uintptr_t)ibv_mr->addr;
+ entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ /* Returning 1 ends iteration. */
+ return 1;
+ }
for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
if (rte_bitmap_get(mr->ms_bmp, idx)) {
const struct rte_memseg_list *msl;
@@ -809,6 +826,7 @@ mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
mr = mr_lookup_dev_list(dev, &entry, start);
if (mr == NULL)
continue;
+ assert(mr->msl); /* Can't be external memory. */
ms = rte_mem_virt2memseg((void *)start, msl);
assert(ms != NULL);
assert(msl->page_sz == ms->hugepage_sz);
@@ -1055,6 +1073,133 @@ mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
(void *)mr_ctrl, mr_ctrl->cur_gen);
}
+/**
+ * Called during rte_mempool_mem_iter() by mlx4_mr_update_ext_mp().
+ *
+ * The externally allocated chunk is registered and an MR is created for it.
+ * The MR object is added to the global list. If the memseg list of an MR
+ * object (mr->msl) is NULL, the MR object can be regarded as covering
+ * externally allocated memory.
+ *
+ * Once external memory is registered, it must stay static. If the memory is
+ * freed and the virtual address range is later backed by different physical
+ * memory, the stale translation entry may crash the device; the PMD cannot
+ * track free events for external memory at this point.
+ */
+static void
+mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ struct rte_eth_dev *dev = data->dev;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
+ struct mlx4_mr *mr = NULL;
+ uintptr_t addr = (uintptr_t)memhdr->addr;
+ size_t len = memhdr->len;
+ struct mlx4_mr_cache entry;
+ uint32_t lkey;
+
+ /* If already registered, it should return. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_lookup_dev(dev, &entry, addr);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ if (lkey != UINT32_MAX)
+ return;
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (mr == NULL) {
+ WARN("port %u unable to allocate memory for a new MR of"
+ " mempool (%s).",
+ dev->data->port_id, mp->name);
+ data->ret = -1;
+ return;
+ }
+ DEBUG("port %u register MR for chunk #%d of mempool (%s)",
+ dev->data->port_id, mem_idx, mp->name);
+ mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ WARN("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_free(mr);
+ data->ret = -1;
+ return;
+ }
+ mr->msl = NULL; /* Mark it as external memory. */
+ mr->ms_bmp = NULL;
+ mr->ms_n = 1;
+ mr->ms_bmp_n = 1;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DEBUG("port %u MR CREATED (%p) for external memory %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Insert to the local cache table */
+ mlx4_mr_addr2mr_bh(dev, mr_ctrl, addr);
+}
+
+/**
+ * Register an MR for all memory chunks in a mempool backed by externally
+ * allocated memory and fill the local cache.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static uint32_t
+mlx4_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx4_mr_update_ext_mp_cb, &data);
+ return data.ret;
+}
+
+/**
+ * Register an MR for all memory chunks in a mempool backed by externally
+ * allocated memory, then look up and return the LKey for the given address.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ * @param mp
+ * Pointer to registering Mempool where addr belongs.
+ *
+ * @return
+ * LKey for address on success, UINT32_MAX on failure.
+ */
+uint32_t
+mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq->priv;
+
+ mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
+ return mlx4_tx_addr2mr_bh(txq, addr);
+}
+
/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
static void
mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
@@ -1098,6 +1243,10 @@ mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
};
rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
+ if (data.ret < 0 && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx4_mr_update_ext_mp(dev, mr_ctrl, mp);
+ }
return data.ret;
}
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 9737da2e..6804c634 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -678,7 +678,6 @@ uint64_t
mlx4_get_rx_queue_offloads(struct priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -780,7 +779,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
/* By default, FCS (CRC) is stripped by hardware. */
crc_present = 0;
- if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
if (priv->hw_fcs_strip) {
crc_present = 1;
} else {
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index ffa8abfc..1be060cd 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -163,6 +163,26 @@ void mlx4_tx_queue_release(void *dpdk_txq);
void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
+ struct rte_mempool *mp);
+
+/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
+ * cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx4_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
/**
* Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
@@ -222,6 +242,19 @@ mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
return mlx4_tx_addr2mr_bh(txq, addr);
}
-#define mlx4_tx_mb2mr(rxq, mb) mlx4_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+static __rte_always_inline uint32_t
+mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey = mlx4_tx_addr2mr(txq, addr);
+
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ if (rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ lkey = mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
+ }
+ return lkey;
+}
#endif /* MLX4_RXTX_H_ */
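
Taken together with the mlx4_mr.c changes, the inline above replaces the old address-only macro: the fast path still resolves the LKey from the per-queue cache, and only when the bottom-half lookup fails with rte_errno == ENXIO (address not covered by any memseg list) does it fall back to registering the mbuf's mempool as external memory. A rough usage sketch, assuming only the helpers introduced in this patch (example_tx_lkey() and its error handling are hypothetical):

/* Hypothetical wrapper showing the lookup order after this patch:
 * per-queue cache -> global cache bottom half -> on-demand external
 * memory registration via mlx4_tx_update_ext_mp().
 */
static inline int
example_tx_lkey(struct txq *txq, struct rte_mbuf *mb, uint32_t *lkey)
{
	*lkey = mlx4_tx_mb2mr(txq, mb);
	if (unlikely(*lkey == UINT32_MAX))
		return -1; /* chunk could not be registered for DMA */
	return 0;
}
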
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 2e70dec5..fecb57c1 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -8,7 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_mlx5.a
LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
-LIB_GLUE_VERSION = 18.05.0
+LIB_GLUE_VERSION = 18.11.0
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
@@ -31,9 +31,11 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_dv.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_tcf.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
@@ -135,6 +137,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_IBV_FLOW_DV_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_FLOW_ACTION_TAG \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_ETHTOOL_LINK_MODE_25G \
/usr/include/linux/ethtool.h \
enum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT \
@@ -150,11 +157,16 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
+ HAVE_IBV_DEVICE_COUNTERS_SET_V42 \
infiniband/verbs.h \
type 'struct ibv_counter_set_init_attr' \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_COUNTERS_SET_V45 \
+ infiniband/verbs.h \
+ type 'struct ibv_counters_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_RDMA_NL_NLDEV \
rdma/rdma_netlink.h \
enum RDMA_NL_NLDEV \
@@ -200,6 +212,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum IFLA_PHYS_PORT_NAME \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_TCA_CHAIN \
+ linux/rtnetlink.h \
+ enum TCA_CHAIN \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_TCA_FLOWER_ACT \
linux/pkt_cls.h \
enum TCA_FLOWER_ACT \
@@ -335,11 +352,31 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum TCA_FLOWER_KEY_VLAN_ETH_TYPE \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_FLAGS \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_FLAGS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_FLAGS_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_GOTO_CHAIN \
+ linux/pkt_cls.h \
+ define TC_ACT_GOTO_CHAIN \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_TC_ACT_VLAN \
linux/tc_act/tc_vlan.h \
enum TCA_VLAN_PUSH_VLAN_PRIORITY \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_PEDIT \
+ linux/tc_act/tc_pedit.h \
+ enum TCA_PEDIT_KEY_EX_HDR_TYPE_UDP \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_SUPPORTED_40000baseKR4_Full \
/usr/include/linux/ethtool.h \
define SUPPORTED_40000baseKR4_Full \
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
new file mode 100644
index 00000000..e8cbe3ee
--- /dev/null
+++ b/drivers/net/mlx5/meson.build
@@ -0,0 +1,244 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 6WIND S.A.
+# Copyright 2018 Mellanox Technologies, Ltd
+
+pmd_dlopen = get_option('enable_driver_mlx_glue')
+LIB_GLUE_BASE = 'librte_pmd_mlx5_glue.so'
+LIB_GLUE_VERSION = '18.11.0'
+LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION
+if pmd_dlopen
+ dpdk_conf.set('RTE_LIBRTE_MLX5_DLOPEN_DEPS', 1)
+ cflags += [
+ '-DMLX5_GLUE="@0@"'.format(LIB_GLUE),
+ '-DMLX5_GLUE_VERSION="@0@"'.format(LIB_GLUE_VERSION),
+ ]
+endif
+libs = [
+ cc.find_library('mnl', required:false),
+ cc.find_library('mlx5', required:false),
+ cc.find_library('ibverbs', required:false),
+]
+build = true
+foreach lib:libs
+ if not lib.found()
+ build = false
+ endif
+endforeach
+if build
+ allow_experimental_apis = true
+ ext_deps += libs
+ sources = files(
+ 'mlx5.c',
+ 'mlx5_ethdev.c',
+ 'mlx5_flow.c',
+ 'mlx5_flow_dv.c',
+ 'mlx5_flow_tcf.c',
+ 'mlx5_flow_verbs.c',
+ 'mlx5_mac.c',
+ 'mlx5_mr.c',
+ 'mlx5_nl.c',
+ 'mlx5_rss.c',
+ 'mlx5_rxmode.c',
+ 'mlx5_rxq.c',
+ 'mlx5_rxtx.c',
+ 'mlx5_socket.c',
+ 'mlx5_stats.c',
+ 'mlx5_trigger.c',
+ 'mlx5_txq.c',
+ 'mlx5_vlan.c',
+ )
+ if dpdk_conf.has('RTE_ARCH_X86_64') or dpdk_conf.has('RTE_ARCH_ARM64')
+ sources += files('mlx5_rxtx_vec.c')
+ endif
+ if not pmd_dlopen
+ sources += files('mlx5_glue.c')
+ endif
+ cflags_options = [
+ '-Wextra',
+ '-std=c11',
+ '-Wno-strict-prototypes',
+ '-D_BSD_SOURCE',
+ '-D_DEFAULT_SOURCE',
+ '-D_XOPEN_SOURCE=600'
+ ]
+ foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+ endforeach
+ if get_option('buildtype').contains('debug')
+ cflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]
+ else
+ cflags += [ '-DNDEBUG', '-UPEDANTIC' ]
+ endif
+ # To maintain compatibility with the make build system,
+ # the mlx5_autoconf.h file is still generated.
+ # input array for meson member search:
+ # [ "MACRO to define if found", "header for the search",
+ # "struct to search", "struct member to search" ]
+ has_member_args = [
+ [ 'HAVE_IBV_MLX5_MOD_SWP', 'infiniband/mlx5dv.h',
+ 'struct mlx5dv_sw_parsing_caps', 'sw_parsing_offloads' ],
+ [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V42', 'infiniband/verbs.h',
+ 'struct ibv_counter_set_init_attr', 'counter_set_id' ],
+ [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V45', 'infiniband/verbs.h',
+ 'struct ibv_counters_init_attr', 'comp_mask' ],
+ ]
+ # input array for meson symbol search:
+ # [ "MACRO to define if found", "header for the search",
+ # "symbol to search" ]
+ has_sym_args = [
+ [ 'HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX' ],
+ [ 'HAVE_IBV_DEVICE_TUNNEL_SUPPORT', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS' ],
+ [ 'HAVE_IBV_MLX5_MOD_MPW', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED' ],
+ [ 'HAVE_IBV_MLX5_MOD_CQE_128B_COMP', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP' ],
+ [ 'HAVE_IBV_FLOW_DV_SUPPORT', 'infiniband/mlx5dv.h',
+ 'MLX5DV_FLOW_ACTION_TAG' ],
+ [ 'HAVE_IBV_DEVICE_MPLS_SUPPORT', 'infiniband/verbs.h',
+ 'IBV_FLOW_SPEC_MPLS' ],
+ [ 'HAVE_IBV_WQ_FLAG_RX_END_PADDING', 'infiniband/verbs.h',
+ 'IBV_WQ_FLAG_RX_END_PADDING' ],
+ [ 'HAVE_SUPPORTED_40000baseKR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseKR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseCR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseCR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseSR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseSR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseLR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseLR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseKR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseKR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseCR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseCR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseSR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseSR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseLR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseLR4_Full' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_25G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_25000baseCR_Full_BIT' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_50G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_100G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT' ],
+ [ 'HAVE_IFLA_PHYS_SWITCH_ID', 'linux/if_link.h',
+ 'IFLA_PHYS_SWITCH_ID' ],
+ [ 'HAVE_IFLA_PHYS_PORT_NAME', 'linux/if_link.h',
+ 'IFLA_PHYS_PORT_NAME' ],
+ [ 'HAVE_TCA_CHAIN', 'linux/rtnetlink.h',
+ 'TCA_CHAIN' ],
+ [ 'HAVE_TCA_FLOWER_ACT', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_ACT' ],
+ [ 'HAVE_TCA_FLOWER_FLAGS', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_FLAGS' ],
+ [ 'HAVE_TCA_FLOWER_KEY_ETH_TYPE', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_ETH_TYPE' ],
+ [ 'HAVE_TCA_FLOWER_KEY_ETH_DST', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_ETH_DST' ],
+ [ 'HAVE_TCA_FLOWER_KEY_ETH_DST_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_ETH_DST_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_ETH_SRC', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_ETH_SRC' ],
+ [ 'HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_ETH_SRC_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IP_PROTO', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IP_PROTO' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV4_SRC', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV4_SRC' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV4_SRC_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV4_DST', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV4_DST' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV4_DST_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV6_SRC', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV6_SRC' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV6_SRC_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV6_DST', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV6_DST' ],
+ [ 'HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_IPV6_DST_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_SRC', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_SRC' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_SRC_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_DST', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_DST' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_DST_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_DST_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_UDP_SRC', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_UDP_SRC' ],
+ [ 'HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_UDP_SRC_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_UDP_DST', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_UDP_DST' ],
+ [ 'HAVE_TCA_FLOWER_KEY_UDP_DST_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_UDP_DST_MASK' ],
+ [ 'HAVE_TCA_FLOWER_KEY_VLAN_ID', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_VLAN_ID' ],
+ [ 'HAVE_TCA_FLOWER_KEY_VLAN_PRIO', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_VLAN_PRIO' ],
+ [ 'HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_VLAN_ETH_TYPE' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_FLAGS', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_FLAGS' ],
+ [ 'HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_TCP_FLAGS_MASK' ],
+ [ 'HAVE_TC_ACT_GOTO_CHAIN', 'linux/pkt_cls.h',
+ 'TC_ACT_GOTO_CHAIN' ],
+ [ 'HAVE_TC_ACT_VLAN', 'linux/tc_act/tc_vlan.h',
+ 'TCA_VLAN_PUSH_VLAN_PRIORITY' ],
+ [ 'HAVE_TC_ACT_PEDIT', 'linux/tc_act/tc_pedit.h',
+ 'TCA_PEDIT_KEY_EX_HDR_TYPE_UDP' ],
+ [ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',
+ 'RDMA_NL_NLDEV' ],
+ [ 'HAVE_RDMA_NLDEV_CMD_GET', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_CMD_GET' ],
+ [ 'HAVE_RDMA_NLDEV_CMD_PORT_GET', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_CMD_PORT_GET' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_DEV_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_DEV_INDEX' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_DEV_NAME', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_DEV_NAME' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_PORT_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_PORT_INDEX' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_NDEV_INDEX' ],
+ ]
+ config = configuration_data()
+ foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2]))
+ endforeach
+ foreach arg:has_member_args
+ file_prefix = '#include<' + arg[1] + '>'
+ config.set(arg[0], cc.has_member(arg[2], arg[3],
+ prefix : file_prefix))
+ endforeach
+ configure_file(output : 'mlx5_autoconf.h', configuration : config)
+endif
+# Build Glue Library
+if pmd_dlopen and build
+ dlopen_name = 'mlx5_glue'
+ dlopen_lib_name = driver_name_fmt.format(dlopen_name)
+ dlopen_so_version = LIB_GLUE_VERSION
+ dlopen_sources = files('mlx5_glue.c')
+ dlopen_install_dir = [ eal_pmd_path + '-glue' ]
+ shared_lib = shared_library(
+ dlopen_lib_name,
+ dlopen_sources,
+ include_directories: global_inc,
+ c_args: cflags,
+ dependencies: libs,
+ link_args: [
+ '-Wl,-export-dynamic',
+ '-Wl,-h,@0@'.format(LIB_GLUE),
+ ],
+ soversion: dlopen_so_version,
+ install: true,
+ install_dir: dlopen_install_dir,
+ )
+endif
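Both build systems end up producing mlx5_autoconf.h from these probes, and the C sources then guard optional paths on the resulting HAVE_* macros (the diff itself uses HAVE_IBV_FLOW_DV_SUPPORT this way). A tiny illustration of that consumption pattern follows; the hard-coded #define merely stands in for including the generated header.

/*
 * Illustrative only: stand-in for #include "mlx5_autoconf.h", which
 * defines HAVE_* macros when the build-time probes succeed.
 */
#include <stdio.h>

#define HAVE_IBV_FLOW_DV_SUPPORT 1

int main(void)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	printf("DV flow engine compiled in\n");
#else
	printf("DV flow engine left out, Verbs/TCF engines remain\n");
#endif
	return 0;
}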
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ec63bc6e..a277b573 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -46,6 +46,7 @@
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_mr.h"
+#include "mlx5_flow.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
@@ -89,6 +90,9 @@
/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"
+/* Activate DV flow steering. */
+#define MLX5_DV_FLOW_EN "dv_flow_en"
+
/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
@@ -282,8 +286,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
close(priv->nl_socket_route);
if (priv->nl_socket_rdma >= 0)
close(priv->nl_socket_rdma);
- if (priv->mnl_socket)
- mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ if (priv->tcf_context)
+ mlx5_flow_tcf_context_destroy(priv->tcf_context);
ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -333,6 +337,17 @@ mlx5_dev_close(struct rte_eth_dev *dev)
}
memset(priv, 0, sizeof(*priv));
priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+ /*
+ * Signal rte_eth_dev_close() that it should release the port resources
+ * (by calling rte_eth_dev_release_port()) in addition to closing it.
+ */
+ dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+ /*
+ * Reset mac_addrs to NULL such that it is not freed as part of
+ * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
+ * it is freed when dev_private is freed.
+ */
+ dev->data->mac_addrs = NULL;
}
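The mac_addrs handling above exists because the MAC array lives inside dev_private rather than in its own allocation, so the pointer must be cleared before the generic release path would free it on its own. A simplified standalone illustration of that aliasing hazard follows; all structures here are hypothetical stand-ins for the real ethdev layout.

/*
 * Illustrative sketch: a field that aliases into a larger allocation must
 * be cleared before a generic release routine frees it separately.
 */
#include <stdlib.h>

struct dev_private { unsigned char mac[6]; };
struct dev_data { struct dev_private *priv; unsigned char *mac_addrs; };

static void generic_release(struct dev_data *data)
{
	free(data->mac_addrs); /* only correct for separately allocated MACs */
	free(data->priv);
}

int main(void)
{
	struct dev_data data;

	data.priv = calloc(1, sizeof(*data.priv));
	if (data.priv == NULL)
		return 1;
	data.mac_addrs = data.priv->mac; /* aliases into dev_private */
	/* ... port lifetime ... */
	data.mac_addrs = NULL; /* prevent a double free in the release path */
	generic_release(&data);
	return 0;
}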
const struct eth_dev_ops mlx5_dev_ops = {
@@ -477,7 +492,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
config->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
- config->mps = !!tmp ? config->mps : 0;
+ config->mps = !!tmp;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
@@ -490,6 +505,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
config->l3_vxlan_en = !!tmp;
} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
config->vf_nl_en = !!tmp;
+ } else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
+ config->dv_flow_en = !!tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
@@ -527,6 +544,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
MLX5_RX_VEC_EN,
MLX5_L3_VXLAN_EN,
MLX5_VF_NL_EN,
+ MLX5_DV_FLOW_EN,
MLX5_REPRESENTOR,
NULL,
};
@@ -568,11 +586,13 @@ static struct rte_pci_driver mlx5_driver;
static void *uar_base;
static int
-find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
+find_lower_va_bound(const struct rte_memseg_list *msl,
const struct rte_memseg *ms, void *arg)
{
void **addr = arg;
+ if (msl->external)
+ return 0;
if (*addr == NULL)
*addr = ms->addr;
else
@@ -685,9 +705,10 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
*
* @return
* A valid Ethernet device object on success, NULL otherwise and rte_errno
- * is set. The following error is defined:
+ * is set. The following errors are defined:
*
* EBUSY: device is not supposed to be spawned.
+ * EEXIST: device is already spawned
*/
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
@@ -702,6 +723,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
struct mlx5_dev_config config = {
.vf = !!vf,
+ .mps = MLX5_ARG_UNSET,
.tx_vec_en = 1,
.rx_vec_en = 1,
.mpw_hdr_dseg = 0,
@@ -729,12 +751,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
unsigned int mprq_max_stride_size_n = 0;
unsigned int mprq_min_stride_num_n = 0;
unsigned int mprq_max_stride_num_n = 0;
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
-#endif
struct ether_addr mac;
char name[RTE_ETH_NAME_MAX_LEN];
int own_domain_id = 0;
+ uint16_t port_id;
unsigned int i;
/* Determine if this port representor is supposed to be spawned. */
@@ -757,6 +777,17 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
return NULL;
}
}
+ /* Build device name. */
+ if (!switch_info->representor)
+ rte_strlcpy(name, dpdk_dev->name, sizeof(name));
+ else
+ snprintf(name, sizeof(name), "%s_representor_%u",
+ dpdk_dev->name, switch_info->port_name);
+ /* Check if the device is already spawned. */
+ if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
+ rte_errno = EEXIST;
+ return NULL;
+ }
/* Prepare shared data between primary and secondary process. */
mlx5_prepare_shared_data();
errno = 0;
@@ -791,7 +822,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
DRV_LOG(DEBUG, "MPW isn't supported");
mps = MLX5_MPW_DISABLED;
}
- config.mps = mps;
#ifdef HAVE_IBV_MLX5_MOD_SWP
if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
@@ -864,11 +894,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
DEBUG("ibv_query_device_ex() failed");
goto error;
}
- if (!switch_info->representor)
- rte_strlcpy(name, dpdk_dev->name, sizeof(name));
- else
- snprintf(name, sizeof(name), "%s_representor_%u",
- dpdk_dev->name, switch_info->port_name);
DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
@@ -1000,12 +1025,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
DRV_LOG(DEBUG, "checksum offloading is %ssupported",
(config.hw_csum ? "" : "not "));
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- config.flow_counter_en = !!attr.max_counter_sets;
- mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
- DRV_LOG(DEBUG, "counter type = %d, num of cs = %ld, attributes = %d",
- cs_desc.counter_type, cs_desc.num_of_cs,
- cs_desc.attributes);
+#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
+ !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ DRV_LOG(DEBUG, "counters are not supported");
+#endif
+#ifndef HAVE_IBV_FLOW_DV_SUPPORT
+ if (config.dv_flow_en) {
+ DRV_LOG(WARNING, "DV flow is not supported");
+ config.dv_flow_en = 0;
+ }
#endif
config.ind_table_max_size =
attr.rss_caps.max_rwq_indirection_table_size;
@@ -1035,13 +1063,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
(1 << IBV_QPT_RAW_PACKET)));
if (config.tso)
config.tso_max_payload_sz = attr.tso_caps.max_tso;
- if (config.mps && !mps) {
- DRV_LOG(ERR,
- "multi-packet send not supported on this device"
- " (" MLX5_TXQ_MPW_EN ")");
- err = ENOTSUP;
- goto error;
- }
+ /*
+ * MPW is disabled by default, while the Enhanced MPW is enabled
+ * by default.
+ */
+ if (config.mps == MLX5_ARG_UNSET)
+ config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
+ MLX5_MPW_DISABLED;
+ else
+ config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
DRV_LOG(INFO, "%sMPS is %s",
config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
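The block above replaces the old hard failure with a three-way resolution of the txq_mpw_en devarg against the device capability. A standalone sketch of that decision, with local stand-ins for MLX5_ARG_UNSET and the MLX5_MPW_* values, is shown below.

/*
 * Standalone sketch of the MPS resolution above: when the devarg is left
 * unset, Enhanced MPW is kept if the device reports it and legacy MPW
 * stays off; an explicit request is honoured only up to what the device
 * offers. Constants are local stand-ins.
 */
#include <stdio.h>

enum { MPW_DISABLED, MPW_ALLOWED, MPW_ENHANCED };
#define ARG_UNSET (-1)

static int resolve_mps(int user_mps, int dev_mps)
{
	if (user_mps == ARG_UNSET)
		return dev_mps == MPW_ENHANCED ? MPW_ENHANCED : MPW_DISABLED;
	return user_mps ? dev_mps : MPW_DISABLED;
}

int main(void)
{
	printf("%d\n", resolve_mps(ARG_UNSET, MPW_ENHANCED)); /* 2: enhanced */
	printf("%d\n", resolve_mps(ARG_UNSET, MPW_ALLOWED));  /* 0: disabled */
	printf("%d\n", resolve_mps(1, MPW_ALLOWED));          /* 1: allowed */
	printf("%d\n", resolve_mps(0, MPW_ENHANCED));         /* 0: disabled */
	return 0;
}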
@@ -1073,13 +1103,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOMEM;
goto error;
}
- if (priv->representor)
+ if (priv->representor) {
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->representor_id = priv->representor_id;
+ }
eth_dev->data->dev_private = priv;
priv->dev_data = eth_dev->data;
eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = dpdk_dev;
- eth_dev->device->driver = &mlx5_driver.driver;
err = mlx5_uar_init_primary(eth_dev);
if (err) {
err = rte_errno;
@@ -1128,8 +1159,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
if (vf && config.vf_nl_en)
mlx5_nl_mac_addr_sync(eth_dev);
- priv->mnl_socket = mlx5_nl_flow_socket_create();
- if (!priv->mnl_socket) {
+ priv->tcf_context = mlx5_flow_tcf_context_create();
+ if (!priv->tcf_context) {
err = -rte_errno;
DRV_LOG(WARNING,
"flow rules relying on switch offloads will not be"
@@ -1144,16 +1175,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
error.message =
"cannot retrieve network interface index";
} else {
- err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
- &error);
+ err = mlx5_flow_tcf_init(priv->tcf_context,
+ ifindex, &error);
}
if (err) {
DRV_LOG(WARNING,
"flow rules relying on switch offloads will"
" not be supported: %s: %s",
error.message, strerror(rte_errno));
- mlx5_nl_flow_socket_destroy(priv->mnl_socket);
- priv->mnl_socket = NULL;
+ mlx5_flow_tcf_context_destroy(priv->tcf_context);
+ priv->tcf_context = NULL;
}
}
TAILQ_INIT(&priv->flows);
@@ -1208,16 +1239,21 @@ error:
close(priv->nl_socket_route);
if (priv->nl_socket_rdma >= 0)
close(priv->nl_socket_rdma);
- if (priv->mnl_socket)
- mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ if (priv->tcf_context)
+ mlx5_flow_tcf_context_destroy(priv->tcf_context);
if (own_domain_id)
claim_zero(rte_eth_switch_domain_free(priv->domain_id));
rte_free(priv);
+ if (eth_dev != NULL)
+ eth_dev->data->dev_private = NULL;
}
if (pd)
claim_zero(mlx5_glue->dealloc_pd(pd));
- if (eth_dev)
+ if (eth_dev != NULL) {
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(eth_dev);
+ }
if (ctx)
claim_zero(mlx5_glue->close_device(ctx));
assert(err > 0);
@@ -1404,9 +1440,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
list[i].eth_dev = mlx5_dev_spawn
(&pci_dev->device, list[i].ibv_dev, vf, &list[i].info);
if (!list[i].eth_dev) {
- if (rte_errno != EBUSY)
+ if (rte_errno != EBUSY && rte_errno != EEXIST)
break;
- /* Device is disabled, ignore it. */
+ /* Device is disabled or already spawned. Ignore it. */
continue;
}
restore = list[i].eth_dev->data->dev_flags;
@@ -1437,8 +1473,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (!list[i].eth_dev)
continue;
mlx5_dev_close(list[i].eth_dev);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(list[i].eth_dev->data->dev_private);
+ /* mac_addrs must not be freed because it is part of dev_private */
+ list[i].eth_dev->data->mac_addrs = NULL;
claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
}
/* Restore original error. */
@@ -1449,6 +1485,32 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
return ret;
}
+/**
+ * DPDK callback to remove a PCI device.
+ *
+ * This function removes all Ethernet devices belonging to a given PCI device.
+ *
+ * @param[in] pci_dev
+ * Pointer to the PCI device.
+ *
+ * @return
+ * 0 on success, the function cannot fail.
+ */
+static int
+mlx5_pci_remove(struct rte_pci_device *pci_dev)
+{
+ uint16_t port_id;
+ struct rte_eth_dev *port;
+
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ port = &rte_eth_devices[port_id];
+ if (port->state != RTE_ETH_DEV_UNUSED &&
+ port->device == &pci_dev->device)
+ rte_eth_dev_close(port_id);
+ }
+ return 0;
+}
+
static const struct rte_pci_id mlx5_pci_id_map[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
@@ -1487,6 +1549,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
+ },
+ {
.vendor_id = 0
}
};
@@ -1497,7 +1563,9 @@ static struct rte_pci_driver mlx5_driver = {
},
.id_table = mlx5_pci_id_map,
.probe = mlx5_pci_probe,
- .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
+ .remove = mlx5_pci_remove,
+ .drv_flags = (RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
+ RTE_PCI_DRV_PROBE_AGAIN),
};
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a3a34cff..74d87c05 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -51,6 +51,7 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF = 0xa2d3,
};
/** Switch information returned by mlx5_nl_switch_info(). */
@@ -71,12 +72,23 @@ struct mlx5_shared_data {
extern struct mlx5_shared_data *mlx5_shared_data;
+struct mlx5_counter_ctrl {
+ /* Name of the counter. */
+ char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
+ /* Name of the counter on the device table. */
+ char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t ib:1; /**< Nonzero for IB counters. */
+};
+
struct mlx5_xstats_ctrl {
/* Number of device stats. */
uint16_t stats_n;
+ /* Number of device stats identified by PMD. */
+ uint16_t mlx5_stats_n;
/* Index in the device counters table. */
uint16_t dev_table_idx[MLX5_MAX_XSTATS];
uint64_t base[MLX5_MAX_XSTATS];
+ struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
};
/* Flow list . */
@@ -99,11 +111,9 @@ struct mlx5_dev_config {
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int vf:1; /* This is a VF. */
- unsigned int mps:2; /* Multi-packet send supported mode. */
unsigned int tunnel_en:1;
/* Whether tunnel stateless offloads are supported. */
unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
- unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
@@ -111,6 +121,7 @@ struct mlx5_dev_config {
unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
+ unsigned int dv_flow_en:1; /* Enable DV flow. */
unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
struct {
unsigned int enabled:1; /* Whether MPRQ is enabled. */
@@ -122,6 +133,7 @@ struct mlx5_dev_config {
unsigned int min_rxqs_num;
/* Rx queue count threshold to enable MPRQ. */
} mprq; /* Configurations for Multi-Packet RQ. */
+ int mps; /* Multi-packet send supported mode. */
unsigned int flow_prio; /* Number of flow priorities. */
unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int ind_table_max_size; /* Maximum indirection table size. */
@@ -156,13 +168,7 @@ struct mlx5_drop {
struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
};
-/** DPDK port to network interface index (ifindex) conversion. */
-struct mlx5_nl_flow_ptoi {
- uint16_t port_id; /**< DPDK port ID. */
- unsigned int ifindex; /**< Network interface index. */
-};
-
-struct mnl_socket;
+struct mlx5_flow_tcf_context;
struct priv {
LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
@@ -212,6 +218,7 @@ struct priv {
LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
/* Verbs Indirection tables. */
LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
+ LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
int primary_socket; /* Unix socket for primary process. */
@@ -228,7 +235,7 @@ struct priv {
rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
/* UAR same-page access control required in 32bit implementations. */
#endif
- struct mnl_socket *mnl_socket; /* Libmnl socket. */
+ struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -240,12 +247,9 @@ int mlx5_getenv_int(const char *);
/* mlx5_ethdev.c */
-int mlx5_get_master_ifname(const struct rte_eth_dev *dev,
- char (*ifname)[IF_NAMESIZE]);
int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
-int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
- int master);
+int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
unsigned int flags);
@@ -396,23 +400,4 @@ unsigned int mlx5_nl_ifindex(int nl, const char *name);
int mlx5_nl_switch_info(int nl, unsigned int ifindex,
struct mlx5_switch_info *info);
-/* mlx5_nl_flow.c */
-
-int mlx5_nl_flow_transpose(void *buf,
- size_t size,
- const struct mlx5_nl_flow_ptoi *ptoi,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action *actions,
- struct rte_flow_error *error);
-void mlx5_nl_flow_brand(void *buf, uint32_t handle);
-int mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
- struct rte_flow_error *error);
-int mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
- struct rte_flow_error *error);
-int mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
- struct rte_flow_error *error);
-struct mnl_socket *mlx5_nl_flow_socket_create(void);
-void mlx5_nl_flow_socket_destroy(struct mnl_socket *nl);
-
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 34c5b95e..d178ed6a 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -3,8 +3,6 @@
* Copyright 2015 Mellanox Technologies, Ltd
*/
-#define _GNU_SOURCE
-
#include <stddef.h>
#include <assert.h>
#include <inttypes.h>
@@ -129,7 +127,7 @@ struct ethtool_link_settings {
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
+static int
mlx5_get_master_ifname(const struct rte_eth_dev *dev,
char (*ifname)[IF_NAMESIZE])
{
@@ -270,16 +268,12 @@ mlx5_ifindex(const struct rte_eth_dev *dev)
* Request number to pass to ioctl().
* @param[out] ifr
* Interface request structure output buffer.
- * @param master
- * When device is a port representor, perform request on master device
- * instead.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
- int master)
+mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
int ret = 0;
@@ -288,10 +282,7 @@ mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
rte_errno = errno;
return -rte_errno;
}
- if (master)
- ret = mlx5_get_master_ifname(dev, &ifr->ifr_name);
- else
- ret = mlx5_get_ifname(dev, &ifr->ifr_name);
+ ret = mlx5_get_ifname(dev, &ifr->ifr_name);
if (ret)
goto error;
ret = ioctl(sock, req, ifr);
@@ -321,7 +312,7 @@ int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
struct ifreq request;
- int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request, 0);
+ int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
if (ret)
return ret;
@@ -345,7 +336,7 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ifreq request = { .ifr_mtu = mtu, };
- return mlx5_ifreq(dev, SIOCSIFMTU, &request, 0);
+ return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}
/**
@@ -365,13 +356,13 @@ int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
struct ifreq request;
- int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request, 0);
+ int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
if (ret)
return ret;
request.ifr_flags &= keep;
request.ifr_flags |= flags & ~keep;
- return mlx5_ifreq(dev, SIOCSIFFLAGS, &request, 0);
+ return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}
/**
@@ -627,17 +618,20 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
int link_speed = 0;
int ret;
- ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
dev->data->port_id, strerror(rte_errno));
return ret;
}
- memset(&dev_link, 0, sizeof(dev_link));
- dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
- (ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = (void *)&edata;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ dev_link = (struct rte_eth_link) {
+ .link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING)),
+ };
+ ifr = (struct ifreq) {
+ .ifr_data = (void *)&edata,
+ };
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(WARNING,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
@@ -666,8 +660,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- if ((dev_link.link_speed && !dev_link.link_status) ||
- (!dev_link.link_speed && dev_link.link_status)) {
+ if (((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status))) {
rte_errno = EAGAIN;
return -rte_errno;
}
@@ -698,17 +692,20 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
uint64_t sc;
int ret;
- ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
dev->data->port_id, strerror(rte_errno));
return ret;
}
- memset(&dev_link, 0, sizeof(dev_link));
- dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
- (ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = (void *)&gcmd;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ dev_link = (struct rte_eth_link) {
+ .link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING)),
+ };
+ ifr = (struct ifreq) {
+ .ifr_data = (void *)&gcmd,
+ };
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(DEBUG,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
@@ -725,7 +722,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
*ecmd = gcmd;
ifr.ifr_data = (void *)ecmd;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(DEBUG,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
@@ -775,8 +772,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- if ((dev_link.link_speed && !dev_link.link_status) ||
- (!dev_link.link_speed && dev_link.link_status)) {
+ if (((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status))) {
rte_errno = EAGAIN;
return -rte_errno;
}
@@ -888,7 +885,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)&ethpause;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(WARNING,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
@@ -941,7 +938,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 0);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(WARNING,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
@@ -1306,10 +1303,7 @@ mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
RTE_ETH_FOREACH_DEV(id) {
struct rte_eth_dev *ldev = &rte_eth_devices[id];
- if (!ldev->device ||
- !ldev->device->driver ||
- strcmp(ldev->device->driver->name, MLX5_DRIVER_NAME) ||
- ldev->device != dev)
+ if (ldev->device != dev)
continue;
if (n < port_list_n)
port_list[n] = id;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ca4625b6..280af0ab 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3,6 +3,7 @@
* Copyright 2016 Mellanox Technologies, Ltd
*/
+#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
@@ -31,74 +32,30 @@
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
+#include "mlx5_flow.h"
/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-/* Pattern outer Layer bits. */
-#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
-#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
-#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
-#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
-#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
-
-/* Pattern inner Layer bits. */
-#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
-#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
-#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
-#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
-#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
-
-/* Pattern tunnel Layer bits. */
-#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
-#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
-#define MLX5_FLOW_LAYER_GRE (1u << 14)
-#define MLX5_FLOW_LAYER_MPLS (1u << 15)
-
-/* Outer Masks. */
-#define MLX5_FLOW_LAYER_OUTER_L3 \
- (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
-#define MLX5_FLOW_LAYER_OUTER_L4 \
- (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
-#define MLX5_FLOW_LAYER_OUTER \
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
- MLX5_FLOW_LAYER_OUTER_L4)
-
-/* Tunnel Masks. */
-#define MLX5_FLOW_LAYER_TUNNEL \
- (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
- MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
-
-/* Inner Masks. */
-#define MLX5_FLOW_LAYER_INNER_L3 \
- (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
-#define MLX5_FLOW_LAYER_INNER_L4 \
- (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
-#define MLX5_FLOW_LAYER_INNER \
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
- MLX5_FLOW_LAYER_INNER_L4)
-
-/* Actions that modify the fate of matching traffic. */
-#define MLX5_FLOW_FATE_DROP (1u << 0)
-#define MLX5_FLOW_FATE_QUEUE (1u << 1)
-#define MLX5_FLOW_FATE_RSS (1u << 2)
-
-/* Modify a packet. */
-#define MLX5_FLOW_MOD_FLAG (1u << 0)
-#define MLX5_FLOW_MOD_MARK (1u << 1)
-#define MLX5_FLOW_MOD_COUNT (1u << 2)
-
-/* possible L3 layers protocols filtering. */
-#define MLX5_IP_PROTOCOL_TCP 6
-#define MLX5_IP_PROTOCOL_UDP 17
-#define MLX5_IP_PROTOCOL_GRE 47
-#define MLX5_IP_PROTOCOL_MPLS 147
-
-/* Priority reserved for default flows. */
-#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+/** Device flow drivers. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
+#endif
+extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
+extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
+
+const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
+
+const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
+ [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
+#endif
+ [MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
+ [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
+ [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
+};
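The flow_drv_ops table above dispatches flow operations by driver type, with a null-ops entry guarding the MIN/MAX sentinels and the DV slot compiled in only when rdma-core provides the DV API. A self-contained sketch of the same dispatch pattern, using hypothetical stand-in types, is shown below.

/*
 * Illustrative sketch of a per-type ops dispatch table: every slot is
 * populated, so a lookup never dereferences a NULL entry.
 */
#include <stdio.h>

enum flow_type { FLOW_TYPE_MIN, FLOW_TYPE_DV, FLOW_TYPE_TCF,
		 FLOW_TYPE_VERBS, FLOW_TYPE_MAX };

struct flow_driver_ops { const char *name; };

static const struct flow_driver_ops null_ops = { "null" };
static const struct flow_driver_ops dv_ops = { "dv" };
static const struct flow_driver_ops tcf_ops = { "tcf" };
static const struct flow_driver_ops verbs_ops = { "verbs" };

static const struct flow_driver_ops *flow_ops[] = {
	[FLOW_TYPE_MIN] = &null_ops,
	[FLOW_TYPE_DV] = &dv_ops,
	[FLOW_TYPE_TCF] = &tcf_ops,
	[FLOW_TYPE_VERBS] = &verbs_ops,
	[FLOW_TYPE_MAX] = &null_ops,
};

int main(void)
{
	printf("selected engine: %s\n", flow_ops[FLOW_TYPE_VERBS]->name);
	return 0;
}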
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
@@ -270,53 +227,6 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
},
};
-/** Handles information leading to a drop fate. */
-struct mlx5_flow_verbs {
- LIST_ENTRY(mlx5_flow_verbs) next;
- unsigned int size; /**< Size of the attribute. */
- struct {
- struct ibv_flow_attr *attr;
- /**< Pointer to the Specification buffer. */
- uint8_t *specs; /**< Pointer to the specifications. */
- };
- struct ibv_flow *flow; /**< Verbs flow pointer. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
- uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
-};
-
-/* Counters information. */
-struct mlx5_flow_counter {
- LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
- uint32_t shared:1; /**< Share counter ID with other flow rules. */
- uint32_t ref_cnt:31; /**< Reference counter. */
- uint32_t id; /**< Counter ID. */
- struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
- uint64_t hits; /**< Number of packets matched by the rule. */
- uint64_t bytes; /**< Number of bytes matched by the rule. */
-};
-
-/* Flow structure. */
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct rte_flow_attr attributes; /**< User flow attribute. */
- uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
- uint32_t layers;
- /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
- uint32_t modifier;
- /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */
- uint32_t fate;
- /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
- uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */
- LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
- struct mlx5_flow_verbs *cur_verbs;
- /**< Current Verbs flow structure being filled. */
- struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
- struct rte_flow_action_rss rss;/**< RSS context. */
- uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
- uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
- void *nl_flow; /**< Netlink flow buffer if relevant. */
-};
-
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -352,23 +262,6 @@ struct mlx5_fdir {
struct rte_flow_action_queue queue;
};
-/* Verbs specification header. */
-struct ibv_spec_header {
- enum ibv_flow_spec_type type;
- uint16_t size;
-};
-
-/*
- * Number of sub priorities.
- * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
- * matching on the NIC (firmware dependent) L4 most have the higher priority
- * followed by L3 and ending with L2.
- */
-#define MLX5_PRIORITY_MAP_L2 2
-#define MLX5_PRIORITY_MAP_L3 1
-#define MLX5_PRIORITY_MAP_L4 0
-#define MLX5_PRIORITY_MAP_MAX 3
-
/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
@@ -413,7 +306,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
* Discover the maximum number of priority available.
*
* @param[in] dev
- * Pointer to Ethernet device.
+ * Pointer to the Ethernet device structure.
*
* @return
* number of supported flow priority on success, a negative errno
@@ -478,160 +371,33 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
}
/**
- * Adjust flow priority.
+ * Adjust flow priority based on the highest layer and the requested priority.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param flow
- * Pointer to an rte flow.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] priority
+ * The rule base priority.
+ * @param[in] subpriority
+ * The priority based on the items.
+ *
+ * @return
+ * The new priority.
*/
-static void
-mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
+uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+ uint32_t subpriority)
{
+ uint32_t res = 0;
struct priv *priv = dev->data->dev_private;
- uint32_t priority = flow->attributes.priority;
- uint32_t subpriority = flow->cur_verbs->attr->priority;
switch (priv->config.flow_prio) {
case RTE_DIM(priority_map_3):
- priority = priority_map_3[priority][subpriority];
+ res = priority_map_3[priority][subpriority];
break;
case RTE_DIM(priority_map_5):
- priority = priority_map_5[priority][subpriority];
+ res = priority_map_5[priority][subpriority];
break;
}
- flow->cur_verbs->attr->priority = priority;
-}
-
-/**
- * Get a flow counter.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] shared
- * Indicate if this counter is shared with other flows.
- * @param[in] id
- * Counter identifier.
- *
- * @return
- * A pointer to the counter, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_flow_counter *
-mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
-{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter *cnt;
-
- LIST_FOREACH(cnt, &priv->flow_counters, next) {
- if (!cnt->shared || cnt->shared != shared)
- continue;
- if (cnt->id != id)
- continue;
- cnt->ref_cnt++;
- return cnt;
- }
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-
- struct mlx5_flow_counter tmpl = {
- .shared = shared,
- .id = id,
- .cs = mlx5_glue->create_counter_set
- (priv->ctx,
- &(struct ibv_counter_set_init_attr){
- .counter_set_id = id,
- }),
- .hits = 0,
- .bytes = 0,
- };
-
- if (!tmpl.cs) {
- rte_errno = errno;
- return NULL;
- }
- cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
- if (!cnt) {
- rte_errno = ENOMEM;
- return NULL;
- }
- *cnt = tmpl;
- LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
- return cnt;
-#endif
- rte_errno = ENOTSUP;
- return NULL;
-}
-
-/**
- * Release a flow counter.
- *
- * @param[in] counter
- * Pointer to the counter handler.
- */
-static void
-mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
-{
- if (--counter->ref_cnt == 0) {
- claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
- LIST_REMOVE(counter, next);
- rte_free(counter);
- }
-}
-
-/**
- * Verify the @p attributes will be correctly understood by the NIC and store
- * them in the @p flow if everything is correct.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] attributes
- * Pointer to flow attributes
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_attributes(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attributes,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- uint32_t priority_max =
- ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
-
- if (attributes->group)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups is not supported");
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
- attributes->priority >= priority_max)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priority out of range");
- if (attributes->egress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "egress is not supported");
- if (attributes->transfer)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
- NULL,
- "transfer is not supported");
- if (!attributes->ingress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "ingress attribute is mandatory");
- flow->attributes = *attributes;
- if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
- flow->attributes.priority = priority_max;
- return 0;
+ return res;
}
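mlx5_flow_adjust_priority() now simply indexes one of the priority maps with the rule priority and the item sub-priority, so that (with 8 Verbs priorities) an L4 match keeps precedence over L3 and L2 within the same rule priority. A standalone sketch reusing the priority_map_3 values shown above; everything else here is illustrative.

/*
 * Standalone sketch of the priority adjustment: the table row is the user
 * priority, the column is the item sub-priority (L4 = 0, L3 = 1, L2 = 2),
 * and the lower resulting Verbs priority wins.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PRIORITY_MAP_MAX 3

static const uint32_t priority_map_3[][PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

static uint32_t adjust_priority(uint32_t priority, uint32_t subpriority)
{
	return priority_map_3[priority][subpriority];
}

int main(void)
{
	/* User priority 1, L3 sub-priority (1) -> Verbs priority 3. */
	printf("%" PRIu32 "\n", adjust_priority(1, 1));
	return 0;
}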
/**
@@ -652,7 +418,7 @@ mlx5_flow_attributes(struct rte_eth_dev *dev,
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
const uint8_t *mask,
const uint8_t *nic_mask,
@@ -671,8 +437,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
" bits");
if (!item->spec && (item->mask || item->last))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"mask/last without a spec is not"
" supported");
if (item->spec && item->last) {
@@ -687,206 +452,635 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
}
ret = memcmp(spec, last, size);
if (ret != 0)
- return rte_flow_error_set(error, ENOTSUP,
+ return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "range is not supported");
+ "range is not valid");
}
return 0;
}
/**
- * Add a verbs item specification into @p flow.
+ * Adjust the hash fields according to the @p flow information.
*
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] src
- * Create specification.
- * @param[in] size
- * Size in bytes of the specification to copy.
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] tunnel
+ * 1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ * ETH_RSS_* types.
+ * @param[in] hash_fields
+ * Item hash fields.
+ *
+ * @return
+ * The hash fields that should be used.
+ */
+uint64_t
+mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+ int tunnel __rte_unused, uint64_t layer_types,
+ uint64_t hash_fields)
+{
+ struct rte_flow *flow = dev_flow->flow;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ int rss_request_inner = flow->rss.level >= 2;
+
+ /* Check RSS hash level for tunnel. */
+ if (tunnel && rss_request_inner)
+ hash_fields |= IBV_RX_HASH_INNER;
+ else if (tunnel || rss_request_inner)
+ return 0;
+#endif
+ /* Check if requested layer matches RSS hash fields. */
+ if (!(flow->rss.types & layer_types))
+ return 0;
+ return hash_fields;
+}
+
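mlx5_flow_hashfields_adjust() above decides whether a layer contributes to the RSS hash: with tunnel support, inner layers only count when the RSS level asks for inner RSS (level >= 2), and any layer is dropped when its ETH_RSS_* type was not requested. A standalone sketch of that logic with local stand-in flag values (the real code uses the IBV_RX_HASH_* and ETH_RSS_* definitions) follows.

/*
 * Illustrative sketch of the hash-field adjustment; flag values here are
 * local stand-ins, not the Verbs/DPDK constants.
 */
#include <stdint.h>
#include <stdio.h>

#define HASH_INNER (1ULL << 63) /* stand-in for IBV_RX_HASH_INNER */
#define RSS_IPV4   (1ULL << 2)  /* stand-in for ETH_RSS_IPV4 */
#define HASH_IPV4  (3ULL << 0)  /* stand-in for IBV_RX_HASH_SRC/DST_IPV4 */

static uint64_t hashfields_adjust(uint64_t rss_types, unsigned int rss_level,
				  int tunnel, uint64_t layer_types,
				  uint64_t hash_fields)
{
	int inner_requested = rss_level >= 2;

	if (tunnel && inner_requested)
		hash_fields |= HASH_INNER;
	else if (tunnel || inner_requested)
		return 0;
	if (!(rss_types & layer_types))
		return 0;
	return hash_fields;
}

int main(void)
{
	/* Outer IPv4 layer, outer RSS requested: keep the IPv4 hash fields. */
	printf("%#llx\n", (unsigned long long)
	       hashfields_adjust(RSS_IPV4, 1, 0, RSS_IPV4, HASH_IPV4));
	/* Same layer with inner RSS requested: it contributes nothing. */
	printf("%#llx\n", (unsigned long long)
	       hashfields_adjust(RSS_IPV4, 2, 0, RSS_IPV4, HASH_IPV4));
	return 0;
}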
+/**
+ * Look up and set the ptype in the Rx data path. A single ptype can be used
+ * only while all tunnel rules on this queue share it; if several tunnel types
+ * are used, the tunnel ptype is cleared.
+ *
+ * @param rxq_ctrl
+ * Rx queue to update.
*/
static void
-mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
+flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+ unsigned int i;
+ uint32_t tunnel_ptype = 0;
+
+ /* Look up for the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
+ continue;
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
+ }
+ }
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
+}
+
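flow_rxq_tunnel_ptype_update() above keeps one tunnel ptype on the queue only while every tunnel flow on it uses the same tunnel type; as soon as a second type shows up, the ptype is cleared. A standalone sketch of that resolution is shown below; the counters and ptype values are local stand-ins.

/*
 * Illustrative sketch: resolve a single tunnel ptype from per-tunnel flow
 * counters, or clear it when more than one tunnel type is in use.
 */
#include <stdint.h>
#include <stdio.h>

#define N_TUNNELS 3

static uint32_t resolve_tunnel_ptype(const uint16_t flows_n[N_TUNNELS],
				     const uint32_t ptypes[N_TUNNELS])
{
	uint32_t tunnel_ptype = 0;

	for (unsigned int i = 0; i != N_TUNNELS; ++i) {
		if (!flows_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = ptypes[i];
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	return tunnel_ptype;
}

int main(void)
{
	const uint32_t ptypes[N_TUNNELS] = { 0x100, 0x200, 0x300 };
	const uint16_t one_kind[N_TUNNELS] = { 2, 0, 0 };
	const uint16_t two_kinds[N_TUNNELS] = { 2, 1, 0 };

	printf("%#x\n", resolve_tunnel_ptype(one_kind, ptypes));  /* 0x100 */
	printf("%#x\n", resolve_tunnel_ptype(two_kinds, ptypes)); /* 0 */
	return 0;
}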
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
+ * flow.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] dev_flow
+ * Pointer to device flow structure.
+ */
+static void
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
+ const int mark = !!(flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
- if (verbs->specs) {
- void *dst;
+ if (mark) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n++;
+ }
+ if (tunnel) {
+ unsigned int j;
- dst = (void *)(verbs->specs + verbs->size);
- memcpy(dst, src, size);
- ++verbs->attr->num_of_specs;
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
+ }
+ flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
}
- verbs->size += size;
}
/**
- * Adjust verbs hash fields according to the @p flow information.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
*
- * @param[in, out] flow.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] flow
* Pointer to flow structure.
- * @param[in] tunnel
- * 1 when the hash field is for a tunnel item.
- * @param[in] layer_types
- * ETH_RSS_* types.
- * @param[in] hash_fields
- * Item hash fields.
*/
static void
-mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
- int tunnel __rte_unused,
- uint32_t layer_types, uint64_t hash_fields)
+flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_set(dev, dev_flow);
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * device flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] dev_flow
+ * Pointer to the device flow.
+ */
+static void
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
+ const int mark = !!(flow->actions &
+ (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->flow_mark_n--;
+ rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel &
+ dev_flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
+ }
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the flow.
+ */
+static void
+flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next)
+ flow_drv_rxq_flags_trim(dev, dev_flow);
+}
+
+/**
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
+
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i],
+ struct mlx5_rxq_ctrl, rxq);
+ rxq_ctrl->flow_mark_n = 0;
+ rxq_ctrl->rxq.mark = 0;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
+ }
+}
+
+/*
+ * Validate the flag action.
+ *
+ * @param[in] action_flags
+ * Bit-fields that holds the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_flag(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't mark and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 flag"
+ " actions in same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "flag action not supported for "
+ "egress");
+ return 0;
+}
+
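The validators added here all follow the same pattern: the caller accumulates one bit per action already parsed, and each validator rejects incompatible combinations (drop together with flag or mark, two fate actions, duplicate flag or mark) by inspecting that mask. A condensed standalone sketch with hypothetical flag values follows.

/*
 * Illustrative sketch of the action_flags mutual-exclusion checks; the
 * flag values are local stand-ins for the MLX5_FLOW_ACTION_* bits.
 */
#include <stdint.h>
#include <stdio.h>

#define ACT_FLAG  (1u << 0)
#define ACT_MARK  (1u << 1)
#define ACT_DROP  (1u << 2)
#define ACT_QUEUE (1u << 3)
#define ACT_RSS   (1u << 4)
#define FATE_ACTIONS (ACT_DROP | ACT_QUEUE | ACT_RSS)

static int validate_drop(uint64_t action_flags)
{
	if (action_flags & (ACT_FLAG | ACT_MARK))
		return -1; /* can't drop and flag/mark in the same flow */
	if (action_flags & FATE_ACTIONS)
		return -1; /* can't have two fate actions in one flow */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_drop(0));         /* 0: accepted */
	printf("%d\n", validate_drop(ACT_MARK));  /* -1: rejected */
	printf("%d\n", validate_drop(ACT_QUEUE)); /* -1: rejected */
	return 0;
}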
+/*
+ * Validate the mark action.
+ *
+ * @param[in] action
+ * Pointer to the queue action.
+ * @param[in] action_flags
+ * Bit-fields that holds the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+
+ if (!mark)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "configuration cannot be null");
+ if (mark->id >= MLX5_FLOW_MARK_MAX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &mark->id,
+ "mark id must in 0 <= id < "
+ RTE_STR(MLX5_FLOW_MARK_MAX));
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and mark in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't flag and mark in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 mark actions in same"
+ " flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "mark action not supported for "
+ "egress");
+ return 0;
+}
+
+/*
+ * Validate the drop action.
+ *
+ * @param[in] action_flags
+ * Bit-fields that holds the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_drop(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and flag in same flow");
+ if (action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and mark in same flow");
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "drop action not supported for "
+ "egress");
+ return 0;
+}
+
+/*
+ * Validate the queue action.
+ *
+ * @param[in] action
+ * Pointer to the queue action.
+ * @param[in] action_flags
+ * Bit-fields that holds the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions in"
+ " same flow");
+ if (queue->index >= priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue index out of range");
+ if (!(*priv->rxqs)[queue->index])
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue is not configured");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "queue action not supported for "
+ "egress");
+ return 0;
+}
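+
+/*
+ * Example (sketch): validating a simple queue fate action through the
+ * generic rte_flow API, assuming port_id 0 and a configured Rx queue 0.
+ *
+ *   struct rte_flow_attr attr = { .ingress = 1 };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *   struct rte_flow_action_queue queue = { .index = 0 };
+ *   struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *   struct rte_flow_error err;
+ *   int ret = rte_flow_validate(0, &attr, pattern, actions, &err);
+ */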
+
+/**
+ * Validate the RSS action.
+ *
+ * @param[in] action
+ * Pointer to the RSS action.
+ * @param[in] action_flags
+ * Bit-fields that hold the actions detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_rss *rss = action->conf;
+ unsigned int i;
+
+ if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have 2 fate actions"
+ " in same flow");
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
- if (flow->rss.level == 2 && !tunnel)
- hash_fields = 0;
- else if (flow->rss.level < 2 && tunnel)
- hash_fields = 0;
+ if (rss->level > 2)
+#else
+ if (rss->level > 1)
#endif
- if (!(flow->rss.types & layer_types))
- hash_fields = 0;
- flow->cur_verbs->hash_fields |= hash_fields;
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (rss->queue_num > priv->config.ind_table_max_size)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (rss->types & MLX5_RSS_HF_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "some RSS protocols are not"
+ " supported");
+ for (i = 0; i != rss->queue_num; ++i) {
+ if (!(*priv->rxqs)[rss->queue[i]])
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i], "queue is not configured");
+ }
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "rss action not supported for "
+ "egress");
+ return 0;
+}
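+
+/*
+ * Example (sketch): an RSS action configuration that passes the checks
+ * above, assuming two configured Rx queues and the 40-byte hash key;
+ * the key contents here are illustrative only.
+ *
+ *   static const uint8_t key[MLX5_RSS_HASH_KEY_LEN] = { 0 };
+ *   uint16_t queues[] = { 0, 1 };
+ *   struct rte_flow_action_rss rss = {
+ *       .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
+ *       .level = 1,
+ *       .types = ETH_RSS_IP | ETH_RSS_UDP,
+ *       .key_len = MLX5_RSS_HASH_KEY_LEN,
+ *       .key = key,
+ *       .queue_num = 2,
+ *       .queue = queues,
+ *   };
+ *
+ * Unsupported hash functions, key lengths other than
+ * MLX5_RSS_HASH_KEY_LEN, RSS types outside the mlx5 mask and
+ * unconfigured queues are all refused.
+ */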
+
+/**
+ * Validate the count action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "count action not supported for "
+ "egress");
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attributes
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint32_t priority_max = priv->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority out of range");
+ if (attributes->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "egress is not supported");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ return 0;
+}
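+
+/*
+ * Example (sketch): the only attribute combination accepted here is a
+ * plain ingress rule, e.g.:
+ *
+ *   struct rte_flow_attr attr = {
+ *       .group = 0,
+ *       .priority = 0,
+ *       .ingress = 1,
+ *   };
+ *
+ * Non-zero groups, egress, transfer and priorities at or beyond the
+ * number of flow priorities reported by the device are all rejected.
+ */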
+
+/**
+ * Validate Ethernet item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
const struct rte_flow_item_eth nic_mask = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.type = RTE_BE16(0xffff),
};
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- const unsigned int size = sizeof(struct ibv_flow_spec_eth);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
int ret;
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2))
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L2)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L2 layers already configured");
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "3 levels of l2 are not supported");
+ if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "2 L2 without tunnel are not supported");
if (!mask)
mask = &rte_flow_item_eth_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_eth),
error);
- if (ret)
- return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- if (size > flow_size)
- return size;
- if (spec) {
- unsigned int i;
-
- memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
- eth.val.ether_type = spec->type;
- memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
- eth.mask.ether_type = mask->type;
- /* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
- eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
- eth.val.src_mac[i] &= eth.mask.src_mac[i];
- }
- eth.val.ether_type &= eth.mask.ether_type;
- }
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, &eth, size);
- return size;
-}
-
-/**
- * Update the VLAN tag in the Verbs Ethernet specification.
- *
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] eth
- * Verbs structure containing the VLAN information to copy.
- */
-static void
-mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
- struct ibv_flow_spec_eth *eth)
-{
- unsigned int i;
- const enum ibv_flow_spec_type search = eth->type;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
-
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- struct ibv_flow_spec_eth *e =
- (struct ibv_flow_spec_eth *)hdr;
-
- e->val.vlan_tag = eth->val.vlan_tag;
- e->mask.vlan_tag = eth->mask.vlan_tag;
- e->val.ether_type = eth->val.ether_type;
- e->mask.ether_type = eth->mask.ether_type;
- break;
- }
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
- }
+ return ret;
}
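+
+/*
+ * Example (sketch): an Ethernet item restricted to a destination MAC,
+ * which fits within the nic_mask above (addresses are illustrative).
+ *
+ *   struct rte_flow_item_eth eth_spec = {
+ *       .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
+ *   };
+ *   struct rte_flow_item_eth eth_mask = {
+ *       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ *   };
+ *   struct rte_flow_item item = {
+ *       .type = RTE_FLOW_ITEM_TYPE_ETH,
+ *       .spec = &eth_spec,
+ *       .mask = &eth_mask,
+ *   };
+ */
+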
/**
- * Convert the @p item into @p flow (or by updating the already present
- * Ethernet Verbs) specification after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VLAN item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
+ int64_t item_flags,
+ struct rte_flow_error *error)
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
@@ -894,100 +1088,66 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
.tci = RTE_BE16(0x0fff),
.inner_type = RTE_BE16(0xffff),
};
- unsigned int size = sizeof(struct ibv_flow_spec_eth);
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- struct ibv_flow_spec_eth eth = {
- .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ uint16_t vlan_tag = 0;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
MLX5_FLOW_LAYER_INNER_L4) :
- (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+ (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4);
const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
- MLX5_FLOW_LAYER_OUTER_VLAN;
- const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ MLX5_FLOW_LAYER_OUTER_VLAN;
- if (flow->layers & vlanm)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (item_flags & vlanm)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"VLAN layer already configured");
- else if ((flow->layers & l34m) != 0)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ else if ((item_flags & l34m) != 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"L2 layer cannot follow L3/L4 layer");
if (!mask)
mask = &rte_flow_item_vlan_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_vlan), error);
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan),
+ error);
if (ret)
return ret;
if (spec) {
- eth.val.vlan_tag = spec->tci;
- eth.mask.vlan_tag = mask->tci;
- eth.val.vlan_tag &= eth.mask.vlan_tag;
- eth.val.ether_type = spec->inner_type;
- eth.mask.ether_type = mask->inner_type;
- eth.val.ether_type &= eth.mask.ether_type;
+ vlan_tag = spec->tci;
+ vlan_tag &= mask->tci;
}
/*
* From verbs perspective an empty VLAN is equivalent
* to a packet without VLAN layer.
*/
- if (!eth.mask.vlan_tag)
+ if (!vlan_tag)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
item->spec,
"VLAN cannot be empty");
- if (!(flow->layers & l2m)) {
- if (size <= flow_size) {
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- mlx5_flow_spec_verbs_add(flow, &eth, size);
- }
- } else {
- if (flow->cur_verbs)
- mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
- &eth);
- size = 0; /* Only an update is done in eth specification. */
- }
- flow->layers |= tunnel ?
- (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
- return size;
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate IPV4 item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
+ int64_t item_flags,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
const struct rte_flow_item_ipv4 nic_mask = {
.hdr = {
@@ -997,97 +1157,48 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
.next_proto_id = 0xff,
},
};
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
- struct ibv_flow_spec_ipv4_ext ipv4 = {
- .type = IBV_FLOW_SPEC_IPV4_EXT |
- (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3))
+ if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"multiple L3 layers not supported");
- else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an L4 layer.");
if (!mask)
mask = &rte_flow_item_ipv4_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_ipv4), error);
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv4),
+ error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (spec) {
- ipv4.val = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = spec->hdr.src_addr,
- .dst_ip = spec->hdr.dst_addr,
- .proto = spec->hdr.next_proto_id,
- .tos = spec->hdr.type_of_service,
- };
- ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
- .src_ip = mask->hdr.src_addr,
- .dst_ip = mask->hdr.dst_addr,
- .proto = mask->hdr.next_proto_id,
- .tos = mask->hdr.type_of_service,
- };
- /* Remove unwanted bits from values. */
- ipv4.val.src_ip &= ipv4.mask.src_ip;
- ipv4.val.dst_ip &= ipv4.mask.dst_ip;
- ipv4.val.proto &= ipv4.mask.proto;
- ipv4.val.tos &= ipv4.mask.tos;
- }
- flow->l3_protocol_en = !!ipv4.mask.proto;
- flow->l3_protocol = ipv4.val.proto;
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER),
- (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv4, size);
- }
- return size;
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate IPV6 item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
const struct rte_flow_item_ipv6 nic_mask = {
.hdr = {
@@ -1102,25 +1213,18 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
.hop_limits = 0xff,
},
};
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
- struct ibv_flow_spec_ipv6 ipv6 = {
- .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3))
+ if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"multiple L3 layers not supported");
- else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an L4 layer.");
/*
* IPv6 is not recognised by the NIC inside a GRE tunnel.
@@ -1128,130 +1232,64 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
* accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
* Mellanox OFED 4.4-1.0.0.0.
*/
- if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
+ if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"IPv6 inside a GRE tunnel is"
" not recognised.");
if (!mask)
mask = &rte_flow_item_ipv6_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_ipv6), error);
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (spec) {
- unsigned int i;
- uint32_t vtc_flow_val;
- uint32_t vtc_flow_mask;
-
- memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
- RTE_DIM(ipv6.val.src_ip));
- memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
- RTE_DIM(ipv6.val.dst_ip));
- memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
- RTE_DIM(ipv6.mask.src_ip));
- memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
- RTE_DIM(ipv6.mask.dst_ip));
- vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
- vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
- ipv6.val.flow_label =
- rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.val.next_hdr = spec->hdr.proto;
- ipv6.val.hop_limit = spec->hdr.hop_limits;
- ipv6.mask.flow_label =
- rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
- ipv6.mask.next_hdr = mask->hdr.proto;
- ipv6.mask.hop_limit = mask->hdr.hop_limits;
- /* Remove unwanted bits from values. */
- for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
- ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
- ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
- }
- ipv6.val.flow_label &= ipv6.mask.flow_label;
- ipv6.val.traffic_class &= ipv6.mask.traffic_class;
- ipv6.val.next_hdr &= ipv6.mask.next_hdr;
- ipv6.val.hop_limit &= ipv6.mask.hop_limit;
- }
- flow->l3_protocol_en = !!ipv6.mask.next_hdr;
- flow->l3_protocol = ipv6.val.next_hdr;
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust
- (flow, tunnel,
- (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
- (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
- mlx5_flow_spec_verbs_add(flow, &ipv6, size);
- }
- return size;
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate UDP item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp udp = {
- .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
" with UDP layer");
- if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3)))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L3 is mandatory to filter"
- " on L4");
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "L4 layer is already"
- " present");
+ if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L4 layer is already present");
if (!mask)
mask = &rte_flow_item_udp_mask;
ret = mlx5_flow_item_acceptable
@@ -1260,178 +1298,118 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
sizeof(struct rte_flow_item_udp), error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- if (spec) {
- udp.val.dst_port = spec->hdr.dst_port;
- udp.val.src_port = spec->hdr.src_port;
- udp.mask.dst_port = mask->hdr.dst_port;
- udp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- udp.val.src_port &= udp.mask.src_port;
- udp.val.dst_port &= udp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
- (IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &udp, size);
- }
- return size;
+ return 0;
}
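+
+/*
+ * Example (sketch): item ordering expected by the L4 validators above;
+ * an L3 item must precede UDP/TCP, and only one L4 layer is allowed.
+ *
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ */
+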
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate TCP item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[in] flow_mask
+ * mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ const struct rte_flow_item_tcp *flow_mask,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
- struct ibv_flow_spec_tcp_udp tcp = {
- .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
- .size = size,
- };
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ assert(flow_mask);
+ if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
" with TCP layer");
- if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3)))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 is mandatory to filter on L4");
- if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"L4 layer is already present");
if (!mask)
mask = &rte_flow_item_tcp_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_tcp_mask,
+ (const uint8_t *)flow_mask,
sizeof(struct rte_flow_item_tcp), error);
if (ret < 0)
return ret;
- flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- if (spec) {
- tcp.val.dst_port = spec->hdr.dst_port;
- tcp.val.src_port = spec->hdr.src_port;
- tcp.mask.dst_port = mask->hdr.dst_port;
- tcp.mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- tcp.val.src_port &= tcp.mask.src_port;
- tcp.val.dst_port &= tcp.mask.dst_port;
- }
- if (size <= flow_size) {
- mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
- (IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP));
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
- mlx5_flow_spec_verbs_add(flow, &tcp, size);
- }
- return size;
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VXLAN item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
- const size_t flow_size, struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error)
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
int ret;
union vni {
uint32_t vlan_id;
uint8_t vni[4];
} id = { .vlan_id = 0, };
+ uint32_t vlan_id = 0;
+
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"a tunnel is already present");
/*
* Verify only UDPv4 is present as defined in
* https://tools.ietf.org/html/rfc7348
*/
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_vxlan_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_vxlan_mask,
- sizeof(struct rte_flow_item_vxlan), error);
+ sizeof(struct rte_flow_item_vxlan),
+ error);
if (ret < 0)
return ret;
if (spec) {
memcpy(&id.vni[1], spec->vni, 3);
- vxlan.val.tunnel_id = id.vlan_id;
+ vlan_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
- vxlan.mask.tunnel_id = id.vlan_id;
- /* Remove unwanted bits from values. */
- vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ vlan_id &= id.vlan_id;
}
/*
* Tunnel id 0 is equivalent as not adding a VXLAN layer, if
@@ -1442,109 +1420,88 @@ mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
* match this rule. To avoid such situation, VNI 0 is
* currently refused.
*/
- if (!vxlan.val.tunnel_id)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (!vlan_id)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"VXLAN vni cannot be 0");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"VXLAN tunnel must be fully defined");
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN;
- return size;
+ return 0;
}
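+
+/*
+ * Example (sketch): a fully specified VXLAN tunnel pattern with a
+ * non-zero VNI, as required by the checks above (VNI value illustrative).
+ *
+ *   struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0, 0, 42 } };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
+ *       { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ *         .spec = &vxlan_spec,
+ *         .mask = &rte_flow_item_vxlan_mask },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ */
+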
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate VXLAN_GPE item.
*
- * @param dev
- * Pointer to Ethernet device.
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
- const struct rte_flow_item *item,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
{
+ struct priv *priv = dev->data->dev_private;
const struct rte_flow_item_vxlan_gpe *spec = item->spec;
const struct rte_flow_item_vxlan_gpe *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan_gpe = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
int ret;
union vni {
uint32_t vlan_id;
uint8_t vni[4];
} id = { .vlan_id = 0, };
+ uint32_t vlan_id = 0;
- if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
+ if (!priv->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 VXLAN is not enabled by device"
" parameter and/or not configured in"
" firmware");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"a tunnel is already present");
/*
* Verify only UDPv4 is present as defined in
* https://tools.ietf.org/html/rfc7348
*/
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"no outer UDP layer found");
if (!mask)
mask = &rte_flow_item_vxlan_gpe_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
- sizeof(struct rte_flow_item_vxlan_gpe), error);
+ sizeof(struct rte_flow_item_vxlan_gpe),
+ error);
if (ret < 0)
return ret;
if (spec) {
+ if (spec->protocol)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol"
+ " not supported");
memcpy(&id.vni[1], spec->vni, 3);
- vxlan_gpe.val.tunnel_id = id.vlan_id;
+ vlan_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
- vxlan_gpe.mask.tunnel_id = id.vlan_id;
- if (spec->protocol)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VxLAN-GPE protocol not supported");
- /* Remove unwanted bits from values. */
- vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+ vlan_id &= id.vlan_id;
}
/*
* Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
@@ -1554,141 +1511,55 @@ mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
* before will also match this rule. To avoid such situation, VNI 0
* is currently refused.
*/
- if (!vxlan_gpe.val.tunnel_id)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (!vlan_id)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"VXLAN-GPE vni cannot be 0");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"VXLAN-GPE tunnel must be fully"
" defined");
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
- return size;
-}
-
-/**
- * Update the protocol in Verbs IPv4/IPv6 spec.
- *
- * @param[in, out] attr
- * Pointer to Verbs attributes structure.
- * @param[in] search
- * Specification type to search in order to update the IP protocol.
- * @param[in] protocol
- * Protocol value to set if none is present in the specification.
- */
-static void
-mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
- enum ibv_flow_spec_type search,
- uint8_t protocol)
-{
- unsigned int i;
- struct ibv_spec_header *hdr = (struct ibv_spec_header *)
- ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
-
- if (!attr)
- return;
- for (i = 0; i != attr->num_of_specs; ++i) {
- if (hdr->type == search) {
- union {
- struct ibv_flow_spec_ipv4_ext *ipv4;
- struct ibv_flow_spec_ipv6 *ipv6;
- } ip;
-
- switch (search) {
- case IBV_FLOW_SPEC_IPV4_EXT:
- ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
- if (!ip.ipv4->val.proto) {
- ip.ipv4->val.proto = protocol;
- ip.ipv4->mask.proto = 0xff;
- }
- break;
- case IBV_FLOW_SPEC_IPV6:
- ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
- if (!ip.ipv6->val.next_hdr) {
- ip.ipv6->val.next_hdr = protocol;
- ip.ipv6->mask.next_hdr = 0xff;
- }
- break;
- default:
- break;
- }
- break;
- }
- hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
- }
+ return 0;
}
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * It will also update the previous L3 layer with the protocol value matching
- * the GRE.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate GRE item.
*
- * @param dev
- * Pointer to Ethernet device.
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit flags to mark detected items.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_gre(const struct rte_flow_item *item,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
- const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *spec __rte_unused = item->spec;
const struct rte_flow_item_gre *mask = item->mask;
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- unsigned int size = sizeof(struct ibv_flow_spec_gre);
- struct ibv_flow_spec_gre tunnel = {
- .type = IBV_FLOW_SPEC_GRE,
- .size = size,
- };
-#else
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel tunnel = {
- .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
-#endif
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
" with this GRE layer");
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"a tunnel is already present");
- if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 Layer is missing");
if (!mask)
mask = &rte_flow_item_gre_mask;
@@ -1698,92 +1569,50 @@ mlx5_flow_item_gre(const struct rte_flow_item *item,
sizeof(struct rte_flow_item_gre), error);
if (ret < 0)
return ret;
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- if (spec) {
- tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
- tunnel.val.protocol = spec->protocol;
- tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
- tunnel.mask.protocol = mask->protocol;
- /* Remove unwanted bits from values. */
- tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
- tunnel.val.protocol &= tunnel.mask.protocol;
- tunnel.val.key &= tunnel.mask.key;
- }
-#else
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
if (spec && (spec->protocol & mask->protocol))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"without MPLS support the"
" specification cannot be used for"
" filtering");
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
- if (size <= flow_size) {
- if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
- MLX5_IP_PROTOCOL_GRE);
- else
- mlx5_flow_item_gre_ip_protocol_update
- (verbs->attr, IBV_FLOW_SPEC_IPV6,
- MLX5_IP_PROTOCOL_GRE);
- mlx5_flow_spec_verbs_add(flow, &tunnel, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_GRE;
- return size;
+#endif
+ return 0;
}
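+
+/*
+ * Example (sketch): GRE can only follow an outer L3 layer and cannot be
+ * nested in another tunnel, so a minimal accepted pattern looks like:
+ *
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *       { .type = RTE_FLOW_ITEM_TYPE_GRE },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *
+ * Matching on the GRE protocol field additionally requires MPLS support
+ * in the Verbs library (HAVE_IBV_DEVICE_MPLS_SUPPORT).
+ */
+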
/**
- * Convert the @p item into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
+ * Validate MPLS item.
*
* @param[in] item
* Item specification.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
* @param[out] error
* Pointer to error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p item has fully been converted,
- * otherwise another call with this returned memory size should be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
- struct rte_flow *flow __rte_unused,
- const size_t flow_size __rte_unused,
- struct rte_flow_error *error)
+int
+mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+ uint64_t item_flags __rte_unused,
+ uint8_t target_protocol __rte_unused,
+ struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- const struct rte_flow_item_mpls *spec = item->spec;
const struct rte_flow_item_mpls *mask = item->mask;
- unsigned int size = sizeof(struct ibv_flow_spec_mpls);
- struct ibv_flow_spec_mpls mpls = {
- .type = IBV_FLOW_SPEC_MPLS,
- .size = size,
- };
int ret;
- if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
" with MPLS layer");
- /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
- if (flow->layers & MLX5_FLOW_LAYER_TUNNEL &&
- (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE)
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"a tunnel is already"
" present");
if (!mask)
@@ -1794,1056 +1623,298 @@ mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
sizeof(struct rte_flow_item_mpls), error);
if (ret < 0)
return ret;
- if (spec) {
- memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
- memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
- /* Remove unwanted bits from values. */
- mpls.val.label &= mpls.mask.label;
- }
- if (size <= flow_size) {
- mlx5_flow_spec_verbs_add(flow, &mpls, size);
- flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
- }
- flow->layers |= MLX5_FLOW_LAYER_MPLS;
- return size;
-#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ return 0;
+#endif
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
"MPLS is not supported by Verbs, please"
" update.");
}
-/**
- * Convert the @p pattern into a Verbs specifications after ensuring the NIC
- * will understand and process it correctly.
- * The conversion is performed item per item, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p pattern, unless an error is encountered.
- *
- * @param[in] pattern
- * Flow pattern.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @pattern has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
static int
-mlx5_flow_items(struct rte_eth_dev *dev,
- const struct rte_flow_item pattern[],
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+flow_null_validate(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
{
- int remain = flow_size;
- size_t size = 0;
-
- for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
- int ret = 0;
-
- switch (pattern->type) {
- case RTE_FLOW_ITEM_TYPE_VOID:
- break;
- case RTE_FLOW_ITEM_TYPE_ETH:
- ret = mlx5_flow_item_eth(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- ret = mlx5_flow_item_udp(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_item_vxlan(pattern, flow, remain,
- error);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
- remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- ret = mlx5_flow_item_gre(pattern, flow, remain, error);
- break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
- ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern,
- "item not supported");
- }
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
- }
- if (!flow->layers) {
- const struct rte_flow_item item = {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- };
-
- return mlx5_flow_item_eth(&item, flow, flow_size, error);
- }
- return size;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
-/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_action_drop(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+static struct mlx5_flow *
+flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ uint64_t *item_flags __rte_unused,
+ uint64_t *action_flags __rte_unused,
+ struct rte_flow_error *error __rte_unused)
{
- unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
- struct ibv_flow_spec_action_drop drop = {
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = size,
- };
-
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "drop is not compatible with"
- " flag/mark action");
- if (size < flow_size)
- mlx5_flow_spec_verbs_add(flow, &drop, size);
- flow->fate |= MLX5_FLOW_FATE_DROP;
- return size;
+ rte_errno = ENOTSUP;
+ return NULL;
}
-/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- *
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
static int
-mlx5_flow_action_queue(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+flow_null_translate(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow *dev_flow __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_flow_action_queue *queue = action->conf;
-
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (queue->index >= priv->rxqs_n)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &queue->index,
- "queue index out of range");
- if (!(*priv->rxqs)[queue->index])
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &queue->index,
- "queue is not configured");
- if (flow->queue)
- (*flow->queue)[0] = queue->index;
- flow->rss.queue_num = 1;
- flow->fate |= MLX5_FLOW_FATE_QUEUE;
- return 0;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
-/**
- * Ensure the @p action will be understood and used correctly by the NIC.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param action[in]
- * Pointer to flow actions array.
- * @param flow[in, out]
- * Pointer to the rte_flow structure.
- * @param error[in, out]
- * Pointer to error structure.
- *
- * @return
- * On success @p flow->queue array and @p flow->rss are filled and valid.
- * On error, a negative errno value is returned and rte_errno is set.
- */
static int
-mlx5_flow_action_rss(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+flow_null_apply(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ struct rte_flow_error *error __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_flow_action_rss *rss = action->conf;
- unsigned int i;
-
- if (flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "multiple fate actions are not"
- " supported");
- if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
- rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->func,
- "RSS hash function not supported");
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (rss->level > 2)
-#else
- if (rss->level > 1)
-#endif
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->level,
- "tunnel RSS is not supported");
- if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->key_len,
- "RSS hash key too small");
- if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->key_len,
- "RSS hash key too large");
- if (!rss->queue_num)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- rss,
- "no queues were provided for RSS");
- if (rss->queue_num > priv->config.ind_table_max_size)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue_num,
- "number of queues too large");
- if (rss->types & MLX5_RSS_HF_MASK)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->types,
- "some RSS protocols are not"
- " supported");
- for (i = 0; i != rss->queue_num; ++i) {
- if (rss->queue[i] >= priv->rxqs_n)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- rss,
- "queue index out of range");
- if (!(*priv->rxqs)[rss->queue[i]])
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &rss->queue[i],
- "queue is not configured");
- }
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
- rss->queue_num * sizeof(uint16_t));
- flow->rss.queue_num = rss->queue_num;
- memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
- flow->rss.types = rss->types;
- flow->rss.level = rss->level;
- flow->fate |= MLX5_FLOW_FATE_RSS;
- return 0;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
-/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_action_flag(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
-{
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (flow->modifier & MLX5_FLOW_MOD_FLAG)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flag action already present");
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flag is not compatible with drop"
- " action");
- if (flow->modifier & MLX5_FLOW_MOD_MARK)
- size = 0;
- else if (size <= flow_size && verbs)
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- flow->modifier |= MLX5_FLOW_MOD_FLAG;
- return size;
+static void
+flow_null_remove(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused)
+{
}
-/**
- * Update verbs specification to modify the flag to mark.
- *
- * @param[in, out] verbs
- * Pointer to the mlx5_flow_verbs structure.
- * @param[in] mark_id
- * Mark identifier to replace the flag.
- */
static void
-mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused)
{
- struct ibv_spec_header *hdr;
- int i;
-
- if (!verbs)
- return;
- /* Update Verbs specification. */
- hdr = (struct ibv_spec_header *)verbs->specs;
- if (!hdr)
- return;
- for (i = 0; i != verbs->attr->num_of_specs; ++i) {
- if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
- struct ibv_flow_spec_action_tag *t =
- (struct ibv_flow_spec_action_tag *)hdr;
-
- t->tag_id = mlx5_flow_mark_set(mark_id);
- }
- hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
- }
}
-/**
- * Convert the @p action into @p flow (or by updating the already present
- * Flag Verbs specification) after ensuring the NIC will understand and
- * process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param[in] action
- * Action configuration.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
static int
-mlx5_flow_action_mark(const struct rte_flow_action *action,
- struct rte_flow *flow, const size_t flow_size,
- struct rte_flow_error *error)
+flow_null_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error __rte_unused)
{
- const struct rte_flow_action_mark *mark = action->conf;
- unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
- struct ibv_flow_spec_action_tag tag = {
- .type = IBV_FLOW_SPEC_ACTION_TAG,
- .size = size,
- };
- struct mlx5_flow_verbs *verbs = flow->cur_verbs;
-
- if (!mark)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "configuration cannot be null");
- if (mark->id >= MLX5_FLOW_MARK_MAX)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- &mark->id,
- "mark id must in 0 <= id < "
- RTE_STR(MLX5_FLOW_MARK_MAX));
- if (flow->modifier & MLX5_FLOW_MOD_MARK)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "mark action already present");
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "mark is not compatible with drop"
- " action");
- if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
- mlx5_flow_verbs_mark_update(verbs, mark->id);
- size = 0;
- } else if (size <= flow_size) {
- tag.tag_id = mlx5_flow_mark_set(mark->id);
- mlx5_flow_spec_verbs_add(flow, &tag, size);
- }
- flow->modifier |= MLX5_FLOW_MOD_MARK;
- return size;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
+/* Void driver to protect against null pointer dereference. */
+const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
+ .validate = flow_null_validate,
+ .prepare = flow_null_prepare,
+ .translate = flow_null_translate,
+ .apply = flow_null_apply,
+ .remove = flow_null_remove,
+ .destroy = flow_null_destroy,
+ .query = flow_null_query,
+};
+
/**
- * Convert the @p action into a Verbs specification after ensuring the NIC
- * will understand and process it correctly.
- * If the necessary size for the conversion is greater than the @p flow_size,
- * nothing is written in @p flow, the validation is still performed.
- *
- * @param action[in]
- * Action configuration.
- * @param flow[in, out]
- * Pointer to flow structure.
- * @param flow_size[in]
- * Size in bytes of the available space in @p flow, if too small, nothing is
- * written.
- * @param error[int, out]
- * Pointer to error structure.
+ * Select flow driver type according to flow attributes and device
+ * configuration.
+ *
+ * @param[in] dev
+ * Pointer to the dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p action has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ *   Flow driver type on success, MLX5_FLOW_TYPE_MAX otherwise.
*/
-static int
-mlx5_flow_action_count(struct rte_eth_dev *dev,
- const struct rte_flow_action *action,
- struct rte_flow *flow,
- const size_t flow_size __rte_unused,
- struct rte_flow_error *error)
-{
- const struct rte_flow_action_count *count = action->conf;
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
- struct ibv_flow_spec_counter_action counter = {
- .type = IBV_FLOW_SPEC_ACTION_COUNT,
- .size = size,
- };
-#endif
+static enum mlx5_flow_drv_type
+flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
+{
+ struct priv *priv = dev->data->dev_private;
+ enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
- if (!flow->counter) {
- flow->counter = mlx5_flow_counter_new(dev, count->shared,
- count->id);
- if (!flow->counter)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "cannot get counter"
- " context.");
- }
- if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "flow counters are not supported.");
- flow->modifier |= MLX5_FLOW_MOD_COUNT;
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- counter.counter_set_handle = flow->counter->cs->handle;
- if (size <= flow_size)
- mlx5_flow_spec_verbs_add(flow, &counter, size);
- return size;
-#endif
- return 0;
+ if (attr->transfer)
+ type = MLX5_FLOW_TYPE_TCF;
+ else
+ type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+ MLX5_FLOW_TYPE_VERBS;
+ return type;
}
+#define flow_get_drv_ops(type) flow_drv_ops[type]
+
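/*
 * Illustrative sketch, not part of this hunk: flow_get_drv_ops() assumes a
 * per-type dispatch table defined elsewhere in mlx5_flow.c.  Only
 * mlx5_flow_null_drv_ops is visible in this section; the other entry shown
 * below is a hypothetical placeholder for a backend ops table.
 *
 *	static const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
 *		[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
 *		[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
 *		[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops,
 *	};
 */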
/**
- * Convert the @p action into @p flow after ensuring the NIC will understand
- * and process it correctly.
- * The conversion is performed action per action, each of them is written into
- * the @p flow if its size is lesser or equal to @p flow_size.
- * Validation and memory consumption computation are still performed until the
- * end of @p action, unless an error is encountered.
+ * Flow driver validation API. This abstracts calling driver specific functions.
+ * The type of flow driver is determined according to flow attributes.
*
* @param[in] dev
- * Pointer to Ethernet device structure.
+ * Pointer to the dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
* @param[in] actions
- * Pointer to flow actions array.
- * @param[in, out] flow
- * Pointer to the rte_flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
+ * Pointer to the list of actions.
* @param[out] error
- * Pointer to error structure.
+ * Pointer to the error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the @p actions has fully been
- * converted, otherwise another call with this returned memory size should
- * be done.
- * On error, a negative errno value is returned and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_actions(struct rte_eth_dev *dev,
+static inline int
+flow_drv_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- struct rte_flow *flow, const size_t flow_size,
struct rte_flow_error *error)
{
- size_t size = 0;
- int remain = flow_size;
- int ret = 0;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_FLAG:
- ret = mlx5_flow_action_flag(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- ret = mlx5_flow_action_mark(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- ret = mlx5_flow_action_drop(actions, flow, remain,
- error);
- break;
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- ret = mlx5_flow_action_queue(dev, actions, flow, error);
- break;
- case RTE_FLOW_ACTION_TYPE_RSS:
- ret = mlx5_flow_action_rss(dev, actions, flow, error);
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = mlx5_flow_action_count(dev, actions, flow, remain,
- error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
- }
- if (ret < 0)
- return ret;
- if (remain > ret)
- remain -= ret;
- else
- remain = 0;
- size += ret;
- }
- if (!flow->fate)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no fate action found");
- return size;
+ fops = flow_get_drv_ops(type);
+ return fops->validate(dev, attr, items, actions, error);
}
/**
- * Validate flow rule and fill flow structure accordingly.
+ * Flow driver preparation API. This abstracts calling driver specific
+ * functions. Parent flow (rte_flow) should have driver type (drv_type). It
+ * calculates the size of memory required for device flow, allocates the memory,
+ * initializes the device flow and returns the pointer.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param[out] flow
- * Pointer to flow structure.
- * @param flow_size
- * Size of allocated space for @p flow.
* @param[in] attr
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
* @param[in] actions
- * Associated actions (list terminated by the END action).
+ * Pointer to the list of actions.
+ * @param[out] item_flags
+ * Pointer to bit mask of all items detected.
+ * @param[out] action_flags
+ * Pointer to bit mask of all actions detected.
* @param[out] error
- * Perform verbose error reporting if not NULL.
+ * Pointer to the error structure.
*
* @return
- * A positive value representing the size of the flow object in bytes
- * regardless of @p flow_size on success, a negative errno value otherwise
- * and rte_errno is set.
+ *   Pointer to the device flow on success, otherwise NULL and rte_errno is set.
*/
-static int
-mlx5_flow_merge_switch(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- size_t flow_size,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
- uint16_t port_id[!n + n];
- struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
- size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
- unsigned int i;
- unsigned int own = 0;
- int ret;
-
- /* At least one port is needed when no switch domain is present. */
- if (!n) {
- n = 1;
- port_id[0] = dev->data->port_id;
- } else {
- n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
- }
- for (i = 0; i != n; ++i) {
- struct rte_eth_dev_info dev_info;
-
- rte_eth_dev_info_get(port_id[i], &dev_info);
- if (port_id[i] == dev->data->port_id)
- own = i;
- ptoi[i].port_id = port_id[i];
- ptoi[i].ifindex = dev_info.if_index;
- }
- /* Ensure first entry of ptoi[] is the current device. */
- if (own) {
- ptoi[n] = ptoi[0];
- ptoi[0] = ptoi[own];
- ptoi[own] = ptoi[n];
- }
- /* An entry with zero ifindex terminates ptoi[]. */
- ptoi[n].port_id = 0;
- ptoi[n].ifindex = 0;
- if (flow_size < off)
- flow_size = 0;
- ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
- flow_size ? flow_size - off : 0,
- ptoi, attr, pattern, actions, error);
- if (ret < 0)
- return ret;
- if (flow_size) {
- *flow = (struct rte_flow){
- .attributes = *attr,
- .nl_flow = (uint8_t *)flow + off,
- };
- /*
- * Generate a reasonably unique handle based on the address
- * of the target buffer.
- *
- * This is straightforward on 32-bit systems where the flow
- * pointer can be used directly. Otherwise, its least
- * significant part is taken after shifting it by the
- * previous power of two of the pointed buffer size.
- */
- if (sizeof(flow) <= 4)
- mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
- else
- mlx5_nl_flow_brand
- (flow->nl_flow,
- (uintptr_t)flow >>
- rte_log2_u32(rte_align32prevpow2(flow_size)));
- }
- return off + ret;
-}
-
-static unsigned int
-mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+static inline struct mlx5_flow *
+flow_drv_prepare(struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ uint64_t *item_flags,
+ uint64_t *action_flags,
+ struct rte_flow_error *error)
{
- const struct rte_flow_item *item;
- unsigned int has_vlan = 0;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- has_vlan = 1;
- break;
- }
- }
- if (has_vlan)
- return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
- MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
- return rss_level < 2 ? MLX5_EXPANSION_ROOT :
- MLX5_EXPANSION_ROOT_OUTER;
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->prepare(attr, items, actions, item_flags, action_flags,
+ error);
}
/**
- * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC
- * after ensuring the NIC will understand and process it correctly.
- * The conversion is only performed item/action per item/action, each of
- * them is written into the @p flow if its size is lesser or equal to @p
- * flow_size.
- * Validation and memory consumption computation are still performed until the
- * end, unless an error is encountered.
+ * Flow driver translation API. This abstracts calling driver specific
+ * functions. Parent flow (rte_flow) should have driver type (drv_type). It
+ * translates a generic flow into a driver flow. flow_drv_prepare() must
+ * precede.
*
* @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[in] flow_size
- * Size in bytes of the available space in @p flow, if too small some
- * garbage may be present.
- * @param[in] attributes
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
+ * Pointer to the rte dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5 flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
* @param[in] actions
- * Associated actions (list terminated by the END action).
+ * Pointer to the list of actions.
* @param[out] error
- * Perform verbose error reporting if not NULL.
+ * Pointer to the error structure.
*
* @return
- * On success the number of bytes consumed/necessary, if the returned value
- * is lesser or equal to @p flow_size, the flow has fully been converted and
- * can be applied, otherwise another call with this returned memory size
- * should be done.
- * On error, a negative errno value is returned and rte_errno is set.
- */
-static int
-mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
- const size_t flow_size,
- const struct rte_flow_attr *attributes,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct rte_flow local_flow = { .layers = 0, };
- size_t size = sizeof(*flow);
- union {
- struct rte_flow_expand_rss buf;
- uint8_t buffer[2048];
- } expand_buffer;
- struct rte_flow_expand_rss *buf = &expand_buffer.buf;
- struct mlx5_flow_verbs *original_verbs = NULL;
- size_t original_verbs_size = 0;
- uint32_t original_layers = 0;
- int expanded_pattern_idx = 0;
- int ret;
- uint32_t i;
-
- if (attributes->transfer)
- return mlx5_flow_merge_switch(dev, flow, flow_size,
- attributes, pattern,
- actions, error);
- if (size > flow_size)
- flow = &local_flow;
- ret = mlx5_flow_attributes(dev, attributes, flow, error);
- if (ret < 0)
- return ret;
- ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
- if (ret < 0)
- return ret;
- if (local_flow.rss.types) {
- unsigned int graph_root;
-
- graph_root = mlx5_find_graph_root(pattern,
- local_flow.rss.level);
- ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
- pattern, local_flow.rss.types,
- mlx5_support_expansion,
- graph_root);
- assert(ret > 0 &&
- (unsigned int)ret < sizeof(expand_buffer.buffer));
- } else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)pattern;
- }
- size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
- sizeof(void *));
- if (size <= flow_size)
- flow->queue = (void *)(flow + 1);
- LIST_INIT(&flow->verbs);
- flow->layers = 0;
- flow->modifier = 0;
- flow->fate = 0;
- for (i = 0; i != buf->entries; ++i) {
- size_t off = size;
- size_t off2;
-
- flow->layers = original_layers;
- size += sizeof(struct ibv_flow_attr) +
- sizeof(struct mlx5_flow_verbs);
- off2 = size;
- if (size < flow_size) {
- flow->cur_verbs = (void *)((uintptr_t)flow + off);
- flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
- flow->cur_verbs->specs =
- (void *)(flow->cur_verbs->attr + 1);
- }
- /* First iteration convert the pattern into Verbs. */
- if (i == 0) {
- /* Actions don't need to be converted several time. */
- ret = mlx5_flow_actions(dev, actions, flow,
- (size < flow_size) ?
- flow_size - size : 0,
- error);
- if (ret < 0)
- return ret;
- size += ret;
- } else {
- /*
- * Next iteration means the pattern has already been
- * converted and an expansion is necessary to match
- * the user RSS request. For that only the expanded
- * items will be converted, the common part with the
- * user pattern are just copied into the next buffer
- * zone.
- */
- size += original_verbs_size;
- if (size < flow_size) {
- rte_memcpy(flow->cur_verbs->attr,
- original_verbs->attr,
- original_verbs_size +
- sizeof(struct ibv_flow_attr));
- flow->cur_verbs->size = original_verbs_size;
- }
- }
- ret = mlx5_flow_items
- (dev,
- (const struct rte_flow_item *)
- &buf->entry[i].pattern[expanded_pattern_idx],
- flow,
- (size < flow_size) ? flow_size - size : 0, error);
- if (ret < 0)
- return ret;
- size += ret;
- if (size <= flow_size) {
- mlx5_flow_adjust_priority(dev, flow);
- LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
- }
- /*
- * Keep a pointer of the first verbs conversion and the layers
- * it has encountered.
- */
- if (i == 0) {
- original_verbs = flow->cur_verbs;
- original_verbs_size = size - off2;
- original_layers = flow->layers;
- /*
- * move the index of the expanded pattern to the
- * first item not addressed yet.
- */
- if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
- expanded_pattern_idx++;
- } else {
- const struct rte_flow_item *item = pattern;
-
- for (item = pattern;
- item->type != RTE_FLOW_ITEM_TYPE_END;
- ++item)
- expanded_pattern_idx++;
- }
- }
- }
- /* Restore the origin layers in the flow. */
- flow->layers = original_layers;
- return size;
-}
-
-/**
- * Lookup and set the ptype in the data Rx part. A single Ptype can be used,
- * if several tunnel rules are used on this queue, the tunnel ptype will be
- * cleared.
- *
- * @param rxq_ctrl
- * Rx queue to update.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
+static inline int
+flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
- unsigned int i;
- uint32_t tunnel_ptype = 0;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
- /* Look up for the ptype to use. */
- for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
- if (!rxq_ctrl->flow_tunnels_n[i])
- continue;
- if (!tunnel_ptype) {
- tunnel_ptype = tunnels_info[i].ptype;
- } else {
- tunnel_ptype = 0;
- break;
- }
- }
- rxq_ctrl->rxq.tunnel = tunnel_ptype;
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->translate(dev, dev_flow, attr, items, actions, error);
}
/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ * Flow driver apply API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It applies
+ * translated driver flows onto the device. flow_drv_translate() must precede.
*
* @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] flow
+ * Pointer to Ethernet device structure.
+ * @param[in, out] flow
* Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+static inline int
+flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
- if (tunnel) {
- unsigned int j;
-
- /* Increase the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]++;
- break;
- }
- }
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
- }
- }
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ return fops->apply(dev, flow, error);
}
/**
- * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
- * @p flow if no other flow uses it with the same kind of request.
+ * Flow driver remove API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
+ * from the device. All the resources of the flow should be freed by calling
+ * flow_drv_destroy().
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
- * @param[in] flow
- * Pointer to the flow.
+ * @param[in, out] flow
+ * Pointer to flow structure.
*/
-static void
-mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+static inline void
+flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
- const int mark = !!(flow->modifier &
- (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
- const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
- unsigned int i;
-
- assert(dev->data->dev_started);
- for (i = 0; i != flow->rss.queue_num; ++i) {
- int idx = (*flow->queue)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
-
- if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
- if (tunnel) {
- unsigned int j;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- /* Decrease the counter matching the flow. */
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
- if ((tunnels_info[j].tunnel & flow->layers) ==
- tunnels_info[j].tunnel) {
- rxq_ctrl->flow_tunnels_n[j]--;
- break;
- }
- }
- mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
- }
- }
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ fops->remove(dev, flow);
}
/**
- * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ * Flow driver destroy API. This abstracts calling driver specific functions.
+ * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
+ * from the device and releases its resources.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
*/
-static void
-mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+static inline void
+flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
-
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
- unsigned int j;
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type type = flow->drv_type;
- if (!(*priv->rxqs)[i])
- continue;
- rxq_ctrl = container_of((*priv->rxqs)[i],
- struct mlx5_rxq_ctrl, rxq);
- rxq_ctrl->flow_mark_n = 0;
- rxq_ctrl->rxq.mark = 0;
- for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
- rxq_ctrl->flow_tunnels_n[j] = 0;
- rxq_ctrl->rxq.tunnel = 0;
- }
+ assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(type);
+ fops->destroy(dev, flow);
}
/**
@@ -2859,134 +1930,55 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+ int ret;
+ ret = flow_drv_validate(dev, attr, items, actions, error);
if (ret < 0)
return ret;
return 0;
}
/**
- * Remove the flow.
+ * Get RSS action from the action list.
*
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to flow structure.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ *
+ * @return
+ *   Pointer to the RSS action if it exists, NULL otherwise.
*/
-static void
-mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+static const struct rte_flow_action_rss*
+flow_get_rss_action(const struct rte_flow_action actions[])
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_verbs *verbs;
-
- if (flow->nl_flow && priv->mnl_socket)
- mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->flow) {
- claim_zero(mlx5_glue->destroy_flow(verbs->flow));
- verbs->flow = NULL;
- }
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ return (const struct rte_flow_action_rss *)
+ actions->conf;
+ default:
+ break;
}
}
- if (flow->counter) {
- mlx5_flow_counter_release(flow->counter);
- flow->counter = NULL;
- }
+ return NULL;
}
-/**
- * Apply the flow.
- *
- * @param[in] dev
- * Pointer to Ethernet device structure.
- * @param[in, out] flow
- * Pointer to flow structure.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
- struct rte_flow_error *error)
+static unsigned int
+find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_verbs *verbs;
- int err;
-
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (flow->fate & MLX5_FLOW_FATE_DROP) {
- verbs->hrxq = mlx5_hrxq_drop_new(dev);
- if (!verbs->hrxq) {
- rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get drop hash queue");
- goto error;
- }
- } else {
- struct mlx5_hrxq *hrxq;
-
- hrxq = mlx5_hrxq_get(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num);
- if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->key,
- MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
- flow->rss.queue_num,
- !!(flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
- if (!hrxq) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot get hash queue");
- goto error;
- }
- verbs->hrxq = hrxq;
- }
- verbs->flow =
- mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
- if (!verbs->flow) {
- rte_flow_error_set(error, errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "hardware refuses to create flow");
- goto error;
- }
- }
- if (flow->nl_flow &&
- priv->mnl_socket &&
- mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
- goto error;
- return 0;
-error:
- err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(verbs, &flow->verbs, next) {
- if (verbs->hrxq) {
- if (flow->fate & MLX5_FLOW_FATE_DROP)
- mlx5_hrxq_drop_release(dev);
- else
- mlx5_hrxq_release(dev, verbs->hrxq);
- verbs->hrxq = NULL;
+ const struct rte_flow_item *item;
+ unsigned int has_vlan = 0;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ has_vlan = 1;
+ break;
}
}
- rte_errno = err; /* Restore rte_errno. */
- return -rte_errno;
+ if (has_vlan)
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+ MLX5_EXPANSION_ROOT_OUTER;
}
/**
@@ -3009,50 +2001,90 @@ error:
* A flow on success, NULL otherwise and rte_errno is set.
*/
static struct rte_flow *
-mlx5_flow_list_create(struct rte_eth_dev *dev,
- struct mlx5_flows *list,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct rte_flow *flow = NULL;
- size_t size = 0;
+ struct mlx5_flow *dev_flow;
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
+ const struct rte_flow_action_rss *rss;
+ union {
+ struct rte_flow_expand_rss buf;
+ uint8_t buffer[2048];
+ } expand_buffer;
+ struct rte_flow_expand_rss *buf = &expand_buffer.buf;
int ret;
+ uint32_t i;
+ uint32_t flow_size;
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ ret = flow_drv_validate(dev, attr, items, actions, error);
if (ret < 0)
return NULL;
- size = ret;
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "not enough memory to create flow");
- return NULL;
+ flow_size = sizeof(struct rte_flow);
+ rss = flow_get_rss_action(actions);
+ if (rss)
+ flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ else
+ flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
+	flow = rte_calloc(__func__, 1, flow_size, 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "not enough memory to create flow");
+		return NULL;
+	}
+	flow->drv_type = flow_get_drv_type(dev, attr);
+ assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+ flow->drv_type < MLX5_FLOW_TYPE_MAX);
+ flow->queue = (void *)(flow + 1);
+ LIST_INIT(&flow->dev_flows);
+ if (rss && rss->types) {
+ unsigned int graph_root;
+
+ graph_root = find_graph_root(items, rss->level);
+ ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+ items, rss->types,
+ mlx5_support_expansion,
+ graph_root);
+ assert(ret > 0 &&
+ (unsigned int)ret < sizeof(expand_buffer.buffer));
+ } else {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)items;
}
- ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
- if (ret < 0) {
- rte_free(flow);
- return NULL;
+ for (i = 0; i < buf->entries; ++i) {
+ dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
+ actions, &item_flags, &action_flags,
+ error);
+ if (!dev_flow)
+ goto error;
+ dev_flow->flow = flow;
+ dev_flow->layers = item_flags;
+ /* Store actions once as expanded flows have same actions. */
+ if (i == 0)
+ flow->actions = action_flags;
+ assert(flow->actions == action_flags);
+ LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+ ret = flow_drv_translate(dev, dev_flow, attr,
+ buf->entry[i].pattern,
+ actions, error);
+ if (ret < 0)
+ goto error;
}
- assert((size_t)ret == size);
if (dev->data->dev_started) {
- ret = mlx5_flow_apply(dev, flow, error);
- if (ret < 0) {
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (flow) {
- mlx5_flow_remove(dev, flow);
- rte_free(flow);
- }
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
- }
+ ret = flow_drv_apply(dev, flow, error);
+ if (ret < 0)
+ goto error;
}
TAILQ_INSERT_TAIL(list, flow, next);
- mlx5_flow_rxq_flags_set(dev, flow);
+ flow_rxq_flags_set(dev, flow);
return flow;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ assert(flow);
+ flow_drv_destroy(dev, flow);
+ rte_free(flow);
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
}
/**
@@ -3068,9 +2100,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- return mlx5_flow_list_create
- (dev, &((struct priv *)dev->data->dev_private)->flows,
- attr, items, actions, error);
+ return flow_list_create(dev,
+ &((struct priv *)dev->data->dev_private)->flows,
+ attr, items, actions, error);
}
/**
@@ -3084,17 +2116,17 @@ mlx5_flow_create(struct rte_eth_dev *dev,
* Flow to destroy.
*/
static void
-mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
- struct rte_flow *flow)
+flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow)
{
- mlx5_flow_remove(dev, flow);
+ flow_drv_destroy(dev, flow);
TAILQ_REMOVE(list, flow, next);
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
*/
if (dev->data->dev_started)
- mlx5_flow_rxq_flags_trim(dev, flow);
+ flow_rxq_flags_trim(dev, flow);
rte_free(flow);
}
@@ -3113,7 +2145,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
struct rte_flow *flow;
flow = TAILQ_FIRST(list);
- mlx5_flow_list_destroy(dev, list, flow);
+ flow_list_destroy(dev, list, flow);
}
}
@@ -3131,8 +2163,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
struct rte_flow *flow;
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
- mlx5_flow_remove(dev, flow);
- mlx5_flow_rxq_flags_clear(dev);
+ flow_drv_remove(dev, flow);
+ flow_rxq_flags_clear(dev);
}
/**
@@ -3154,10 +2186,10 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
int ret = 0;
TAILQ_FOREACH(flow, list, next) {
- ret = mlx5_flow_apply(dev, flow, &error);
+ ret = flow_drv_apply(dev, flow, &error);
if (ret < 0)
goto error;
- mlx5_flow_rxq_flags_set(dev, flow);
+ flow_rxq_flags_set(dev, flow);
}
return 0;
error:
@@ -3228,7 +2260,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
},
{
.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
- RTE_FLOW_ITEM_TYPE_END,
+ RTE_FLOW_ITEM_TYPE_END,
.spec = vlan_spec,
.last = NULL,
.mask = vlan_mask,
@@ -3266,8 +2298,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
}
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
- flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
- actions, &error);
+ flow = flow_list_create(dev, &priv->ctrl_flows,
+ &attr, items, actions, &error);
if (!flow)
return -rte_errno;
return 0;
@@ -3307,7 +2339,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- mlx5_flow_list_destroy(dev, &priv->flows, flow);
+ flow_list_destroy(dev, &priv->flows, flow);
return 0;
}
@@ -3356,92 +2388,45 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
}
/**
- * Query flow counter.
- *
- * @param flow
- * Pointer to the flow.
+ * Query a flow.
*
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * @see rte_flow_query()
+ * @see rte_flow_ops
*/
static int
-mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
- void *data __rte_unused,
- struct rte_flow_error *error)
-{
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
- struct rte_flow_query_count *qc = data;
- uint64_t counters[2] = {0, 0};
- struct ibv_query_counter_set_attr query_cs_attr = {
- .cs = flow->counter->cs,
- .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
- };
- struct ibv_counter_set_data query_out = {
- .out = counters,
- .outlen = 2 * sizeof(uint64_t),
- };
- int err = mlx5_glue->query_counter_set(&query_cs_attr,
- &query_out);
+flow_drv_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type ftype = flow->drv_type;
- if (err)
- return rte_flow_error_set
- (error, err,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot read counter");
- qc->hits_set = 1;
- qc->bytes_set = 1;
- qc->hits = counters[0] - flow->counter->hits;
- qc->bytes = counters[1] - flow->counter->bytes;
- if (qc->reset) {
- flow->counter->hits = counters[0];
- flow->counter->bytes = counters[1];
- }
- return 0;
- }
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "flow does not have counter");
-#endif
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "counters are not available");
+ assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(ftype);
+
+ return fops->query(dev, flow, actions, data, error);
}
/**
- * Query a flows.
+ * Query a flow.
*
* @see rte_flow_query()
* @see rte_flow_ops
*/
int
-mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
+mlx5_flow_query(struct rte_eth_dev *dev,
struct rte_flow *flow,
const struct rte_flow_action *actions,
void *data,
struct rte_flow_error *error)
{
- int ret = 0;
+ int ret;
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_VOID:
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = mlx5_flow_query_count(flow, data, error);
- break;
- default:
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
- }
- if (ret < 0)
- return ret;
- }
+ ret = flow_drv_query(dev, flow, actions, data, error);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -3511,7 +2496,6 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
.dst_addr = input->flow.ip4_flow.dst_ip,
.time_to_live = input->flow.ip4_flow.ttl,
.type_of_service = input->flow.ip4_flow.tos,
- .next_proto_id = input->flow.ip4_flow.proto,
};
attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
.src_addr = mask->ipv4_mask.src_ip,
@@ -3663,9 +2647,8 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
return ret;
- flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
- attributes.items, attributes.actions,
- &error);
+ flow = flow_list_create(dev, &priv->flows, &attributes.attr,
+ attributes.items, attributes.actions, &error);
if (flow) {
DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
(void *)flow);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
new file mode 100644
index 00000000..61299d66
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_FLOW_H_
+#define RTE_PMD_MLX5_FLOW_H_
+
+#include <netinet/in.h>
+#include <sys/queue.h>
+#include <stdalign.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+/* Pattern outer Layer bits. */
+#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
+#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
+#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
+#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
+
+/* Pattern inner Layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
+#define MLX5_FLOW_LAYER_MPLS (1u << 15)
+
+/* General pattern items bits. */
+#define MLX5_FLOW_ITEM_METADATA (1u << 16)
+
+/* Outer Masks. */
+#define MLX5_FLOW_LAYER_OUTER_L3 \
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+#define MLX5_FLOW_LAYER_OUTER_L4 \
+ (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+ MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+ (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+ (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+ MLX5_FLOW_LAYER_INNER_L4)
+
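/*
 * Usage sketch (illustrative only): the MLX5_FLOW_LAYER_* bits are
 * accumulated in the per-device-flow "layers" bit-field during translation
 * and tested with the composite masks above, e.g.:
 *
 *	int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 *	int inner_l4 = !!(dev_flow->layers & MLX5_FLOW_LAYER_INNER_L4);
 */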
+/* Actions */
+#define MLX5_FLOW_ACTION_DROP (1u << 0)
+#define MLX5_FLOW_ACTION_QUEUE (1u << 1)
+#define MLX5_FLOW_ACTION_RSS (1u << 2)
+#define MLX5_FLOW_ACTION_FLAG (1u << 3)
+#define MLX5_FLOW_ACTION_MARK (1u << 4)
+#define MLX5_FLOW_ACTION_COUNT (1u << 5)
+#define MLX5_FLOW_ACTION_PORT_ID (1u << 6)
+#define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7)
+#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8)
+#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9)
+#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10)
+#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11)
+#define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12)
+#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13)
+#define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14)
+#define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15)
+#define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16)
+#define MLX5_FLOW_ACTION_JUMP (1u << 17)
+#define MLX5_FLOW_ACTION_SET_TTL (1u << 18)
+#define MLX5_FLOW_ACTION_DEC_TTL (1u << 19)
+#define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20)
+#define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21)
+
+#define MLX5_FLOW_FATE_ACTIONS \
+ (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)
+
+#ifndef IPPROTO_MPLS
+#define IPPROTO_MPLS 137
+#endif
+
+/* UDP port numbers for VxLAN. */
+#define MLX5_UDP_PORT_VXLAN 4789
+#define MLX5_UDP_PORT_VXLAN_GPE 4790
+
+/* Priority reserved for default flows. */
+#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+
+/*
+ * Number of sub priorities.
+ * For each kind of pattern (L2, L3, L4) to match correctly on the NIC
+ * (firmware dependent), L4 must have the highest priority, followed by L3
+ * and finally L2.
+ */
+#define MLX5_PRIORITY_MAP_L2 2
+#define MLX5_PRIORITY_MAP_L3 1
+#define MLX5_PRIORITY_MAP_L4 0
+#define MLX5_PRIORITY_MAP_MAX 3
+
+/* Valid layer type for IPV4 RSS. */
+#define MLX5_IPV4_LAYER_TYPES \
+ (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+
+/* IBV hash source bits for IPV4. */
+#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
+
+/* Valid layer type for IPV6 RSS. */
+#define MLX5_IPV6_LAYER_TYPES \
+ (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)
+
+/* IBV hash source bits for IPV6. */
+#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
+
+/* Max number of actions per DV flow. */
+#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
+
+enum mlx5_flow_drv_type {
+ MLX5_FLOW_TYPE_MIN,
+ MLX5_FLOW_TYPE_DV,
+ MLX5_FLOW_TYPE_TCF,
+ MLX5_FLOW_TYPE_VERBS,
+ MLX5_FLOW_TYPE_MAX,
+};
+
+/* Matcher PRM representation */
+struct mlx5_flow_dv_match_params {
+ size_t size;
+ /**< Size of match value. Do NOT split size and key! */
+ uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
+ /**< Matcher value. This value is used as the mask or as a key. */
+};
+
+/* Matcher structure. */
+struct mlx5_flow_dv_matcher {
+ LIST_ENTRY(mlx5_flow_dv_matcher) next;
+ /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /**< Reference counter. */
+ void *matcher_object; /**< Pointer to DV matcher */
+ uint16_t crc; /**< CRC of key. */
+ uint16_t priority; /**< Priority of matcher. */
+ uint8_t egress; /**< Egress matcher. */
+ struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
+};
+
+/* DV flows structure. */
+struct mlx5_flow_dv {
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+ /* Flow DV api: */
+ struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
+ struct mlx5_flow_dv_match_params value;
+ /**< Holds the value that the packet is compared to. */
+ struct ibv_flow *flow; /**< Installed flow. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5dv_flow_action_attr actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
+ /**< Action list. */
+#endif
+ int actions_n; /**< number of actions. */
+};
+
+/** Linux TC flower driver for E-Switch flow. */
+struct mlx5_flow_tcf {
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+};
+
+/* Verbs specification header. */
+struct ibv_spec_header {
+ enum ibv_flow_spec_type type;
+ uint16_t size;
+};
+
+/** Verbs device flow structure. */
+struct mlx5_flow_verbs {
+ LIST_ENTRY(mlx5_flow_verbs) next;
+ unsigned int size; /**< Size of the attribute. */
+ struct {
+ struct ibv_flow_attr *attr;
+		/**< Flow attributes, at the head of the specification buffer. */
+ uint8_t *specs; /**< Pointer to the specifications. */
+ };
+ struct ibv_flow *flow; /**< Verbs flow pointer. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+ uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+};
+
+/** Device flow structure. */
+struct mlx5_flow {
+ LIST_ENTRY(mlx5_flow) next;
+ struct rte_flow *flow; /**< Pointer to the main flow. */
+ uint64_t layers;
+ /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+ union {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5_flow_dv dv;
+#endif
+ struct mlx5_flow_tcf tcf;
+ struct mlx5_flow_verbs verbs;
+ };
+};
+
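/*
 * Illustrative sketch (hypothetical usage): each backend fills its own member
 * of the union above while translating, e.g. the Verbs path works on
 * dev_flow->verbs and the E-Switch path on dev_flow->tcf:
 *
 *	dev_flow->verbs.hash_fields = hash_fields;
 *	dev_flow->tcf.nlh = nlh;
 */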
+/* Counters information. */
+struct mlx5_flow_counter {
+ LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
+ uint32_t shared:1; /**< Share counter ID with other flow rules. */
+ uint32_t ref_cnt:31; /**< Reference counter. */
+ uint32_t id; /**< Counter ID. */
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
+ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
+#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ struct ibv_counters *cs; /**< Holds the counters for the rule. */
+#endif
+ uint64_t hits; /**< Number of packets matched by the rule. */
+ uint64_t bytes; /**< Number of bytes matched by the rule. */
+};
+
+/* Flow structure. */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
+ struct mlx5_flow_counter *counter; /**< Holds flow counter. */
+ struct rte_flow_action_rss rss;/**< RSS context. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+ LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
+ /**< Device flows that are part of the flow. */
+ uint64_t actions;
+ /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
+};
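/*
 * Illustrative sketch (hypothetical helper name): drivers walk the device
 * flows attached to a parent rte_flow through the dev_flows list, e.g.:
 *
 *	struct mlx5_flow *dev_flow;
 *
 *	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
 *		apply_one(dev, dev_flow);
 */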
+typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
+ (const struct rte_flow_attr *attr, const struct rte_flow_item items[],
+ const struct rte_flow_action actions[], uint64_t *item_flags,
+ uint64_t *action_flags, struct rte_flow_error *error);
+typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error);
+typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
+ struct rte_flow *flow);
+typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
+ struct rte_flow *flow);
+typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error);
+struct mlx5_flow_driver_ops {
+ mlx5_flow_validate_t validate;
+ mlx5_flow_prepare_t prepare;
+ mlx5_flow_translate_t translate;
+ mlx5_flow_apply_t apply;
+ mlx5_flow_remove_t remove;
+ mlx5_flow_destroy_t destroy;
+ mlx5_flow_query_t query;
+};
+
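/*
 * Illustrative sketch: each backend exports one such ops table and it is
 * wired into the dispatch array in mlx5_flow.c.  mlx5_flow_null_drv_ops,
 * earlier in this patch, is the only table shown in this section; a backend
 * table is expected to look similar (the flow_verbs_* names below are
 * hypothetical):
 *
 *	const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
 *		.validate = flow_verbs_validate,
 *		.prepare = flow_verbs_prepare,
 *		.translate = flow_verbs_translate,
 *		.apply = flow_verbs_apply,
 *		.remove = flow_verbs_remove,
 *		.destroy = flow_verbs_destroy,
 *		.query = flow_verbs_query,
 *	};
 */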
+/* mlx5_flow.c */
+
+uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
+ uint64_t layer_types,
+ uint64_t hash_fields);
+uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
+ uint32_t subpriority);
+int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_action_drop(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_action_flag(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
+ uint64_t action_flags,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow_error *error);
+int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+ const uint8_t *mask,
+ const uint8_t *nic_mask,
+ unsigned int size,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
+ int64_t item_flags,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_mpls(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ const struct rte_flow_item_tcp *flow_mask,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_flow_error *error);
+int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+
+/* mlx5_flow_tcf.c */
+
+int mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
+ unsigned int ifindex, struct rte_flow_error *error);
+struct mlx5_flow_tcf_context *mlx5_flow_tcf_context_create(void);
+void mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx);
+
+#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
new file mode 100644
index 00000000..8f729f44
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -0,0 +1,1492 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <sys/queue.h>
+#include <stdalign.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+
+#include "mlx5.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+#include "mlx5_glue.h"
+#include "mlx5_flow.h"
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+
+/**
+ * Validate META item.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_meta(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_meta *spec = item->spec;
+ const struct rte_flow_item_meta *mask = item->mask;
+ const struct rte_flow_item_meta nic_mask = {
+ .data = RTE_BE32(UINT32_MAX)
+ };
+ int ret;
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
+ return rte_flow_error_set(error, EPERM,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "match on metadata offload "
+ "configuration is off for this port");
+ if (!spec)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "data cannot be empty");
+ if (!spec->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ NULL,
+ "data cannot be zero");
+ if (!mask)
+ mask = &rte_flow_item_meta_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_meta),
+ error);
+ if (ret < 0)
+ return ret;
+ if (attr->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "pattern not supported for ingress");
+ return 0;
+}
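+
+/*
+ * Illustrative example (assumed values, not taken from the patch): a META
+ * item this validator accepts on an egress flow could look like
+ *
+ *   struct rte_flow_item_meta spec = { .data = RTE_BE32(0xcafe) };
+ *   struct rte_flow_item_meta mask = { .data = RTE_BE32(UINT32_MAX) };
+ *
+ * provided DEV_TX_OFFLOAD_MATCH_METADATA is set in txmode.offloads;
+ * a zero spec->data or an ingress attribute is rejected above.
+ */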
+
+/**
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
+ *
+ * @param[in] dev
+ * Pointer to dev struct.
+ * @param[in] attributes
+ * Pointer to flow attributes
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint32_t priority_max = priv->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priority out of range");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ if (!(attributes->egress ^ attributes->ingress))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "must specify exactly one of "
+ "ingress or egress");
+ return 0;
+}
+
+/**
+ * Internal validation function for validating both actions and items.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
+ int tunnel = 0;
+ uint8_t next_protocol = 0xff;
+ int actions_n = 0;
+
+ if (items == NULL)
+ return -1;
+ ret = flow_dv_validate_attributes(dev, attr, error);
+ if (ret < 0)
+ return ret;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_validate_item_vlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &rte_flow_item_tcp_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ ret = mlx5_flow_validate_item_gre(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_validate_item_vxlan_gpe(items,
+ item_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ ret = flow_dv_validate_item_meta(dev, items, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_ITEM_METADATA;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "too many actions");
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = mlx5_flow_validate_action_flag(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = mlx5_flow_validate_action_mark(actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_validate_action_drop(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(actions,
+ action_flags, dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mlx5_flow_validate_action_rss(actions,
+ action_flags, dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_validate_action_count(dev, attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "no fate action is found");
+ return 0;
+}
+
+/**
+ * Internal preparation function. Allocates the DV flow structure,
+ * whose size is constant.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] item_flags
+ * Pointer to bit mask of all items detected.
+ * @param[out] action_flags
+ * Pointer to bit mask of all actions detected.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to mlx5_flow object on success,
+ * otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_flow *
+flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ uint64_t *item_flags __rte_unused,
+ uint64_t *action_flags __rte_unused,
+ struct rte_flow_error *error)
+{
+ uint32_t size = sizeof(struct mlx5_flow);
+ struct mlx5_flow *flow;
+
+ flow = rte_calloc(__func__, 1, size, 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create flow");
+ return NULL;
+ }
+ flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
+ return flow;
+}
+
+/**
+ * Add Ethernet item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_eth(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner)
+{
+ const struct rte_flow_item_eth *eth_m = item->mask;
+ const struct rte_flow_item_eth *eth_v = item->spec;
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ void *headers_m;
+ void *headers_v;
+ char *l24_v;
+ unsigned int i;
+
+ if (!eth_v)
+ return;
+ if (!eth_m)
+ eth_m = &nic_mask;
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+ &eth_m->dst, sizeof(eth_m->dst));
+ /* The value must be in the range of the mask. */
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+ for (i = 0; i < sizeof(eth_m->dst); ++i)
+ l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+ &eth_m->src, sizeof(eth_m->src));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+ /* The value must be in the range of the mask. */
+ for (i = 0; i < sizeof(eth_m->src); ++i)
+ l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+}
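+
+/*
+ * Worked example of the mask/value relation above (values assumed): with a
+ * destination MAC spec of aa:bb:cc:dd:ee:ff and a mask of ff:ff:ff:00:00:00,
+ * the matcher buffer gets the mask unchanged while the value buffer gets
+ * spec & mask, i.e. aa:bb:cc:00:00:00, keeping the value within the range
+ * of the mask.
+ */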
+
+/**
+ * Add VLAN item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vlan(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_vlan *vlan_m = item->mask;
+ const struct rte_flow_item_vlan *vlan_v = item->spec;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(0x0fff),
+ .inner_type = RTE_BE16(0xffff),
+ };
+ void *headers_m;
+ void *headers_v;
+ uint16_t tci_m;
+ uint16_t tci_v;
+
+ if (!vlan_v)
+ return;
+ if (!vlan_m)
+ vlan_m = &nic_mask;
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ tci_m = rte_be_to_cpu_16(vlan_m->tci);
+ tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+}
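+
+/*
+ * TCI layout behind the shifts above (worked example, value assumed): the
+ * 16-bit TCI packs PCP (bits 15-13), DEI/CFI (bit 12) and VID (bits 11-0),
+ * and MLX5_SET() truncates to each field width, so for tci = 0xA005 the
+ * matcher value ends up with first_vid = 0x005, first_cfi = 0 and
+ * first_prio = 5.
+ */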
+
+/**
+ * Add IPV4 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv4(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
+ const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+ char *l24_m;
+ char *l24_v;
+ uint8_t tos;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
+ if (!ipv4_v)
+ return;
+ if (!ipv4_m)
+ ipv4_m = &nic_mask;
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
+ *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
+ *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
+ tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
+ ipv4_m->hdr.type_of_service);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
+ ipv4_m->hdr.type_of_service >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv4_m->hdr.next_proto_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
+}
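+
+/*
+ * TOS split used above (worked example, value assumed): for a
+ * type_of_service byte of 0xB8 (DSCP 46, ECN 0), ip_ecn receives the whole
+ * byte truncated to its 2-bit width (0) while ip_dscp receives
+ * 0xB8 >> 2 = 0x2E (46).
+ */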
+
+/**
+ * Add IPV6 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_ipv6(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
+ const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ char *l24_m;
+ char *l24_v;
+ uint32_t vtc_m;
+ uint32_t vtc_v;
+ int i;
+ int size;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
+ if (!ipv6_v)
+ return;
+ if (!ipv6_m)
+ ipv6_m = &nic_mask;
+ size = sizeof(ipv6_m->hdr.dst_addr);
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
+ for (i = 0; i < size; ++i)
+ l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ memcpy(l24_m, ipv6_m->hdr.src_addr, size);
+ for (i = 0; i < size; ++i)
+ l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
+ /* TOS. */
+ vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
+ vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
+ /* Label. */
+ if (inner) {
+ MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
+ vtc_m);
+ MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
+ vtc_v);
+ } else {
+ MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
+ vtc_m);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
+ vtc_v);
+ }
+ /* Protocol. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv6_m->hdr.proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv6_v->hdr.proto & ipv6_m->hdr.proto);
+}
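+
+/*
+ * vtc_flow layout behind the shifts above: the 32-bit field packs version
+ * (bits 31-28), traffic class (bits 27-20) and flow label (bits 19-0).
+ * Hence >> 20 exposes the traffic class for the 2-bit ip_ecn field,
+ * >> 22 exposes its DSCP part for ip_dscp, and the unshifted value feeds
+ * the 20-bit *_ipv6_flow_label fields, which keep only the label.
+ */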
+
+/**
+ * Add TCP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_tcp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_tcp *tcp_m = item->mask;
+ const struct rte_flow_item_tcp *tcp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
+ if (!tcp_v)
+ return;
+ if (!tcp_m)
+ tcp_m = &rte_flow_item_tcp_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
+ rte_be_to_cpu_16(tcp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
+ rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
+ rte_be_to_cpu_16(tcp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
+ rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
+}
+
+/**
+ * Add UDP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_udp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_udp *udp_m = item->mask;
+ const struct rte_flow_item_udp *udp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+ if (!udp_v)
+ return;
+ if (!udp_m)
+ udp_m = &rte_flow_item_udp_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
+ rte_be_to_cpu_16(udp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ rte_be_to_cpu_16(udp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
+}
+
+/**
+ * Add GRE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_gre(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_gre *gre_m = item->mask;
+ const struct rte_flow_item_gre *gre_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
+ if (!gre_v)
+ return;
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+ rte_be_to_cpu_16(gre_m->protocol));
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+}
+
+/**
+ * Add NVGRE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_nvgre(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_nvgre *nvgre_m = item->mask;
+ const struct rte_flow_item_nvgre *nvgre_v = item->spec;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ const char *tni_flow_id_m = (const char *)nvgre_m->tni;
+ const char *tni_flow_id_v = (const char *)nvgre_v->tni;
+ char *gre_key_m;
+ char *gre_key_v;
+ int size;
+ int i;
+
+ flow_dv_translate_item_gre(matcher, key, item, inner);
+ if (!nvgre_v)
+ return;
+ if (!nvgre_m)
+ nvgre_m = &rte_flow_item_nvgre_mask;
+ size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
+ gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
+ gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
+ memcpy(gre_key_m, tni_flow_id_m, size);
+ for (i = 0; i < size; ++i)
+ gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
+}
+
+/**
+ * Add VXLAN item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vxlan(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_vxlan *vxlan_m = item->mask;
+ const struct rte_flow_item_vxlan *vxlan_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ char *vni_m;
+ char *vni_v;
+ uint16_t dport;
+ int size;
+ int i;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
+ MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!vxlan_v)
+ return;
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_mask;
+ size = sizeof(vxlan_m->vni);
+ vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
+ vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
+ memcpy(vni_m, vxlan_m->vni, size);
+ for (i = 0; i < size; ++i)
+ vni_v[i] = vni_m[i] & vxlan_v->vni[i];
+}
+
+/**
+ * Add META item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_meta(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_meta *meta_m;
+ const struct rte_flow_item_meta *meta_v;
+ void *misc2_m =
+ MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
+ void *misc2_v =
+ MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+
+ meta_m = (const void *)item->mask;
+ if (!meta_m)
+ meta_m = &rte_flow_item_meta_mask;
+ meta_v = (const void *)item->spec;
+ if (meta_v) {
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
+ rte_be_to_cpu_32(meta_m->data));
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
+ rte_be_to_cpu_32(meta_v->data & meta_m->data));
+ }
+}
+
+/**
+ * Update the matcher and the value based on the selected item.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_create_item(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ struct mlx5_flow *dev_flow,
+ int inner)
+{
+ struct mlx5_flow_dv_matcher *tmatcher = matcher;
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
+ inner);
+ tmatcher->priority = MLX5_PRIORITY_MAP_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
+ inner);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
+ inner);
+ tmatcher->priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, inner,
+ MLX5_IPV4_LAYER_TYPES,
+ MLX5_IPV4_IBV_RX_HASH);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
+ inner);
+ tmatcher->priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, inner,
+ MLX5_IPV6_LAYER_TYPES,
+ MLX5_IPV6_IBV_RX_HASH);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
+ inner);
+ tmatcher->priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, inner,
+ ETH_RSS_TCP,
+ (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP));
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
+ inner);
+ tmatcher->priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, inner,
+ ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
+ inner);
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
+ inner);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
+ inner);
+ break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * Store the requested actions in an array.
+ *
+ * @param[in] action
+ * Flow action to translate.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ */
+static void
+flow_dv_create_action(const struct rte_flow_action *action,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ int actions_n = dev_flow->dv.actions_n;
+ struct rte_flow *flow = dev_flow->flow;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
+ dev_flow->dv.actions[actions_n].tag_value =
+ mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ actions_n++;
+ flow->actions |= MLX5_FLOW_ACTION_FLAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
+ dev_flow->dv.actions[actions_n].tag_value =
+ mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (action->conf))->id);
+ flow->actions |= MLX5_FLOW_ACTION_MARK;
+ actions_n++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
+ flow->actions |= MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = action->conf;
+ flow->rss.queue_num = 1;
+ (*flow->queue)[0] = queue->index;
+ flow->actions |= MLX5_FLOW_ACTION_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = action->conf;
+ if (flow->queue)
+ memcpy((*flow->queue), rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ flow->rss.queue_num = rss->queue_num;
+ memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+ flow->rss.types = rss->types;
+ flow->rss.level = rss->level;
+ /* Added to array only in apply since we need the QP */
+ flow->actions |= MLX5_FLOW_ACTION_RSS;
+ break;
+ default:
+ break;
+ }
+ dev_flow->dv.actions_n = actions_n;
+}
+
+static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
+
+#define HEADER_IS_ZERO(match_criteria, headers) \
+ !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
+ matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
+
+/**
+ * Calculate flow matcher enable bitmap.
+ *
+ * @param match_criteria
+ * Pointer to flow matcher criteria.
+ *
+ * @return
+ * Bitmap of enabled fields.
+ */
+static uint8_t
+flow_dv_matcher_enable(uint32_t *match_criteria)
+{
+ uint8_t match_criteria_enable;
+
+ match_criteria_enable =
+ (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
+
+ return match_criteria_enable;
+}
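+
+/*
+ * Example outcome (assumed mask): a matcher whose mask only touches the
+ * outer headers and misc_parameters_2 (say outer ETH plus META) yields
+ *
+ *   match_criteria_enable =
+ *           (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
+ *           (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
+ */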
+
+/**
+ * Register the flow matcher.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] matcher
+ * Pointer to flow matcher.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_matcher_register(struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_matcher *matcher,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_matcher *cache_matcher;
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .match_mask = (void *)&matcher->mask,
+ };
+
+ /* Lookup from cache. */
+ LIST_FOREACH(cache_matcher, &priv->matchers, next) {
+ if (matcher->crc == cache_matcher->crc &&
+ matcher->priority == cache_matcher->priority &&
+ matcher->egress == cache_matcher->egress &&
+ !memcmp((const void *)matcher->mask.buf,
+ (const void *)cache_matcher->mask.buf,
+ cache_matcher->mask.size)) {
+ DRV_LOG(DEBUG,
+ "priority %hd use %s matcher %p: refcnt %d++",
+ cache_matcher->priority,
+ cache_matcher->egress ? "tx" : "rx",
+ (void *)cache_matcher,
+ rte_atomic32_read(&cache_matcher->refcnt));
+ rte_atomic32_inc(&cache_matcher->refcnt);
+ dev_flow->dv.matcher = cache_matcher;
+ return 0;
+ }
+ }
+ /* Register new matcher. */
+ cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
+ if (!cache_matcher)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate matcher memory");
+ *cache_matcher = *matcher;
+ dv_attr.match_criteria_enable =
+ flow_dv_matcher_enable(cache_matcher->mask.buf);
+ dv_attr.priority = matcher->priority;
+ if (matcher->egress)
+ dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
+ cache_matcher->matcher_object =
+ mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
+ if (!cache_matcher->matcher_object)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create matcher");
+ rte_atomic32_inc(&cache_matcher->refcnt);
+ LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
+ dev_flow->dv.matcher = cache_matcher;
+ DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
+ cache_matcher->priority,
+ cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
+ rte_atomic32_read(&cache_matcher->refcnt));
+ return 0;
+}
+
+
+/**
+ * Fill the flow with DV spec.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint64_t priority = attr->priority;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf),
+ },
+ };
+ void *match_value = dev_flow->dv.value.buf;
+ int tunnel = 0;
+
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = priv->config.flow_prio - 1;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ flow_dv_create_item(&matcher, match_value, items, dev_flow,
+ tunnel);
+ }
+ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ matcher.mask.size);
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = priv->config.flow_prio - 1;
+ matcher.priority = mlx5_flow_adjust_priority(dev, priority,
+ matcher.priority);
+ matcher.egress = attr->egress;
+ if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+ return -rte_errno;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
+ flow_dv_create_action(actions, dev_flow);
+ return 0;
+}
+
+/**
+ * Apply the flow to the NIC.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv *dv;
+ struct mlx5_flow *dev_flow;
+ int n;
+ int err;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ dv = &dev_flow->dv;
+ n = dv->actions_n;
+ if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+ dv->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dv->hrxq) {
+ rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get drop hash queue");
+ goto error;
+ }
+ dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
+ dv->actions[n].qp = dv->hrxq->qp;
+ n++;
+ } else if (flow->actions &
+ (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
+ struct mlx5_hrxq *hrxq;
+ hrxq = mlx5_hrxq_get(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ dv->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num);
+ if (!hrxq)
+ hrxq = mlx5_hrxq_new
+ (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
+ dv->hash_fields, (*flow->queue),
+ flow->rss.queue_num,
+ !!(dev_flow->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ dv->hrxq = hrxq;
+ dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
+ dv->actions[n].qp = hrxq->qp;
+ n++;
+ }
+ dv->flow =
+ mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+ (void *)&dv->value, n,
+ dv->actions);
+ if (!dv->flow) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "hardware refuses to create flow");
+ goto error;
+ }
+ }
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ struct mlx5_flow_dv *dv = &dev_flow->dv;
+ if (dv->hrxq) {
+ if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, dv->hrxq);
+ dv->hrxq = NULL;
+ }
+ }
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Release the flow matcher.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to mlx5_flow.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_matcher_release(struct rte_eth_dev *dev,
+ struct mlx5_flow *flow)
+{
+ struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+
+ assert(matcher->matcher_object);
+ DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
+ dev->data->port_id, (void *)matcher,
+ rte_atomic32_read(&matcher->refcnt));
+ if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
+ claim_zero(mlx5_glue->dv_destroy_flow_matcher
+ (matcher->matcher_object));
+ LIST_REMOVE(matcher, next);
+ rte_free(matcher);
+ DRV_LOG(DEBUG, "port %u matcher %p: removed",
+ dev->data->port_id, (void *)matcher);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Remove the flow from the NIC but keep it in memory.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow_dv *dv;
+ struct mlx5_flow *dev_flow;
+
+ if (!flow)
+ return;
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ dv = &dev_flow->dv;
+ if (dv->flow) {
+ claim_zero(mlx5_glue->destroy_flow(dv->flow));
+ dv->flow = NULL;
+ }
+ if (dv->hrxq) {
+ if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, dv->hrxq);
+ dv->hrxq = NULL;
+ }
+ }
+ if (flow->counter)
+ flow->counter = NULL;
+}
+
+/**
+ * Remove the flow from the NIC and the memory.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ if (!flow)
+ return;
+ flow_dv_remove(dev, flow);
+ while (!LIST_EMPTY(&flow->dev_flows)) {
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ LIST_REMOVE(dev_flow, next);
+ if (dev_flow->dv.matcher)
+ flow_dv_matcher_release(dev, dev_flow);
+ rte_free(dev_flow);
+ }
+}
+
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+static int
+flow_dv_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+
+const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
+ .validate = flow_dv_validate,
+ .prepare = flow_dv_prepare,
+ .translate = flow_dv_translate,
+ .apply = flow_dv_apply,
+ .remove = flow_dv_remove,
+ .destroy = flow_dv_destroy,
+ .query = flow_dv_query,
+};
+
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
new file mode 100644
index 00000000..719fb106
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -0,0 +1,2913 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <libmnl/libmnl.h>
+#include <linux/gen_stats.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/pkt_sched.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <netinet/in.h>
+#include <stdalign.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_malloc.h>
+#include <rte_common.h>
+
+#include "mlx5.h"
+#include "mlx5_flow.h"
+#include "mlx5_autoconf.h"
+
+#ifdef HAVE_TC_ACT_VLAN
+
+#include <linux/tc_act/tc_vlan.h>
+
+#else /* HAVE_TC_ACT_VLAN */
+
+#define TCA_VLAN_ACT_POP 1
+#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_PARMS 2
+#define TCA_VLAN_PUSH_VLAN_ID 3
+#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
+#define TCA_VLAN_PAD 5
+#define TCA_VLAN_PUSH_VLAN_PRIORITY 6
+
+struct tc_vlan {
+ tc_gen;
+ int v_action;
+};
+
+#endif /* HAVE_TC_ACT_VLAN */
+
+#ifdef HAVE_TC_ACT_PEDIT
+
+#include <linux/tc_act/tc_pedit.h>
+
+#else /* HAVE_TC_ACT_PEDIT */
+
+enum {
+ TCA_PEDIT_UNSPEC,
+ TCA_PEDIT_TM,
+ TCA_PEDIT_PARMS,
+ TCA_PEDIT_PAD,
+ TCA_PEDIT_PARMS_EX,
+ TCA_PEDIT_KEYS_EX,
+ TCA_PEDIT_KEY_EX,
+ __TCA_PEDIT_MAX
+};
+
+enum {
+ TCA_PEDIT_KEY_EX_HTYPE = 1,
+ TCA_PEDIT_KEY_EX_CMD = 2,
+ __TCA_PEDIT_KEY_EX_MAX
+};
+
+enum pedit_header_type {
+ TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0,
+ TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1,
+ TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2,
+ TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3,
+ TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4,
+ TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
+ __PEDIT_HDR_TYPE_MAX,
+};
+
+enum pedit_cmd {
+ TCA_PEDIT_KEY_EX_CMD_SET = 0,
+ TCA_PEDIT_KEY_EX_CMD_ADD = 1,
+ __PEDIT_CMD_MAX,
+};
+
+struct tc_pedit_key {
+ __u32 mask; /* AND */
+ __u32 val; /* XOR */
+ __u32 off; /* offset */
+ __u32 at;
+ __u32 offmask;
+ __u32 shift;
+};
+
+__extension__
+struct tc_pedit_sel {
+ tc_gen;
+ unsigned char nkeys;
+ unsigned char flags;
+ struct tc_pedit_key keys[0];
+};
+
+#endif /* HAVE_TC_ACT_PEDIT */
+
+/* Normally found in linux/netlink.h. */
+#ifndef NETLINK_CAP_ACK
+#define NETLINK_CAP_ACK 10
+#endif
+
+/* Normally found in linux/pkt_sched.h. */
+#ifndef TC_H_MIN_INGRESS
+#define TC_H_MIN_INGRESS 0xfff2u
+#endif
+
+/* Normally found in linux/pkt_cls.h. */
+#ifndef TCA_CLS_FLAGS_SKIP_SW
+#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
+#endif
+#ifndef HAVE_TCA_CHAIN
+#define TCA_CHAIN 11
+#endif
+#ifndef HAVE_TCA_FLOWER_ACT
+#define TCA_FLOWER_ACT 3
+#endif
+#ifndef HAVE_TCA_FLOWER_FLAGS
+#define TCA_FLOWER_FLAGS 22
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
+#define TCA_FLOWER_KEY_ETH_TYPE 8
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
+#define TCA_FLOWER_KEY_ETH_DST 4
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
+#define TCA_FLOWER_KEY_ETH_DST_MASK 5
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
+#define TCA_FLOWER_KEY_ETH_SRC 6
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
+#define TCA_FLOWER_KEY_ETH_SRC_MASK 7
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
+#define TCA_FLOWER_KEY_IP_PROTO 9
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
+#define TCA_FLOWER_KEY_IPV4_SRC 10
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
+#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
+#define TCA_FLOWER_KEY_IPV4_DST 12
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
+#define TCA_FLOWER_KEY_IPV4_DST_MASK 13
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
+#define TCA_FLOWER_KEY_IPV6_SRC 14
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
+#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
+#define TCA_FLOWER_KEY_IPV6_DST 16
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
+#define TCA_FLOWER_KEY_IPV6_DST_MASK 17
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
+#define TCA_FLOWER_KEY_TCP_SRC 18
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
+#define TCA_FLOWER_KEY_TCP_SRC_MASK 35
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
+#define TCA_FLOWER_KEY_TCP_DST 19
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
+#define TCA_FLOWER_KEY_TCP_DST_MASK 36
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
+#define TCA_FLOWER_KEY_UDP_SRC 20
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
+#define TCA_FLOWER_KEY_UDP_SRC_MASK 37
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
+#define TCA_FLOWER_KEY_UDP_DST 21
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
+#define TCA_FLOWER_KEY_UDP_DST_MASK 38
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
+#define TCA_FLOWER_KEY_VLAN_ID 23
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
+#define TCA_FLOWER_KEY_VLAN_PRIO 24
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
+#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS
+#define TCA_FLOWER_KEY_TCP_FLAGS 71
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK
+#define TCA_FLOWER_KEY_TCP_FLAGS_MASK 72
+#endif
+#ifndef HAVE_TC_ACT_GOTO_CHAIN
+#define TC_ACT_GOTO_CHAIN 0x20000000
+#endif
+
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN 16
+#endif
+
+#ifndef IPV4_ADDR_LEN
+#define IPV4_ADDR_LEN 4
+#endif
+
+#ifndef TP_PORT_LEN
+#define TP_PORT_LEN 2 /* Transport Port (UDP/TCP) Length */
+#endif
+
+#ifndef TTL_LEN
+#define TTL_LEN 1
+#endif
+
+#ifndef TCA_ACT_MAX_PRIO
+#define TCA_ACT_MAX_PRIO 32
+#endif
+
+/**
+ * Structure for holding netlink context.
+ * Note the size of the message buffer, which is MNL_SOCKET_BUFFER_SIZE;
+ * using this (8 KB) buffer size ensures that netlink messages will never
+ * be truncated.
+ */
+struct mlx5_flow_tcf_context {
+ struct mnl_socket *nl; /* NETLINK_ROUTE libmnl socket. */
+ uint32_t seq; /* Message sequence number. */
+ uint32_t buf_size; /* Message buffer size. */
+ uint8_t *buf; /* Message buffer. */
+};
+
+/** Structure used when extracting the values of flow counters
+ * from a netlink message.
+ */
+struct flow_tcf_stats_basic {
+ bool valid;
+ struct gnet_stats_basic counters;
+};
+
+/** Empty masks for known item types. */
+static const union {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} flow_tcf_mask_empty;
+
+/** Supported masks for known item types. */
+static const struct {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} flow_tcf_mask_supported = {
+ .port_id = {
+ .id = 0xffffffff,
+ },
+ .eth = {
+ .type = RTE_BE16(0xffff),
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .vlan = {
+ /* PCP and VID only, no DEI. */
+ .tci = RTE_BE16(0xefff),
+ .inner_type = RTE_BE16(0xffff),
+ },
+ .ipv4.hdr = {
+ .next_proto_id = 0xff,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ },
+ .ipv6.hdr = {
+ .proto = 0xff,
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .tcp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ .tcp_flags = 0xff,
+ },
+ .udp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+};
+
+#define SZ_NLATTR_HDR MNL_ALIGN(sizeof(struct nlattr))
+#define SZ_NLATTR_NEST SZ_NLATTR_HDR
+#define SZ_NLATTR_DATA_OF(len) MNL_ALIGN(SZ_NLATTR_HDR + (len))
+#define SZ_NLATTR_TYPE_OF(typ) SZ_NLATTR_DATA_OF(sizeof(typ))
+#define SZ_NLATTR_STRZ_OF(str) SZ_NLATTR_DATA_OF(strlen(str) + 1)
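+
+/*
+ * Sizing sketch (illustrative): these helpers pre-compute the netlink
+ * buffer space a flower rule needs. For instance a 6-byte MAC key costs
+ * SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN), i.e. the aligned nlattr header plus
+ * six data bytes rounded up by MNL_ALIGN(), while a nested attribute only
+ * costs its header (SZ_NLATTR_NEST).
+ */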
+
+#define PTOI_TABLE_SZ_MAX(dev) (mlx5_dev_to_port_id((dev)->device, NULL, 0) + 2)
+
+/** DPDK port to network interface index (ifindex) conversion. */
+struct flow_tcf_ptoi {
+ uint16_t port_id; /**< DPDK port ID. */
+ unsigned int ifindex; /**< Network interface index. */
+};
+
+/* Due to a limitation on driver/FW. */
+#define MLX5_TCF_GROUP_ID_MAX 3
+#define MLX5_TCF_GROUP_PRIORITY_MAX 14
+
+#define MLX5_TCF_FATE_ACTIONS \
+ (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
+ MLX5_FLOW_ACTION_JUMP)
+
+#define MLX5_TCF_VLAN_ACTIONS \
+ (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN | \
+ MLX5_FLOW_ACTION_OF_SET_VLAN_VID | MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
+
+#define MLX5_TCF_PEDIT_ACTIONS \
+ (MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST | \
+ MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST | \
+ MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST | \
+ MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL | \
+ MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)
+
+#define MLX5_TCF_CONFIG_ACTIONS \
+ (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_JUMP | \
+ MLX5_FLOW_ACTION_OF_PUSH_VLAN | MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
+ MLX5_FLOW_ACTION_OF_SET_VLAN_PCP | \
+ (MLX5_TCF_PEDIT_ACTIONS & ~MLX5_FLOW_ACTION_DEC_TTL))
+
+#define MAX_PEDIT_KEYS 128
+#define SZ_PEDIT_KEY_VAL 4
+
+#define NUM_OF_PEDIT_KEYS(sz) \
+ (((sz) / SZ_PEDIT_KEY_VAL) + (((sz) % SZ_PEDIT_KEY_VAL) ? 1 : 0))
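+
+/*
+ * Worked example: each pedit key rewrites SZ_PEDIT_KEY_VAL (4) bytes, so a
+ * 16-byte IPv6 address needs NUM_OF_PEDIT_KEYS(16) = 4 keys while a 6-byte
+ * MAC address needs NUM_OF_PEDIT_KEYS(6) = 2 keys, the second key covering
+ * only the remaining two bytes through its mask.
+ */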
+
+struct pedit_key_ex {
+ enum pedit_header_type htype;
+ enum pedit_cmd cmd;
+};
+
+struct pedit_parser {
+ struct tc_pedit_sel sel;
+ struct tc_pedit_key keys[MAX_PEDIT_KEYS];
+ struct pedit_key_ex keys_ex[MAX_PEDIT_KEYS];
+};
+
+/**
+ * Create space for using the implicitly created TC flow counter.
+ *
+ * @return
+ * A pointer to the counter data structure, NULL otherwise and
+ * rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_tcf_counter_new(void)
+{
+ struct mlx5_flow_counter *cnt;
+
+ /*
+ * The eswitch counter cannot be shared and its ID is unknown;
+ * all are currently returned with ID 0. In the future it may be
+ * better to switch to unique numbers.
+ */
+ struct mlx5_flow_counter tmpl = {
+ .ref_cnt = 1,
+ };
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ *cnt = tmpl;
+ /* Implicit counter, do not add to list. */
+ return cnt;
+}
+
+/**
+ * Set pedit key of MAC address
+ *
+ * @param[in] actions
+ * pointer to action specification
+ * @param[in,out] p_parser
+ * pointer to pedit_parser
+ */
+static void
+flow_tcf_pedit_key_set_mac(const struct rte_flow_action *actions,
+ struct pedit_parser *p_parser)
+{
+ int idx = p_parser->sel.nkeys;
+ uint32_t off = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
+ offsetof(struct ether_hdr, s_addr) :
+ offsetof(struct ether_hdr, d_addr);
+ const struct rte_flow_action_set_mac *conf =
+ (const struct rte_flow_action_set_mac *)actions->conf;
+
+ p_parser->keys[idx].off = off;
+ p_parser->keys[idx].mask = ~UINT32_MAX;
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;
+ p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ memcpy(&p_parser->keys[idx].val,
+ conf->mac_addr, SZ_PEDIT_KEY_VAL);
+ idx++;
+ p_parser->keys[idx].off = off + SZ_PEDIT_KEY_VAL;
+ p_parser->keys[idx].mask = 0xFFFF0000;
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;
+ p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ memcpy(&p_parser->keys[idx].val,
+ conf->mac_addr + SZ_PEDIT_KEY_VAL,
+ ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
+ p_parser->sel.nkeys = (++idx);
+}
+
+/**
+ * Set pedit key of decrease/set ttl
+ *
+ * @param[in] actions
+ * pointer to action specification
+ * @param[in,out] p_parser
+ * pointer to pedit_parser
+ * @param[in] item_flags
+ * flags of all items presented
+ */
+static void
+flow_tcf_pedit_key_set_dec_ttl(const struct rte_flow_action *actions,
+ struct pedit_parser *p_parser,
+ uint64_t item_flags)
+{
+ int idx = p_parser->sel.nkeys;
+
+ p_parser->keys[idx].mask = 0xFFFFFF00;
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
+ p_parser->keys[idx].off =
+ offsetof(struct ipv4_hdr, time_to_live);
+ }
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
+ p_parser->keys[idx].off =
+ offsetof(struct ipv6_hdr, hop_limits);
+ }
+ if (actions->type == RTE_FLOW_ACTION_TYPE_DEC_TTL) {
+ p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;
+ p_parser->keys[idx].val = 0x000000FF;
+ } else {
+ p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ p_parser->keys[idx].val =
+ (__u32)((const struct rte_flow_action_set_ttl *)
+ actions->conf)->ttl_value;
+ }
+ p_parser->sel.nkeys = (++idx);
+}
+
+/**
+ * Set pedit key of transport (TCP/UDP) port value.
+ *
+ * @param[in] actions
+ *   Pointer to the action specification.
+ * @param[in,out] p_parser
+ *   Pointer to the pedit_parser.
+ * @param[in] item_flags
+ *   Flags of all items present in the pattern.
+ */
+static void
+flow_tcf_pedit_key_set_tp_port(const struct rte_flow_action *actions,
+ struct pedit_parser *p_parser,
+ uint64_t item_flags)
+{
+ int idx = p_parser->sel.nkeys;
+
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP;
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP;
+ p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ /* offset of src/dst port is same for TCP and UDP */
+ p_parser->keys[idx].off =
+ actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
+ offsetof(struct tcp_hdr, src_port) :
+ offsetof(struct tcp_hdr, dst_port);
+ p_parser->keys[idx].mask = 0xFFFF0000;
+ p_parser->keys[idx].val =
+ (__u32)((const struct rte_flow_action_set_tp *)
+ actions->conf)->port;
+ p_parser->sel.nkeys = (++idx);
+}
+
+/**
+ * Set pedit key of IPv6 address.
+ *
+ * @param[in] actions
+ *   Pointer to the action specification.
+ * @param[in,out] p_parser
+ *   Pointer to the pedit_parser.
+ */
+static void
+flow_tcf_pedit_key_set_ipv6_addr(const struct rte_flow_action *actions,
+ struct pedit_parser *p_parser)
+{
+ int idx = p_parser->sel.nkeys;
+ int keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
+ int off_base =
+ actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
+ offsetof(struct ipv6_hdr, src_addr) :
+ offsetof(struct ipv6_hdr, dst_addr);
+ const struct rte_flow_action_set_ipv6 *conf =
+ (const struct rte_flow_action_set_ipv6 *)actions->conf;
+
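+ /* The 16-byte IPv6 address is split into four 32-bit pedit keys. */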
+ for (int i = 0; i < keys; i++, idx++) {
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
+ p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ p_parser->keys[idx].off = off_base + i * SZ_PEDIT_KEY_VAL;
+ p_parser->keys[idx].mask = ~UINT32_MAX;
+ memcpy(&p_parser->keys[idx].val,
+ conf->ipv6_addr + i * SZ_PEDIT_KEY_VAL,
+ SZ_PEDIT_KEY_VAL);
+ }
+ p_parser->sel.nkeys += keys;
+}
+
+/**
+ * Set pedit key of IPv4 address.
+ *
+ * @param[in] actions
+ *   Pointer to the action specification.
+ * @param[in,out] p_parser
+ *   Pointer to the pedit_parser.
+ */
+static void
+flow_tcf_pedit_key_set_ipv4_addr(const struct rte_flow_action *actions,
+ struct pedit_parser *p_parser)
+{
+ int idx = p_parser->sel.nkeys;
+
+ p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
+ p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ p_parser->keys[idx].off =
+ actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
+ offsetof(struct ipv4_hdr, src_addr) :
+ offsetof(struct ipv4_hdr, dst_addr);
+ p_parser->keys[idx].mask = ~UINT32_MAX;
+ p_parser->keys[idx].val =
+ ((const struct rte_flow_action_set_ipv4 *)
+ actions->conf)->ipv4_addr;
+ p_parser->sel.nkeys = (++idx);
+}
+
+/**
+ * Create the pedit's na attribute in the Netlink message
+ * on the pre-allocated message buffer.
+ *
+ * @param[in,out] nl
+ *   Pointer to the pre-allocated Netlink message buffer.
+ * @param[in,out] actions
+ *   Pointer to a pointer to the actions specification.
+ * @param[in] item_flags
+ *   Flags of all items present in the pattern.
+ */
+static void
+flow_tcf_create_pedit_mnl_msg(struct nlmsghdr *nl,
+ const struct rte_flow_action **actions,
+ uint64_t item_flags)
+{
+ struct pedit_parser p_parser;
+ struct nlattr *na_act_options;
+ struct nlattr *na_pedit_keys;
+
+ memset(&p_parser, 0, sizeof(p_parser));
+ mnl_attr_put_strz(nl, TCA_ACT_KIND, "pedit");
+ na_act_options = mnl_attr_nest_start(nl, TCA_ACT_OPTIONS);
+ /* all modify header actions should be in one tc-pedit action */
+ for (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {
+ switch ((*actions)->type) {
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ flow_tcf_pedit_key_set_ipv4_addr(*actions, &p_parser);
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ flow_tcf_pedit_key_set_ipv6_addr(*actions, &p_parser);
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ flow_tcf_pedit_key_set_tp_port(*actions,
+ &p_parser, item_flags);
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ flow_tcf_pedit_key_set_dec_ttl(*actions,
+ &p_parser, item_flags);
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ flow_tcf_pedit_key_set_mac(*actions, &p_parser);
+ break;
+ default:
+ goto pedit_mnl_msg_done;
+ }
+ }
+pedit_mnl_msg_done:
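+ /*
+  * TC_ACT_PIPE lets the remaining actions of the rule (e.g. mirred
+  * redirection) run after the header rewrite.
+  */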
+ p_parser.sel.action = TC_ACT_PIPE;
+ mnl_attr_put(nl, TCA_PEDIT_PARMS_EX,
+ sizeof(p_parser.sel) +
+ p_parser.sel.nkeys * sizeof(struct tc_pedit_key),
+ &p_parser);
+ na_pedit_keys =
+ mnl_attr_nest_start(nl, TCA_PEDIT_KEYS_EX | NLA_F_NESTED);
+ for (int i = 0; i < p_parser.sel.nkeys; i++) {
+ struct nlattr *na_pedit_key =
+ mnl_attr_nest_start(nl,
+ TCA_PEDIT_KEY_EX | NLA_F_NESTED);
+ mnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_HTYPE,
+ p_parser.keys_ex[i].htype);
+ mnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_CMD,
+ p_parser.keys_ex[i].cmd);
+ mnl_attr_nest_end(nl, na_pedit_key);
+ }
+ mnl_attr_nest_end(nl, na_pedit_keys);
+ mnl_attr_nest_end(nl, na_act_options);
+ (*actions)--;
+}
+
+/**
+ * Calculate the maximum memory size of one TC-pedit action.
+ * One TC-pedit action can contain a set of keys, each defining
+ * a rewrite element (rte_flow action).
+ *
+ * @param[in,out] actions
+ *   Pointer to the actions specification.
+ * @param[in,out] action_flags
+ *   Pointer to the detected action flags.
+ *
+ * @return
+ *   Maximum memory size of one TC-pedit action.
+ */
+static int
+flow_tcf_get_pedit_actions_size(const struct rte_flow_action **actions,
+ uint64_t *action_flags)
+{
+ int pedit_size = 0;
+ int keys = 0;
+ uint64_t flags = 0;
+
+ pedit_size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("pedit") +
+ SZ_NLATTR_NEST; /* TCA_ACT_OPTIONS. */
+ for (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {
+ switch ((*actions)->type) {
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ keys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_IPV4_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ keys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_IPV4_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ keys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_IPV6_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ keys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_IPV6_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ /* TCP is the same as UDP. */
+ keys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_TP_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ /* TCP is the same as UDP. */
+ keys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_TP_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ keys += NUM_OF_PEDIT_KEYS(TTL_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ keys += NUM_OF_PEDIT_KEYS(TTL_LEN);
+ flags |= MLX5_FLOW_ACTION_DEC_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_MAC_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+ flags |= MLX5_FLOW_ACTION_SET_MAC_DST;
+ break;
+ default:
+ goto get_pedit_action_size_done;
+ }
+ }
+get_pedit_action_size_done:
+ /* TCA_PEDIT_PARMS_EX. */
+ pedit_size +=
+ SZ_NLATTR_DATA_OF(sizeof(struct tc_pedit_sel) +
+ keys * sizeof(struct tc_pedit_key));
+ pedit_size += SZ_NLATTR_NEST; /* TCA_PEDIT_KEYS_EX. */
+ pedit_size += keys *
+ /* TCA_PEDIT_KEY_EX + HTYPE + CMD */
+ (SZ_NLATTR_NEST + SZ_NLATTR_DATA_OF(2) +
+ SZ_NLATTR_DATA_OF(2));
+ (*action_flags) |= flags;
+ (*actions)--;
+ return pedit_size;
+}
+
+/**
+ * Retrieve mask for pattern item.
+ *
+ * This function does basic sanity checks on a pattern item in order to
+ * return the most appropriate mask for it.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask_default
+ * Default mask for pattern item as specified by the flow API.
+ * @param[in] mask_supported
+ * Mask fields supported by the implementation.
+ * @param[in] mask_empty
+ *   Empty mask to return when there is no specification.
+ * @param[in] mask_size
+ *   Size of the masks, in bytes.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Either @p item->mask or one of the mask parameters on success, NULL
+ * otherwise and rte_errno is set.
+ */
+static const void *
+flow_tcf_item_mask(const struct rte_flow_item *item, const void *mask_default,
+ const void *mask_supported, const void *mask_empty,
+ size_t mask_size, struct rte_flow_error *error)
+{
+ const uint8_t *mask;
+ size_t i;
+
+ /* item->last and item->mask cannot exist without item->spec. */
+ if (!item->spec && (item->mask || item->last)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "\"mask\" or \"last\" field provided without"
+ " a corresponding \"spec\"");
+ return NULL;
+ }
+ /* No spec, no mask, no problem. */
+ if (!item->spec)
+ return mask_empty;
+ mask = item->mask ? item->mask : mask_default;
+ assert(mask);
+ /*
+ * Single-pass check to make sure that:
+ * - Mask is supported, no bits are set outside mask_supported.
+ * - Both item->spec and item->last are included in mask.
+ */
+ for (i = 0; i != mask_size; ++i) {
+ if (!mask[i])
+ continue;
+ if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
+ ((const uint8_t *)mask_supported)[i]) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "unsupported field found"
+ " in \"mask\"");
+ return NULL;
+ }
+ if (item->last &&
+ (((const uint8_t *)item->spec)[i] & mask[i]) !=
+ (((const uint8_t *)item->last)[i] & mask[i])) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ item->last,
+ "range between \"spec\" and \"last\""
+ " not comprised in \"mask\"");
+ return NULL;
+ }
+ }
+ return mask;
+}
+
+/**
+ * Build a conversion table between port ID and ifindex.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[out] ptoi
+ * Pointer to ptoi table.
+ * @param[in] len
+ * Size of ptoi table provided.
+ *
+ * @return
+ * Size of ptoi table filled.
+ */
+static unsigned int
+flow_tcf_build_ptoi_table(struct rte_eth_dev *dev, struct flow_tcf_ptoi *ptoi,
+ unsigned int len)
+{
+ unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[n + 1];
+ unsigned int i;
+ unsigned int own = 0;
+
+ /* At least one port is needed when no switch domain is present. */
+ if (!n) {
+ n = 1;
+ port_id[0] = dev->data->port_id;
+ } else {
+ n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
+ }
+ if (n > len)
+ return 0;
+ for (i = 0; i != n; ++i) {
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(port_id[i], &dev_info);
+ if (port_id[i] == dev->data->port_id)
+ own = i;
+ ptoi[i].port_id = port_id[i];
+ ptoi[i].ifindex = dev_info.if_index;
+ }
+ /* Ensure first entry of ptoi[] is the current device. */
+ if (own) {
+ ptoi[n] = ptoi[0];
+ ptoi[0] = ptoi[own];
+ ptoi[own] = ptoi[n];
+ }
+ /* An entry with zero ifindex terminates ptoi[]. */
+ ptoi[n].port_id = 0;
+ ptoi[n].ifindex = 0;
+ return n;
+}
+
+/**
+ * Verify the @p attr will be correctly understood by the E-switch.
+ *
+ * @param[in] attr
+ * Pointer to flow attributes
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate_attributes(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /*
+ * Supported attributes: groups, some priorities and ingress only.
+ * group is supported only if kernel supports chain. Don't care about
+ * transfer as it is the caller's problem.
+ */
+ if (attr->group > MLX5_TCF_GROUP_ID_MAX)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "group ID larger than "
+ RTE_STR(MLX5_TCF_GROUP_ID_MAX)
+ " isn't supported");
+ else if (attr->group > 0 &&
+ attr->priority > MLX5_TCF_GROUP_PRIORITY_MAX)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr,
+ "lowest priority level is "
+ RTE_STR(MLX5_TCF_GROUP_PRIORITY_MAX)
+ " when group is configured");
+ else if (attr->priority > 0xfffe)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr,
+ "lowest priority level is 0xfffe");
+ if (!attr->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "only ingress is supported");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "egress is not supported");
+ return 0;
+}
+
+/**
+ * Validate flow for E-Switch.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ union {
+ const struct rte_flow_item_port_id *port_id;
+ const struct rte_flow_item_eth *eth;
+ const struct rte_flow_item_vlan *vlan;
+ const struct rte_flow_item_ipv4 *ipv4;
+ const struct rte_flow_item_ipv6 *ipv6;
+ const struct rte_flow_item_tcp *tcp;
+ const struct rte_flow_item_udp *udp;
+ } spec, mask;
+ union {
+ const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_jump *jump;
+ const struct rte_flow_action_of_push_vlan *of_push_vlan;
+ const struct rte_flow_action_of_set_vlan_vid *
+ of_set_vlan_vid;
+ const struct rte_flow_action_of_set_vlan_pcp *
+ of_set_vlan_pcp;
+ const struct rte_flow_action_set_ipv4 *set_ipv4;
+ const struct rte_flow_action_set_ipv6 *set_ipv6;
+ } conf;
+ uint64_t item_flags = 0;
+ uint64_t action_flags = 0;
+ uint8_t next_protocol = -1;
+ unsigned int tcm_ifindex = 0;
+ uint8_t pedit_validated = 0;
+ struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
+ struct rte_eth_dev *port_id_dev = NULL;
+ bool in_port_id_set = false;
+ int ret;
+
+ claim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,
+ PTOI_TABLE_SZ_MAX(dev)));
+ ret = flow_tcf_validate_attributes(attr, error);
+ if (ret < 0)
+ return ret;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ unsigned int i;
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ mask.port_id = flow_tcf_item_mask
+ (items, &rte_flow_item_port_id_mask,
+ &flow_tcf_mask_supported.port_id,
+ &flow_tcf_mask_empty.port_id,
+ sizeof(flow_tcf_mask_supported.port_id),
+ error);
+ if (!mask.port_id)
+ return -rte_errno;
+ if (mask.port_id == &flow_tcf_mask_empty.port_id) {
+ in_port_id_set = 1;
+ break;
+ }
+ spec.port_id = items->spec;
+ if (mask.port_id->id && mask.port_id->id != 0xffffffff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.port_id,
+ "no support for partial mask on"
+ " \"id\" field");
+ if (!mask.port_id->id)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == spec.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "missing data to convert port ID to"
+ " ifindex");
+ if (in_port_id_set && ptoi[i].ifindex != tcm_ifindex)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "cannot match traffic for"
+ " several port IDs through"
+ " a single flow rule");
+ tcm_ifindex = ptoi[i].ifindex;
+ in_port_id_set = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
+ /* TODO:
+ * Redundant check due to different supported mask.
+ * Same for the rest of items.
+ */
+ mask.eth = flow_tcf_item_mask
+ (items, &rte_flow_item_eth_mask,
+ &flow_tcf_mask_supported.eth,
+ &flow_tcf_mask_empty.eth,
+ sizeof(flow_tcf_mask_supported.eth),
+ error);
+ if (!mask.eth)
+ return -rte_errno;
+ if (mask.eth->type && mask.eth->type !=
+ RTE_BE16(0xffff))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.eth,
+ "no support for partial mask on"
+ " \"type\" field");
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_validate_item_vlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
+ mask.vlan = flow_tcf_item_mask
+ (items, &rte_flow_item_vlan_mask,
+ &flow_tcf_mask_supported.vlan,
+ &flow_tcf_mask_empty.vlan,
+ sizeof(flow_tcf_mask_supported.vlan),
+ error);
+ if (!mask.vlan)
+ return -rte_errno;
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ (mask.vlan->tci & RTE_BE16(0xe000)) !=
+ RTE_BE16(0xe000)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ (mask.vlan->tci & RTE_BE16(0x0fff)) !=
+ RTE_BE16(0x0fff)) ||
+ (mask.vlan->inner_type &&
+ mask.vlan->inner_type != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.vlan,
+ "no support for partial masks on"
+ " \"tci\" (PCP and VID parts) and"
+ " \"inner_type\" fields");
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ mask.ipv4 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv4_mask,
+ &flow_tcf_mask_supported.ipv4,
+ &flow_tcf_mask_empty.ipv4,
+ sizeof(flow_tcf_mask_supported.ipv4),
+ error);
+ if (!mask.ipv4)
+ return -rte_errno;
+ if (mask.ipv4->hdr.next_proto_id &&
+ mask.ipv4->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv4,
+ "no support for partial mask on"
+ " \"hdr.next_proto_id\" field");
+ else if (mask.ipv4->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ mask.ipv6 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv6_mask,
+ &flow_tcf_mask_supported.ipv6,
+ &flow_tcf_mask_empty.ipv6,
+ sizeof(flow_tcf_mask_supported.ipv6),
+ error);
+ if (!mask.ipv6)
+ return -rte_errno;
+ if (mask.ipv6->hdr.proto &&
+ mask.ipv6->hdr.proto != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv6,
+ "no support for partial mask on"
+ " \"hdr.proto\" field");
+ else if (mask.ipv6->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ (items->spec))->hdr.proto;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ mask.udp = flow_tcf_item_mask
+ (items, &rte_flow_item_udp_mask,
+ &flow_tcf_mask_supported.udp,
+ &flow_tcf_mask_empty.udp,
+ sizeof(flow_tcf_mask_supported.udp),
+ error);
+ if (!mask.udp)
+ return -rte_errno;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &flow_tcf_mask_supported.tcp,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ mask.tcp = flow_tcf_item_mask
+ (items, &rte_flow_item_tcp_mask,
+ &flow_tcf_mask_supported.tcp,
+ &flow_tcf_mask_empty.tcp,
+ sizeof(flow_tcf_mask_supported.tcp),
+ error);
+ if (!mask.tcp)
+ return -rte_errno;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ unsigned int i;
+ uint64_t current_action_flag = 0;
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ current_action_flag = MLX5_FLOW_ACTION_PORT_ID;
+ if (!actions->conf)
+ break;
+ conf.port_id = actions->conf;
+ if (conf.port_id->original)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == conf.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf.port_id,
+ "missing data to convert port ID to"
+ " ifindex");
+ port_id_dev = &rte_eth_devices[conf.port_id->id];
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ current_action_flag = MLX5_FLOW_ACTION_JUMP;
+ if (!actions->conf)
+ break;
+ conf.jump = actions->conf;
+ if (attr->group >= conf.jump->group)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "can jump only to a group forward");
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ current_action_flag = MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ current_action_flag = MLX5_FLOW_ACTION_OF_POP_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ current_action_flag = MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "vlan modify is not supported,"
+ " set action must follow push action");
+ current_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "vlan modify is not supported,"
+ " set action must follow push action");
+ current_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ current_action_flag = MLX5_FLOW_ACTION_SET_IPV4_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ current_action_flag = MLX5_FLOW_ACTION_SET_IPV4_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ current_action_flag = MLX5_FLOW_ACTION_SET_IPV6_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ current_action_flag = MLX5_FLOW_ACTION_SET_IPV6_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ current_action_flag = MLX5_FLOW_ACTION_SET_TP_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ current_action_flag = MLX5_FLOW_ACTION_SET_TP_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ current_action_flag = MLX5_FLOW_ACTION_SET_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ current_action_flag = MLX5_FLOW_ACTION_DEC_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ current_action_flag = MLX5_FLOW_ACTION_SET_MAC_SRC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ current_action_flag = MLX5_FLOW_ACTION_SET_MAC_DST;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (current_action_flag & MLX5_TCF_CONFIG_ACTIONS) {
+ if (!actions->conf)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ actions,
+ "action configuration not set");
+ }
+ if ((current_action_flag & MLX5_TCF_PEDIT_ACTIONS) &&
+ pedit_validated)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "set actions should be "
+ "listed successively");
+ if ((current_action_flag & ~MLX5_TCF_PEDIT_ACTIONS) &&
+ (action_flags & MLX5_TCF_PEDIT_ACTIONS))
+ pedit_validated = 1;
+ if ((current_action_flag & MLX5_TCF_FATE_ACTIONS) &&
+ (action_flags & MLX5_TCF_FATE_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "can't have multiple fate"
+ " actions");
+ action_flags |= current_action_flag;
+ }
+ if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
+ (action_flags & MLX5_FLOW_ACTION_DROP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "set action is not compatible with "
+ "drop action");
+ if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
+ !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "set action must be followed by "
+ "port_id action");
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST)) {
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no ipv4 item found in"
+ " pattern");
+ }
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST)) {
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no ipv6 item found in"
+ " pattern");
+ }
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST)) {
+ if (!(item_flags &
+ (MLX5_FLOW_LAYER_OUTER_L4_UDP |
+ MLX5_FLOW_LAYER_OUTER_L4_TCP)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no TCP/UDP item found in"
+ " pattern");
+ }
+ /*
+ * FW syndrome (0xA9C090):
+ * set_flow_table_entry: push vlan action fte in fdb can ONLY be
+ * forwarded to the uplink.
+ */
+ if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
+ ((struct priv *)port_id_dev->data->dev_private)->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "vlan push can only be applied"
+ " when forwarding to uplink port");
+ /*
+ * FW syndrome (0x294609):
+ * set_flow_table_entry: modify/pop/push actions in fdb flow table
+ * are supported only while forwarding to vport.
+ */
+ if ((action_flags & MLX5_TCF_VLAN_ACTIONS) &&
+ !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "vlan actions are supported"
+ " only with port_id action");
+ if (!(action_flags & MLX5_TCF_FATE_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "no fate action is found");
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL)) {
+ if (!(item_flags &
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no IP found in pattern");
+ }
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)) {
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no ethernet found in"
+ " pattern");
+ }
+ return 0;
+}
+
+/**
+ * Calculate maximum size of memory for flow items of Linux TC flower and
+ * extract specified items.
+ *
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[out] item_flags
+ * Pointer to the detected items.
+ *
+ * @return
+ * Maximum size of memory for items.
+ */
+static int
+flow_tcf_get_items_and_size(const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ uint64_t *item_flags)
+{
+ int size = 0;
+ uint64_t flags = 0;
+
+ size += SZ_NLATTR_STRZ_OF("flower") +
+ SZ_NLATTR_NEST + /* TCA_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CLS_FLAGS_SKIP_SW. */
+ if (attr->group > 0)
+ size += SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CHAIN. */
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
+ SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;
+ /* dst/src MAC addr and mask. */
+ flags |= MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
+ SZ_NLATTR_TYPE_OF(uint16_t) +
+ /* VLAN Ether type. */
+ SZ_NLATTR_TYPE_OF(uint8_t) + /* VLAN prio. */
+ SZ_NLATTR_TYPE_OF(uint16_t); /* VLAN ID. */
+ flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
+ SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_TYPE_OF(uint32_t) * 4;
+ /* dst/src IP addr and mask. */
+ flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ size += SZ_NLATTR_TYPE_OF(uint16_t) + /* Ether type. */
+ SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_TYPE_OF(IPV6_ADDR_LEN) * 4;
+ /* dst/src IP addr and mask. */
+ flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_TYPE_OF(uint16_t) * 4;
+ /* dst/src port and mask. */
+ flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_TYPE_OF(uint16_t) * 4;
+ /* dst/src port and mask. */
+ flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ default:
+ DRV_LOG(WARNING,
+ "unsupported item %p type %d,"
+ " items must be validated before flow creation",
+ (const void *)items, items->type);
+ break;
+ }
+ }
+ *item_flags = flags;
+ return size;
+}
+
+/**
+ * Calculate maximum size of memory for flow actions of Linux TC flower and
+ * extract specified actions.
+ *
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] action_flags
+ * Pointer to the detected actions.
+ *
+ * @return
+ * Maximum size of memory for actions.
+ */
+static int
+flow_tcf_get_actions_and_size(const struct rte_flow_action actions[],
+ uint64_t *action_flags)
+{
+ int size = 0;
+ uint64_t flags = 0;
+
+ size += SZ_NLATTR_NEST; /* TCA_FLOWER_ACT. */
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("mirred") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_mirred);
+ flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("gact") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_gact);
+ flags |= MLX5_FLOW_ACTION_JUMP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("gact") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_gact);
+ flags |= MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
+ goto action_of_vlan;
+action_of_vlan:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("vlan") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_vlan) +
+ SZ_NLATTR_TYPE_OF(uint16_t) +
+ /* VLAN protocol. */
+ SZ_NLATTR_TYPE_OF(uint16_t) + /* VLAN ID. */
+ SZ_NLATTR_TYPE_OF(uint8_t); /* VLAN prio. */
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ size += flow_tcf_get_pedit_actions_size(&actions,
+ &flags);
+ break;
+ default:
+ DRV_LOG(WARNING,
+ "unsupported action %p type %d,"
+ " items must be validated before flow creation",
+ (const void *)actions, actions->type);
+ break;
+ }
+ }
+ *action_flags = flags;
+ return size;
+}
+
+/**
+ * Brand rtnetlink buffer with unique handle.
+ *
+ * This handle should be unique for a given network interface to avoid
+ * collisions.
+ *
+ * @param nlh
+ * Pointer to Netlink message.
+ * @param handle
+ * Unique 32-bit handle to use.
+ */
+static void
+flow_tcf_nl_brand(struct nlmsghdr *nlh, uint32_t handle)
+{
+ struct tcmsg *tcm = mnl_nlmsg_get_payload(nlh);
+
+ tcm->tcm_handle = handle;
+ DRV_LOG(DEBUG, "Netlink msg %p is branded with handle %x",
+ (void *)nlh, handle);
+}
+
+/**
+ * Prepare a flow object for Linux TC flower. It calculates the maximum size of
+ * memory required, allocates the memory, initializes Netlink message headers
+ * and sets a unique TC message handle.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] item_flags
+ * Pointer to bit mask of all items detected.
+ * @param[out] action_flags
+ * Pointer to bit mask of all actions detected.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to mlx5_flow object on success,
+ *   otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_flow *
+flow_tcf_prepare(const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ uint64_t *item_flags, uint64_t *action_flags,
+ struct rte_flow_error *error)
+{
+ size_t size = sizeof(struct mlx5_flow) +
+ MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct tcmsg));
+ struct mlx5_flow *dev_flow;
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+
+ size += flow_tcf_get_items_and_size(attr, items, item_flags);
+ size += flow_tcf_get_actions_and_size(actions, action_flags);
+ dev_flow = rte_zmalloc(__func__, size, MNL_ALIGNTO);
+ if (!dev_flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create E-Switch flow");
+ return NULL;
+ }
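+ /*
+  * The Netlink message header and tcmsg are laid out right after
+  * the flow object within the same allocation.
+  */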
+ nlh = mnl_nlmsg_put_header((void *)(dev_flow + 1));
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ *dev_flow = (struct mlx5_flow){
+ .tcf = (struct mlx5_flow_tcf){
+ .nlh = nlh,
+ .tcm = tcm,
+ },
+ };
+ /*
+ * Generate a reasonably unique handle based on the address of the
+ * target buffer.
+ *
+ * This is straightforward on 32-bit systems where the flow pointer can
+ * be used directly. Otherwise, its least significant part is taken
+ * after shifting it by the previous power of two of the pointed buffer
+ * size.
+ */
+ if (sizeof(dev_flow) <= 4)
+ flow_tcf_nl_brand(nlh, (uintptr_t)dev_flow);
+ else
+ flow_tcf_nl_brand(nlh, (uintptr_t)dev_flow >>
+ rte_log2_u32(rte_align32prevpow2(size)));
+ return dev_flow;
+}
+
+/**
+ * Make adjustments for supporting count actions.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_translate_action_count(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = dev_flow->flow;
+
+ if (!flow->counter) {
+ flow->counter = flow_tcf_counter_new();
+ if (!flow->counter)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot get counter"
+ " context.");
+ }
+ return 0;
+}
+
+/**
+ * Translate flow for Linux TC flower and construct Netlink message.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device.
+ * @param[in, out] dev_flow
+ *   Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ union {
+ const struct rte_flow_item_port_id *port_id;
+ const struct rte_flow_item_eth *eth;
+ const struct rte_flow_item_vlan *vlan;
+ const struct rte_flow_item_ipv4 *ipv4;
+ const struct rte_flow_item_ipv6 *ipv6;
+ const struct rte_flow_item_tcp *tcp;
+ const struct rte_flow_item_udp *udp;
+ } spec, mask;
+ union {
+ const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_jump *jump;
+ const struct rte_flow_action_of_push_vlan *of_push_vlan;
+ const struct rte_flow_action_of_set_vlan_vid *
+ of_set_vlan_vid;
+ const struct rte_flow_action_of_set_vlan_pcp *
+ of_set_vlan_pcp;
+ } conf;
+ struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
+ struct nlmsghdr *nlh = dev_flow->tcf.nlh;
+ struct tcmsg *tcm = dev_flow->tcf.tcm;
+ uint32_t na_act_index_cur;
+ bool eth_type_set = 0;
+ bool vlan_present = 0;
+ bool vlan_eth_type_set = 0;
+ bool ip_proto_set = 0;
+ struct nlattr *na_flower;
+ struct nlattr *na_flower_act;
+ struct nlattr *na_vlan_id = NULL;
+ struct nlattr *na_vlan_priority = NULL;
+ uint64_t item_flags = 0;
+ int ret;
+
+ claim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,
+ PTOI_TABLE_SZ_MAX(dev)));
+ nlh = dev_flow->tcf.nlh;
+ tcm = dev_flow->tcf.tcm;
+ /* Prepare API must have been called beforehand. */
+ assert(nlh != NULL && tcm != NULL);
+ tcm->tcm_family = AF_UNSPEC;
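+ /* ptoi[0] is the current device, see flow_tcf_build_ptoi_table(). */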
+ tcm->tcm_ifindex = ptoi[0].ifindex;
+ tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
+ /*
+ * Priority cannot be zero to prevent the kernel from picking one
+ * automatically.
+ */
+ tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
+ RTE_BE16(ETH_P_ALL));
+ if (attr->group > 0)
+ mnl_attr_put_u32(nlh, TCA_CHAIN, attr->group);
+ mnl_attr_put_strz(nlh, TCA_KIND, "flower");
+ na_flower = mnl_attr_nest_start(nlh, TCA_OPTIONS);
+ mnl_attr_put_u32(nlh, TCA_FLOWER_FLAGS, TCA_CLS_FLAGS_SKIP_SW);
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ unsigned int i;
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ mask.port_id = flow_tcf_item_mask
+ (items, &rte_flow_item_port_id_mask,
+ &flow_tcf_mask_supported.port_id,
+ &flow_tcf_mask_empty.port_id,
+ sizeof(flow_tcf_mask_supported.port_id),
+ error);
+ assert(mask.port_id);
+ if (mask.port_id == &flow_tcf_mask_empty.port_id)
+ break;
+ spec.port_id = items->spec;
+ if (!mask.port_id->id)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == spec.port_id->id)
+ break;
+ assert(ptoi[i].ifindex);
+ tcm->tcm_ifindex = ptoi[i].ifindex;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
+ mask.eth = flow_tcf_item_mask
+ (items, &rte_flow_item_eth_mask,
+ &flow_tcf_mask_supported.eth,
+ &flow_tcf_mask_empty.eth,
+ sizeof(flow_tcf_mask_supported.eth),
+ error);
+ assert(mask.eth);
+ if (mask.eth == &flow_tcf_mask_empty.eth)
+ break;
+ spec.eth = items->spec;
+ if (mask.eth->type) {
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_ETH_TYPE,
+ spec.eth->type);
+ eth_type_set = 1;
+ }
+ if (!is_zero_ether_addr(&mask.eth->dst)) {
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,
+ ETHER_ADDR_LEN,
+ spec.eth->dst.addr_bytes);
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->dst.addr_bytes);
+ }
+ if (!is_zero_ether_addr(&mask.eth->src)) {
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,
+ ETHER_ADDR_LEN,
+ spec.eth->src.addr_bytes);
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->src.addr_bytes);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ item_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
+ mask.vlan = flow_tcf_item_mask
+ (items, &rte_flow_item_vlan_mask,
+ &flow_tcf_mask_supported.vlan,
+ &flow_tcf_mask_empty.vlan,
+ sizeof(flow_tcf_mask_supported.vlan),
+ error);
+ assert(mask.vlan);
+ if (!eth_type_set)
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_8021Q));
+ eth_type_set = 1;
+ vlan_present = 1;
+ if (mask.vlan == &flow_tcf_mask_empty.vlan)
+ break;
+ spec.vlan = items->spec;
+ if (mask.vlan->inner_type) {
+ mnl_attr_put_u16(nlh,
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ spec.vlan->inner_type);
+ vlan_eth_type_set = 1;
+ }
+ if (mask.vlan->tci & RTE_BE16(0xe000))
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_VLAN_PRIO,
+ (rte_be_to_cpu_16
+ (spec.vlan->tci) >> 13) & 0x7);
+ if (mask.vlan->tci & RTE_BE16(0x0fff))
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_VLAN_ID,
+ rte_be_to_cpu_16
+ (spec.vlan->tci &
+ RTE_BE16(0x0fff)));
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ mask.ipv4 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv4_mask,
+ &flow_tcf_mask_supported.ipv4,
+ &flow_tcf_mask_empty.ipv4,
+ sizeof(flow_tcf_mask_supported.ipv4),
+ error);
+ assert(mask.ipv4);
+ if (!eth_type_set || !vlan_eth_type_set)
+ mnl_attr_put_u16(nlh,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IP));
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv4 == &flow_tcf_mask_empty.ipv4)
+ break;
+ spec.ipv4 = items->spec;
+ if (mask.ipv4->hdr.next_proto_id) {
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv4->hdr.next_proto_id);
+ ip_proto_set = 1;
+ }
+ if (mask.ipv4->hdr.src_addr) {
+ mnl_attr_put_u32(nlh, TCA_FLOWER_KEY_IPV4_SRC,
+ spec.ipv4->hdr.src_addr);
+ mnl_attr_put_u32(nlh,
+ TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask.ipv4->hdr.src_addr);
+ }
+ if (mask.ipv4->hdr.dst_addr) {
+ mnl_attr_put_u32(nlh, TCA_FLOWER_KEY_IPV4_DST,
+ spec.ipv4->hdr.dst_addr);
+ mnl_attr_put_u32(nlh,
+ TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask.ipv4->hdr.dst_addr);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ mask.ipv6 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv6_mask,
+ &flow_tcf_mask_supported.ipv6,
+ &flow_tcf_mask_empty.ipv6,
+ sizeof(flow_tcf_mask_supported.ipv6),
+ error);
+ assert(mask.ipv6);
+ if (!eth_type_set || !vlan_eth_type_set)
+ mnl_attr_put_u16(nlh,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IPV6));
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv6 == &flow_tcf_mask_empty.ipv6)
+ break;
+ spec.ipv6 = items->spec;
+ if (mask.ipv6->hdr.proto) {
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv6->hdr.proto);
+ ip_proto_set = 1;
+ }
+ if (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr)) {
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_IPV6_SRC,
+ sizeof(spec.ipv6->hdr.src_addr),
+ spec.ipv6->hdr.src_addr);
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(mask.ipv6->hdr.src_addr),
+ mask.ipv6->hdr.src_addr);
+ }
+ if (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr)) {
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_IPV6_DST,
+ sizeof(spec.ipv6->hdr.dst_addr),
+ spec.ipv6->hdr.dst_addr);
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(mask.ipv6->hdr.dst_addr),
+ mask.ipv6->hdr.dst_addr);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ mask.udp = flow_tcf_item_mask
+ (items, &rte_flow_item_udp_mask,
+ &flow_tcf_mask_supported.udp,
+ &flow_tcf_mask_empty.udp,
+ sizeof(flow_tcf_mask_supported.udp),
+ error);
+ assert(mask.udp);
+ if (!ip_proto_set)
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_UDP);
+ if (mask.udp == &flow_tcf_mask_empty.udp)
+ break;
+ spec.udp = items->spec;
+ if (mask.udp->hdr.src_port) {
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_UDP_SRC,
+ spec.udp->hdr.src_port);
+ mnl_attr_put_u16(nlh,
+ TCA_FLOWER_KEY_UDP_SRC_MASK,
+ mask.udp->hdr.src_port);
+ }
+ if (mask.udp->hdr.dst_port) {
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_UDP_DST,
+ spec.udp->hdr.dst_port);
+ mnl_attr_put_u16(nlh,
+ TCA_FLOWER_KEY_UDP_DST_MASK,
+ mask.udp->hdr.dst_port);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ mask.tcp = flow_tcf_item_mask
+ (items, &rte_flow_item_tcp_mask,
+ &flow_tcf_mask_supported.tcp,
+ &flow_tcf_mask_empty.tcp,
+ sizeof(flow_tcf_mask_supported.tcp),
+ error);
+ assert(mask.tcp);
+ if (!ip_proto_set)
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_TCP);
+ if (mask.tcp == &flow_tcf_mask_empty.tcp)
+ break;
+ spec.tcp = items->spec;
+ if (mask.tcp->hdr.src_port) {
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_TCP_SRC,
+ spec.tcp->hdr.src_port);
+ mnl_attr_put_u16(nlh,
+ TCA_FLOWER_KEY_TCP_SRC_MASK,
+ mask.tcp->hdr.src_port);
+ }
+ if (mask.tcp->hdr.dst_port) {
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_TCP_DST,
+ spec.tcp->hdr.dst_port);
+ mnl_attr_put_u16(nlh,
+ TCA_FLOWER_KEY_TCP_DST_MASK,
+ mask.tcp->hdr.dst_port);
+ }
+ if (mask.tcp->hdr.tcp_flags) {
+ mnl_attr_put_u16
+ (nlh,
+ TCA_FLOWER_KEY_TCP_FLAGS,
+ rte_cpu_to_be_16
+ (spec.tcp->hdr.tcp_flags));
+ mnl_attr_put_u16
+ (nlh,
+ TCA_FLOWER_KEY_TCP_FLAGS_MASK,
+ rte_cpu_to_be_16
+ (mask.tcp->hdr.tcp_flags));
+ }
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+ na_flower_act = mnl_attr_nest_start(nlh, TCA_FLOWER_ACT);
+ na_act_index_cur = 1;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ struct nlattr *na_act_index;
+ struct nlattr *na_act;
+ unsigned int vlan_act;
+ unsigned int i;
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ conf.port_id = actions->conf;
+ if (conf.port_id->original)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == conf.port_id->id)
+ break;
+ assert(ptoi[i].ifindex);
+ na_act_index =
+ mnl_attr_nest_start(nlh, na_act_index_cur++);
+ assert(na_act_index);
+ mnl_attr_put_strz(nlh, TCA_ACT_KIND, "mirred");
+ na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
+ assert(na_act);
+ mnl_attr_put(nlh, TCA_MIRRED_PARMS,
+ sizeof(struct tc_mirred),
+ &(struct tc_mirred){
+ .action = TC_ACT_STOLEN,
+ .eaction = TCA_EGRESS_REDIR,
+ .ifindex = ptoi[i].ifindex,
+ });
+ mnl_attr_nest_end(nlh, na_act);
+ mnl_attr_nest_end(nlh, na_act_index);
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ conf.jump = actions->conf;
+ na_act_index =
+ mnl_attr_nest_start(nlh, na_act_index_cur++);
+ assert(na_act_index);
+ mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");
+ na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
+ assert(na_act);
+ mnl_attr_put(nlh, TCA_GACT_PARMS,
+ sizeof(struct tc_gact),
+ &(struct tc_gact){
+ .action = TC_ACT_GOTO_CHAIN |
+ conf.jump->group,
+ });
+ mnl_attr_nest_end(nlh, na_act);
+ mnl_attr_nest_end(nlh, na_act_index);
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ na_act_index =
+ mnl_attr_nest_start(nlh, na_act_index_cur++);
+ assert(na_act_index);
+ mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");
+ na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
+ assert(na_act);
+ mnl_attr_put(nlh, TCA_GACT_PARMS,
+ sizeof(struct tc_gact),
+ &(struct tc_gact){
+ .action = TC_ACT_SHOT,
+ });
+ mnl_attr_nest_end(nlh, na_act);
+ mnl_attr_nest_end(nlh, na_act_index);
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ /*
+ * Driver adds the count action implicitly for
+ * each rule it creates.
+ */
+ ret = flow_tcf_translate_action_count(dev,
+ dev_flow, error);
+ if (ret < 0)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ conf.of_push_vlan = NULL;
+ vlan_act = TCA_VLAN_ACT_POP;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ conf.of_push_vlan = actions->conf;
+ vlan_act = TCA_VLAN_ACT_PUSH;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ conf.of_set_vlan_vid = actions->conf;
+ if (na_vlan_id)
+ goto override_na_vlan_id;
+ vlan_act = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ conf.of_set_vlan_pcp = actions->conf;
+ if (na_vlan_priority)
+ goto override_na_vlan_priority;
+ vlan_act = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+action_of_vlan:
+ na_act_index =
+ mnl_attr_nest_start(nlh, na_act_index_cur++);
+ assert(na_act_index);
+ mnl_attr_put_strz(nlh, TCA_ACT_KIND, "vlan");
+ na_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
+ assert(na_act);
+ mnl_attr_put(nlh, TCA_VLAN_PARMS,
+ sizeof(struct tc_vlan),
+ &(struct tc_vlan){
+ .action = TC_ACT_PIPE,
+ .v_action = vlan_act,
+ });
+ if (vlan_act == TCA_VLAN_ACT_POP) {
+ mnl_attr_nest_end(nlh, na_act);
+ mnl_attr_nest_end(nlh, na_act_index);
+ break;
+ }
+ if (vlan_act == TCA_VLAN_ACT_PUSH)
+ mnl_attr_put_u16(nlh,
+ TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ conf.of_push_vlan->ethertype);
+ na_vlan_id = mnl_nlmsg_get_payload_tail(nlh);
+ mnl_attr_put_u16(nlh, TCA_VLAN_PAD, 0);
+ na_vlan_priority = mnl_nlmsg_get_payload_tail(nlh);
+ mnl_attr_put_u8(nlh, TCA_VLAN_PAD, 0);
+ mnl_attr_nest_end(nlh, na_act);
+ mnl_attr_nest_end(nlh, na_act_index);
+ if (actions->type ==
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
+override_na_vlan_id:
+ na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
+ *(uint16_t *)mnl_attr_get_payload(na_vlan_id) =
+ rte_be_to_cpu_16
+ (conf.of_set_vlan_vid->vlan_vid);
+ } else if (actions->type ==
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
+override_na_vlan_priority:
+ na_vlan_priority->nla_type =
+ TCA_VLAN_PUSH_VLAN_PRIORITY;
+ *(uint8_t *)mnl_attr_get_payload
+ (na_vlan_priority) =
+ conf.of_set_vlan_pcp->vlan_pcp;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ na_act_index =
+ mnl_attr_nest_start(nlh, na_act_index_cur++);
+ flow_tcf_create_pedit_mnl_msg(nlh,
+ &actions, item_flags);
+ mnl_attr_nest_end(nlh, na_act_index);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ assert(na_flower);
+ assert(na_flower_act);
+ mnl_attr_nest_end(nlh, na_flower_act);
+ mnl_attr_nest_end(nlh, na_flower);
+ return 0;
+}
+
+/**
+ * Send Netlink message with acknowledgment.
+ *
+ * @param ctx
+ * Flow context to use.
+ * @param nlh
+ * Message to send. This function always raises the NLM_F_ACK flag before
+ * sending.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_nl_ack(struct mlx5_flow_tcf_context *ctx, struct nlmsghdr *nlh)
+{
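+ /*
+  * The answer buffer holds a Netlink error message plus the echoed
+  * payload of the request that the kernel may append to it.
+  */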
+ alignas(struct nlmsghdr)
+ uint8_t ans[mnl_nlmsg_size(sizeof(struct nlmsgerr)) +
+ nlh->nlmsg_len - sizeof(*nlh)];
+ uint32_t seq = ctx->seq++;
+ struct mnl_socket *nl = ctx->nl;
+ int ret;
+
+ nlh->nlmsg_flags |= NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+ ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
+ if (ret != -1)
+ ret = mnl_socket_recvfrom(nl, ans, sizeof(ans));
+ if (ret != -1)
+ ret = mnl_cb_run
+ (ans, ret, seq, mnl_socket_get_portid(nl), NULL, NULL);
+ if (ret > 0)
+ return 0;
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+/**
+ * Apply flow to E-Switch by sending Netlink message.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to the sub flow.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
+ struct mlx5_flow *dev_flow;
+ struct nlmsghdr *nlh;
+
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ /* E-Switch flow can't be expanded. */
+ assert(!LIST_NEXT(dev_flow, next));
+ nlh = dev_flow->tcf.nlh;
+ nlh->nlmsg_type = RTM_NEWTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ if (!flow_tcf_nl_ack(ctx, nlh))
+ return 0;
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to create TC flow rule");
+}
+
+/**
+ * Remove flow from E-Switch by sending Netlink message.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to the sub flow.
+ */
+static void
+flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
+ struct mlx5_flow *dev_flow;
+ struct nlmsghdr *nlh;
+
+ if (!flow)
+ return;
+ if (flow->counter) {
+ if (--flow->counter->ref_cnt == 0) {
+ rte_free(flow->counter);
+ flow->counter = NULL;
+ }
+ }
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ if (!dev_flow)
+ return;
+ /* E-Switch flow can't be expanded. */
+ assert(!LIST_NEXT(dev_flow, next));
+ nlh = dev_flow->tcf.nlh;
+ nlh->nlmsg_type = RTM_DELTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ flow_tcf_nl_ack(ctx, nlh);
+}
+
+/**
+ * Remove flow from E-Switch and release resources of the device flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to the sub flow.
+ */
+static void
+flow_tcf_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ if (!flow)
+ return;
+ flow_tcf_remove(dev, flow);
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ if (!dev_flow)
+ return;
+ /* E-Switch flow can't be expanded. */
+ assert(!LIST_NEXT(dev_flow, next));
+ LIST_REMOVE(dev_flow, next);
+ rte_free(dev_flow);
+}
+
+/**
+ * Helper routine for figuring the space size required for a parse buffer.
+ *
+ * @param array
+ * array of values to use.
+ * @param idx
+ * Current location in array.
+ * @param value
+ * Value to compare with.
+ *
+ * @return
+ *   The maximum between the given value and the array value at the index.
+ */
+static uint16_t
+flow_tcf_arr_val_max(uint16_t array[], int idx, uint16_t value)
+{
+ return idx < 0 ? value : RTE_MAX(array[idx], value);
+}
+
+/**
+ * Parse rtnetlink message attributes filling the attribute table with the info
+ * retrieved.
+ *
+ * @param tb
+ * Attribute table to be filled.
+ * @param[out] max
+ *   Maximum entry in the attribute table.
+ * @param rta
+ * The attributes section in the message to be parsed.
+ * @param len
+ * The length of the attributes section in the message.
+ */
+static void
+flow_tcf_nl_parse_rtattr(struct rtattr *tb[], int max,
+ struct rtattr *rta, int len)
+{
+ unsigned short type;
+
+ memset(tb, 0, sizeof(struct rtattr *) * (max + 1));
+ while (RTA_OK(rta, len)) {
+ type = rta->rta_type;
+ if (type <= max && !tb[type])
+ tb[type] = rta;
+ rta = RTA_NEXT(rta, len);
+ }
+}
+
+/**
+ * Extract flow counters from flower action.
+ *
+ * @param rta
+ * flower action stats properties in the Netlink message received.
+ * @param rta_type
+ * The backward sequence of rta_types, as written in the attribute table,
+ *   that we need to traverse in order to reach the requested object.
+ * @param idx
+ * Current location in rta_type table.
+ * @param[out] data
+ * data holding the count statistics of the rte_flow retrieved from
+ * the message.
+ *
+ * @return
+ * 0 if data was found and retrieved, -1 otherwise.
+ */
+static int
+flow_tcf_nl_action_stats_parse_and_get(struct rtattr *rta,
+ uint16_t rta_type[], int idx,
+ struct gnet_stats_basic *data)
+{
+ int tca_stats_max = flow_tcf_arr_val_max(rta_type, idx,
+ TCA_STATS_BASIC);
+ struct rtattr *tbs[tca_stats_max + 1];
+
+ if (rta == NULL || idx < 0)
+ return -1;
+ flow_tcf_nl_parse_rtattr(tbs, tca_stats_max,
+ RTA_DATA(rta), RTA_PAYLOAD(rta));
+ switch (rta_type[idx]) {
+ case TCA_STATS_BASIC:
+ if (tbs[TCA_STATS_BASIC]) {
+ memcpy(data, RTA_DATA(tbs[TCA_STATS_BASIC]),
+ RTE_MIN(RTA_PAYLOAD(tbs[TCA_STATS_BASIC]),
+ sizeof(*data)));
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -1;
+}
+
+/**
+ * Parse flower single action retrieving the requested action attribute,
+ * if found.
+ *
+ * @param arg
+ * flower action properties in the Netlink message received.
+ * @param rta_type
+ * The backward sequence of rta_types, as written in the attribute table,
+ *   that we need to traverse in order to reach the requested object.
+ * @param idx
+ * Current location in rta_type table.
+ * @param[out] data
+ * Count statistics retrieved from the message query.
+ *
+ * @return
+ * 0 if data was found and retrieved, -1 otherwise.
+ */
+static int
+flow_tcf_nl_parse_one_action_and_get(struct rtattr *arg,
+ uint16_t rta_type[], int idx, void *data)
+{
+ int tca_act_max = flow_tcf_arr_val_max(rta_type, idx, TCA_ACT_STATS);
+ struct rtattr *tb[tca_act_max + 1];
+
+ if (arg == NULL || idx < 0)
+ return -1;
+ flow_tcf_nl_parse_rtattr(tb, tca_act_max,
+ RTA_DATA(arg), RTA_PAYLOAD(arg));
+ if (tb[TCA_ACT_KIND] == NULL)
+ return -1;
+ switch (rta_type[idx]) {
+ case TCA_ACT_STATS:
+ if (tb[TCA_ACT_STATS])
+ return flow_tcf_nl_action_stats_parse_and_get
+ (tb[TCA_ACT_STATS],
+ rta_type, --idx,
+ (struct gnet_stats_basic *)data);
+ break;
+ default:
+ break;
+ }
+ return -1;
+}
+
+/**
+ * Parse flower action section in the message retrieving the requested
+ * attribute from the first action that provides it.
+ *
+ * @param opt
+ * flower section in the Netlink message received.
+ * @param rta_type
+ * The backward sequence of rta_types, as written in the attribute table,
+ * that we need to traverse in order to reach the requested object.
+ * @param idx
+ * Current location in rta_type table.
+ * @param[out] data
+ * data retrieved from the message query.
+ *
+ * @return
+ * 0 if data was found and retrieved, -1 otherwise.
+ */
+static int
+flow_tcf_nl_action_parse_and_get(struct rtattr *arg,
+ uint16_t rta_type[], int idx, void *data)
+{
+ struct rtattr *tb[TCA_ACT_MAX_PRIO + 1];
+ int i;
+
+ if (arg == NULL || idx < 0)
+ return -1;
+ flow_tcf_nl_parse_rtattr(tb, TCA_ACT_MAX_PRIO,
+ RTA_DATA(arg), RTA_PAYLOAD(arg));
+ switch (rta_type[idx]) {
+ /*
+ * flow counters are stored in the actions defined by the flow
+ * and not in the flow itself, therefore we need to traverse the
+ * flower chain of actions in search for them.
+ *
+ * Note that the index is not decremented here.
+ */
+ case TCA_ACT_STATS:
+ for (i = 0; i <= TCA_ACT_MAX_PRIO; i++) {
+ if (tb[i] &&
+ !flow_tcf_nl_parse_one_action_and_get(tb[i],
+ rta_type,
+ idx, data))
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -1;
+}
+
+/**
+ * Parse flower classifier options in the message, retrieving the requested
+ * attribute if found.
+ *
+ * @param opt
+ * flower section in the Netlink message received.
+ * @param rta_type
+ * The backward sequence of rta_types, as written in the attribute table,
+ * that we need to traverse in order to reach the requested object.
+ * @param idx
+ * Current location in rta_type table.
+ * @param[out] data
+ * data retrieved from the message query.
+ *
+ * @return
+ * 0 if data was found and retrieved, -1 otherwise.
+ */
+static int
+flow_tcf_nl_opts_parse_and_get(struct rtattr *opt,
+ uint16_t rta_type[], int idx, void *data)
+{
+ int tca_flower_max = flow_tcf_arr_val_max(rta_type, idx,
+ TCA_FLOWER_ACT);
+ struct rtattr *tb[tca_flower_max + 1];
+
+ if (!opt || idx < 0)
+ return -1;
+ flow_tcf_nl_parse_rtattr(tb, tca_flower_max,
+ RTA_DATA(opt), RTA_PAYLOAD(opt));
+ switch (rta_type[idx]) {
+ case TCA_FLOWER_ACT:
+ if (tb[TCA_FLOWER_ACT])
+ return flow_tcf_nl_action_parse_and_get
+ (tb[TCA_FLOWER_ACT],
+ rta_type, --idx, data);
+ break;
+ default:
+ break;
+ }
+ return -1;
+}
+
+/**
+ * Parse Netlink reply on filter query, retrieving the flow counters.
+ *
+ * @param nlh
+ * Message received from Netlink.
+ * @param rta_type
+ * The backward sequence of rta_types, as written in the attribute table,
+ * that we need to traverse in order to reach the requested object.
+ * @param idx
+ * Current location in rta_type table.
+ * @param[out] data
+ * data retrieved from the message query.
+ *
+ * @return
+ * 0 if data was found and retrieved, -1 otherwise.
+ */
+static int
+flow_tcf_nl_filter_parse_and_get(struct nlmsghdr *cnlh,
+ uint16_t rta_type[], int idx, void *data)
+{
+ struct nlmsghdr *nlh = cnlh;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len = nlh->nlmsg_len;
+ int tca_max = flow_tcf_arr_val_max(rta_type, idx, TCA_OPTIONS);
+ struct rtattr *tb[tca_max + 1];
+
+ if (idx < 0)
+ return -1;
+ if (nlh->nlmsg_type != RTM_NEWTFILTER &&
+ nlh->nlmsg_type != RTM_GETTFILTER &&
+ nlh->nlmsg_type != RTM_DELTFILTER)
+ return -1;
+ len -= NLMSG_LENGTH(sizeof(*t));
+ if (len < 0)
+ return -1;
+ flow_tcf_nl_parse_rtattr(tb, tca_max, TCA_RTA(t), len);
+ /* Not a TC flower flow - bail out */
+ if (!tb[TCA_KIND] ||
+ strcmp(RTA_DATA(tb[TCA_KIND]), "flower"))
+ return -1;
+ switch (rta_type[idx]) {
+ case TCA_OPTIONS:
+ if (tb[TCA_OPTIONS])
+ return flow_tcf_nl_opts_parse_and_get(tb[TCA_OPTIONS],
+ rta_type,
+ --idx, data);
+ break;
+ default:
+ break;
+ }
+ return -1;
+}
+
+/**
+ * A callback to parse Netlink reply on TC flower query.
+ *
+ * @param nlh
+ * Message received from Netlink.
+ * @param[out] data
+ * Pointer to data area to be filled by the parsing routine.
+ * Assumed to be a pointer to struct flow_tcf_stats_basic.
+ *
+ * @return
+ * MNL_CB_OK value.
+ */
+static int
+flow_tcf_nl_message_get_stats_basic(const struct nlmsghdr *nlh, void *data)
+{
+ /*
+ * The backward sequence of rta_types to pass in order to get
+ * to the counters.
+ */
+ uint16_t rta_type[] = { TCA_STATS_BASIC, TCA_ACT_STATS,
+ TCA_FLOWER_ACT, TCA_OPTIONS };
+ struct flow_tcf_stats_basic *sb_data = data;
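+ /* A union is used to drop the const qualifier without a cast warning. */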
+ union {
+ const struct nlmsghdr *c;
+ struct nlmsghdr *nc;
+ } tnlh = { .c = nlh };
+
+ if (!flow_tcf_nl_filter_parse_and_get(tnlh.nc, rta_type,
+ RTE_DIM(rta_type) - 1,
+ (void *)&sb_data->counters))
+ sb_data->valid = true;
+ return MNL_CB_OK;
+}
+
+/**
+ * Query a TC flower rule for its statistics via netlink.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the sub flow.
+ * @param[out] data
+ * data retrieved by the query.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_query_count(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct flow_tcf_stats_basic sb_data = { 0 };
+ struct rte_flow_query_count *qc = data;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
+ struct mnl_socket *nl = ctx->nl;
+ struct mlx5_flow *dev_flow;
+ struct nlmsghdr *nlh;
+ uint32_t seq = priv->tcf_context->seq++;
+ ssize_t ret;
+ assert(qc);
+
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ /* E-Switch flow can't be expanded. */
+ assert(!LIST_NEXT(dev_flow, next));
+ if (!dev_flow->flow->counter)
+ goto notsup_exit;
+ nlh = dev_flow->tcf.nlh;
+ nlh->nlmsg_type = RTM_GETTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ECHO;
+ nlh->nlmsg_seq = seq;
+ if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) == -1)
+ goto error_exit;
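+ /* Read replies and run the stats callback until no more data is returned. */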
+ do {
+ ret = mnl_socket_recvfrom(nl, ctx->buf, ctx->buf_size);
+ if (ret <= 0)
+ break;
+ ret = mnl_cb_run(ctx->buf, ret, seq,
+ mnl_socket_get_portid(nl),
+ flow_tcf_nl_message_get_stats_basic,
+ (void *)&sb_data);
+ } while (ret > 0);
+ if (sb_data.valid) {
+ /* Return the delta from last reset. */
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = sb_data.counters.packets - flow->counter->hits;
+ qc->bytes = sb_data.counters.bytes - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = sb_data.counters.packets;
+ flow->counter->bytes = sb_data.counters.bytes;
+ }
+ return 0;
+ }
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
+error_exit:
+ return rte_flow_error_set
+ (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "netlink: failed to read flow rule counters");
+notsup_exit:
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "counters are not available.");
+}
+
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+static int
+flow_tcf_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret = -EINVAL;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_tcf_query_count(dev, flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ return ret;
+}
+
+const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops = {
+ .validate = flow_tcf_validate,
+ .prepare = flow_tcf_prepare,
+ .translate = flow_tcf_translate,
+ .apply = flow_tcf_apply,
+ .remove = flow_tcf_remove,
+ .destroy = flow_tcf_destroy,
+ .query = flow_tcf_query,
+};
+
+/**
+ * Create and configure a libmnl socket for Netlink flow rules.
+ *
+ * @return
+ * A valid libmnl socket object pointer on success, NULL otherwise and
+ * rte_errno is set.
+ */
+static struct mnl_socket *
+flow_tcf_mnl_socket_create(void)
+{
+ struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
+
+ if (nl) {
+ mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
+ sizeof(int));
+ if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
+ return nl;
+ }
+ rte_errno = errno;
+ if (nl)
+ mnl_socket_close(nl);
+ return NULL;
+}
+
+/**
+ * Destroy a libmnl socket.
+ *
+ * @param nl
+ * Libmnl socket of the @p NETLINK_ROUTE kind.
+ */
+static void
+flow_tcf_mnl_socket_destroy(struct mnl_socket *nl)
+{
+ if (nl)
+ mnl_socket_close(nl);
+}
+
+/**
+ * Initialize ingress qdisc of a given network interface.
+ *
+ * @param ctx
+ * Pointer to tc-flower context to use.
+ * @param ifindex
+ * Index of network interface to initialize.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
+ unsigned int ifindex, struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ alignas(struct nlmsghdr)
+ uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
+
+ /* Destroy existing ingress qdisc and everything attached to it. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_DELQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ /* Ignore errors when qdisc is already absent. */
+ if (flow_tcf_nl_ack(ctx, nlh) &&
+ rte_errno != EINVAL && rte_errno != ENOENT)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to remove ingress"
+ " qdisc");
+ /* Create fresh ingress qdisc. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_NEWQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
+ if (flow_tcf_nl_ack(ctx, nlh))
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to create ingress"
+ " qdisc");
+ return 0;
+}
+
+/**
+ * Create libmnl context for Netlink flow rules.
+ *
+ * @return
+ * A valid libmnl socket object pointer on success, NULL otherwise and
+ * rte_errno is set.
+ */
+struct mlx5_flow_tcf_context *
+mlx5_flow_tcf_context_create(void)
+{
+ struct mlx5_flow_tcf_context *ctx = rte_zmalloc(__func__,
+ sizeof(*ctx),
+ sizeof(uint32_t));
+ if (!ctx)
+ goto error;
+ ctx->nl = flow_tcf_mnl_socket_create();
+ if (!ctx->nl)
+ goto error;
+ ctx->buf_size = MNL_SOCKET_BUFFER_SIZE;
+ ctx->buf = rte_zmalloc(__func__,
+ ctx->buf_size, sizeof(uint32_t));
+ if (!ctx->buf)
+ goto error;
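+ /* Seed the Netlink message sequence number randomly. */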
+ ctx->seq = random();
+ return ctx;
+error:
+ mlx5_flow_tcf_context_destroy(ctx);
+ return NULL;
+}
+
+/**
+ * Destroy a libmnl context.
+ *
+ * @param ctx
+ * Libmnl socket of the @p NETLINK_ROUTE kind.
+ */
+void
+mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx)
+{
+ if (!ctx)
+ return;
+ flow_tcf_mnl_socket_destroy(ctx->nl);
+ rte_free(ctx->buf);
+ rte_free(ctx);
+}
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
new file mode 100644
index 00000000..81bc39f9
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -0,0 +1,1825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <netinet/in.h>
+#include <sys/queue.h>
+#include <stdalign.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+
+#include "mlx5.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+#include "mlx5_glue.h"
+#include "mlx5_flow.h"
+
+/**
+ * Create Verbs flow counter with Verbs library.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] counter
+ * mlx5 flow counter object; it contains the counter id. The handle
+ * of the created Verbs flow counter is returned in the cs field
+ * (if counters are supported).
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_counter_create(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter *counter)
+{
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
+ struct priv *priv = dev->data->dev_private;
+ struct ibv_counter_set_init_attr init = {
+ .counter_set_id = counter->id};
+
+ counter->cs = mlx5_glue->create_counter_set(priv->ctx, &init);
+ if (!counter->cs) {
+ rte_errno = ENOTSUP;
+ return -ENOTSUP;
+ }
+ return 0;
+#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ struct priv *priv = dev->data->dev_private;
+ struct ibv_counters_init_attr init = {0};
+ struct ibv_counter_attach_attr attach = {0};
+ int ret;
+
+ counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
+ if (!counter->cs) {
+ rte_errno = ENOTSUP;
+ return -ENOTSUP;
+ }
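+ /* Attach two counters: index 0 for packets, index 1 for bytes. */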
+ attach.counter_desc = IBV_COUNTER_PACKETS;
+ attach.index = 0;
+ ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
+ if (!ret) {
+ attach.counter_desc = IBV_COUNTER_BYTES;
+ attach.index = 1;
+ ret = mlx5_glue->attach_counters
+ (counter->cs, &attach, NULL);
+ }
+ if (ret) {
+ claim_zero(mlx5_glue->destroy_counters(counter->cs));
+ counter->cs = NULL;
+ rte_errno = ret;
+ return -ret;
+ }
+ return 0;
+#else
+ (void)dev;
+ (void)counter;
+ rte_errno = ENOTSUP;
+ return -ENOTSUP;
+#endif
+}
+
+/**
+ * Get a flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * A pointer to the counter, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt;
+ int ret;
+
+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (!cnt->shared || cnt->shared != shared)
+ continue;
+ if (cnt->id != id)
+ continue;
+ cnt->ref_cnt++;
+ return cnt;
+ }
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ cnt->id = id;
+ cnt->shared = shared;
+ cnt->ref_cnt = 1;
+ cnt->hits = 0;
+ cnt->bytes = 0;
+ /* Create counter with Verbs. */
+ ret = flow_verbs_counter_create(dev, cnt);
+ if (!ret) {
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+ }
+ /* Some error occurred in Verbs library. */
+ rte_free(cnt);
+ rte_errno = -ret;
+ return NULL;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] counter
+ * Pointer to the counter handler.
+ */
+static void
+flow_verbs_counter_release(struct mlx5_flow_counter *counter)
+{
+ if (--counter->ref_cnt == 0) {
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
+ claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
+#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ claim_zero(mlx5_glue->destroy_counters(counter->cs));
+#endif
+ LIST_REMOVE(counter, next);
+ rte_free(counter);
+ }
+}
+
+/**
+ * Query a flow counter via Verbs library call.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+static int
+flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow, void *data,
+ struct rte_flow_error *error)
+{
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
+ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
+ struct rte_flow_query_count *qc = data;
+ uint64_t counters[2] = {0, 0};
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = flow->counter->cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int err = mlx5_glue->query_counter_set(&query_cs_attr,
+ &query_out);
+#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ int err = mlx5_glue->query_counters
+ (flow->counter->cs, counters,
+ RTE_DIM(counters),
+ IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
+#endif
+ if (err)
+ return rte_flow_error_set
+ (error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = counters[0] - flow->counter->hits;
+ qc->bytes = counters[1] - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = counters[0];
+ flow->counter->bytes = counters[1];
+ }
+ return 0;
+ }
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
+#else
+ (void)flow;
+ (void)data;
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not available");
+#endif
+}
+
+/**
+ * Add a verbs item specification into @p flow.
+ *
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] src
+ * Create specification.
+ * @param[in] size
+ * Size in bytes of the specification to copy.
+ */
+static void
+flow_verbs_spec_add(struct mlx5_flow *flow, void *src, unsigned int size)
+{
+ struct mlx5_flow_verbs *verbs = &flow->verbs;
+
+ if (verbs->specs) {
+ void *dst;
+
+ dst = (void *)(verbs->specs + verbs->size);
+ memcpy(dst, src, size);
+ ++verbs->attr->num_of_specs;
+ }
+ verbs->size += size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit field with all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_eth(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ if (spec) {
+ unsigned int i;
+
+ memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.val.ether_type = spec->type;
+ memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+ eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ }
+ eth.val.ether_type &= eth.mask.ether_type;
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow_verbs_spec_add(dev_flow, &eth, size);
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+}
+
+/**
+ * Update the VLAN tag in the Verbs Ethernet specification.
+ * This function assumes that the input is valid and there is space to add
+ * the requested item.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] eth
+ * Verbs structure containing the VLAN information to copy.
+ */
+static void
+flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
+ struct ibv_flow_spec_eth *eth)
+{
+ unsigned int i;
+ const enum ibv_flow_spec_type search = eth->type;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ struct ibv_flow_spec_eth *e =
+ (struct ibv_flow_spec_eth *)hdr;
+
+ e->val.vlan_tag = eth->val.vlan_tag;
+ e->mask.vlan_tag = eth->mask.vlan_tag;
+ e->val.ether_type = eth->val.ether_type;
+ e->mask.ether_type = eth->mask.ether_type;
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that holds all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_vlan(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ if (spec) {
+ eth.val.vlan_tag = spec->tci;
+ eth.mask.vlan_tag = mask->tci;
+ eth.val.vlan_tag &= eth.mask.vlan_tag;
+ eth.val.ether_type = spec->inner_type;
+ eth.mask.ether_type = mask->inner_type;
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ if (!(*item_flags & l2m)) {
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ flow_verbs_spec_add(dev_flow, &eth, size);
+ } else {
+ flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
+ size = 0; /* Only an update is done in eth specification. */
+ }
+ *item_flags |= tunnel ?
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_ipv4(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ struct ibv_flow_spec_ipv4_ext ipv4 = {
+ .type = IBV_FLOW_SPEC_IPV4_EXT |
+ (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (spec) {
+ ipv4.val = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4.val.src_ip &= ipv4.mask.src_ip;
+ ipv4.val.dst_ip &= ipv4.mask.dst_ip;
+ ipv4.val.proto &= ipv4.mask.proto;
+ ipv4.val.tos &= ipv4.mask.tos;
+ }
+ dev_flow->verbs.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, tunnel,
+ MLX5_IPV4_LAYER_TYPES,
+ MLX5_IPV4_IBV_RX_HASH);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
+ flow_verbs_spec_add(dev_flow, &ipv4, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_ipv6(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
+ struct ibv_flow_spec_ipv6 ipv6 = {
+ .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (spec) {
+ unsigned int i;
+ uint32_t vtc_flow_val;
+ uint32_t vtc_flow_mask;
+
+ memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6.val.src_ip));
+ memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6.val.dst_ip));
+ memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6.mask.src_ip));
+ memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6.mask.dst_ip));
+ vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
+ vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
+ ipv6.val.flow_label =
+ rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.val.next_hdr = spec->hdr.proto;
+ ipv6.val.hop_limit = spec->hdr.hop_limits;
+ ipv6.mask.flow_label =
+ rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.mask.next_hdr = mask->hdr.proto;
+ ipv6.mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+ ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+ ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+ }
+ ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.traffic_class &= ipv6.mask.traffic_class;
+ ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+ ipv6.val.hop_limit &= ipv6.mask.hop_limit;
+ }
+ dev_flow->verbs.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, tunnel,
+ MLX5_IPV6_LAYER_TYPES,
+ MLX5_IPV6_IBV_RX_HASH);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L3;
+ flow_verbs_spec_add(dev_flow, &ipv6, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_udp(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp udp = {
+ .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ if (spec) {
+ udp.val.dst_port = spec->hdr.dst_port;
+ udp.val.src_port = spec->hdr.src_port;
+ udp.mask.dst_port = mask->hdr.dst_port;
+ udp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp.val.src_port &= udp.mask.src_port;
+ udp.val.dst_port &= udp.mask.dst_port;
+ }
+ dev_flow->verbs.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
+ flow_verbs_spec_add(dev_flow, &udp, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_tcp(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp tcp = {
+ .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ *item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (spec) {
+ tcp.val.dst_port = spec->hdr.dst_port;
+ tcp.val.src_port = spec->hdr.src_port;
+ tcp.mask.dst_port = mask->hdr.dst_port;
+ tcp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp.val.src_port &= tcp.mask.src_port;
+ tcp.val.dst_port &= tcp.mask.dst_port;
+ }
+ dev_flow->verbs.hash_fields |=
+ mlx5_flow_hashfields_adjust(dev_flow, tunnel, ETH_RSS_TCP,
+ (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP));
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L4;
+ flow_verbs_spec_add(dev_flow, &tcp, size);
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_vxlan(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
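+ /* The 24-bit VNI is copied into bytes 1-3 of the 32-bit tunnel id. */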
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ flow_verbs_spec_add(dev_flow, &vxlan, size);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ *item_flags |= MLX5_FLOW_LAYER_VXLAN;
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_vxlan_gpe(const struct rte_flow_item *item,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan_gpe = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan_gpe.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan_gpe.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+ }
+ flow_verbs_spec_add(dev_flow, &vxlan_gpe, size);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ *item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+}
+
+/**
+ * Update the protocol in Verbs IPv4/IPv6 spec.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] search
+ * Specification type to search in order to update the IP protocol.
+ * @param[in] protocol
+ * Protocol value to set if none is present in the specification.
+ */
+static void
+flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ enum ibv_flow_spec_type search,
+ uint8_t protocol)
+{
+ unsigned int i;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ if (!attr)
+ return;
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ union {
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ } ip;
+
+ switch (search) {
+ case IBV_FLOW_SPEC_IPV4_EXT:
+ ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+ if (!ip.ipv4->val.proto) {
+ ip.ipv4->val.proto = protocol;
+ ip.ipv4->mask.proto = 0xff;
+ }
+ break;
+ case IBV_FLOW_SPEC_IPV6:
+ ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+ if (!ip.ipv6->val.next_hdr) {
+ ip.ipv6->val.next_hdr = protocol;
+ ip.ipv6->mask.next_hdr = 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] item_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_gre(const struct rte_flow_item *item __rte_unused,
+ uint64_t *item_flags,
+ struct mlx5_flow *dev_flow)
+{
+ struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
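+ /* Fall back to a generic tunnel spec when the GRE spec is unavailable. */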
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+#else
+ const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_gre);
+ struct ibv_flow_spec_gre tunnel = {
+ .type = IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ if (spec) {
+ tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+ tunnel.val.protocol = spec->protocol;
+ tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+ tunnel.mask.protocol = mask->protocol;
+ /* Remove unwanted bits from values. */
+ tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+ tunnel.val.protocol &= tunnel.mask.protocol;
+ tunnel.val.key &= tunnel.mask.key;
+ }
+#endif
+ if (*item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+ IBV_FLOW_SPEC_IPV4_EXT,
+ IPPROTO_GRE);
+ else
+ flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+ IBV_FLOW_SPEC_IPV6,
+ IPPROTO_GRE);
+ flow_verbs_spec_add(dev_flow, &tunnel, size);
+ verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ *item_flags |= MLX5_FLOW_LAYER_GRE;
+}
+
+/**
+ * Convert the @p item into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested item
+ * into the flow.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] action_flags
+ * Bit mask that marks all detected items.
+ * @param[in, out] dev_flow
+ * Pointer to dev_flow structure.
+ */
+static void
+flow_verbs_translate_item_mpls(const struct rte_flow_item *item __rte_unused,
+ uint64_t *action_flags __rte_unused,
+ struct mlx5_flow *dev_flow __rte_unused)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ if (spec) {
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ flow_verbs_spec_add(dev_flow, &mpls, size);
+ dev_flow->verbs.attr->priority = MLX5_PRIORITY_MAP_L2;
+ *action_flags |= MLX5_FLOW_LAYER_MPLS;
+#endif
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in the action flags.
+ *
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_drop(uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ struct ibv_flow_spec_action_drop drop = {
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+
+ flow_verbs_spec_add(dev_flow, &drop, size);
+ *action_flags |= MLX5_FLOW_ACTION_DROP;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in the action flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_queue(const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_action_queue *queue = action->conf;
+ struct rte_flow *flow = dev_flow->flow;
+
+ if (flow->queue)
+ (*flow->queue)[0] = queue->index;
+ flow->rss.queue_num = 1;
+ *action_flags |= MLX5_FLOW_ACTION_QUEUE;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in the action flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_rss(const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_action_rss *rss = action->conf;
+ struct rte_flow *flow = dev_flow->flow;
+
+ if (flow->queue)
+ memcpy((*flow->queue), rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ flow->rss.queue_num = rss->queue_num;
+ memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+ flow->rss.types = rss->types;
+ flow->rss.level = rss->level;
+ *action_flags |= MLX5_FLOW_ACTION_RSS;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in the action flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_flag
+ (const struct rte_flow_action *action __rte_unused,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
+ };
+ *action_flags |= MLX5_FLOW_ACTION_MARK;
+ flow_verbs_spec_add(dev_flow, &tag, size);
+}
+
+/**
+ * Update verbs specification to modify the flag to mark.
+ *
+ * @param[in, out] verbs
+ * Pointer to the mlx5_flow_verbs structure.
+ * @param[in] mark_id
+ * Mark identifier to replace the flag.
+ */
+static void
+flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+{
+ struct ibv_spec_header *hdr;
+ int i;
+
+ if (!verbs)
+ return;
+ /* Update Verbs specification. */
+ hdr = (struct ibv_spec_header *)verbs->specs;
+ if (!hdr)
+ return;
+ for (i = 0; i != verbs->attr->num_of_specs; ++i) {
+ if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
+ struct ibv_flow_spec_action_tag *t =
+ (struct ibv_flow_spec_action_tag *)hdr;
+
+ t->tag_id = mlx5_flow_mark_set(mark_id);
+ }
+ hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in the action flags.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ */
+static void
+flow_verbs_translate_action_mark(const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ };
+ struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
+
+ if (*action_flags & MLX5_FLOW_ACTION_FLAG) {
+ flow_verbs_mark_update(verbs, mark->id);
+ size = 0;
+ } else {
+ tag.tag_id = mlx5_flow_mark_set(mark->id);
+ flow_verbs_spec_add(dev_flow, &tag, size);
+ }
+ *action_flags |= MLX5_FLOW_ACTION_MARK;
+}
+
+/**
+ * Convert the @p action into a Verbs specification. This function assumes that
+ * the input is valid and that there is space to insert the requested action
+ * into the flow. The detected action is also recorded in the action flags.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] action_flags
+ * Pointer to the detected actions.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_translate_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ uint64_t *action_flags,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_count *count = action->conf;
+ struct rte_flow *flow = dev_flow->flow;
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
+ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+ struct ibv_flow_spec_counter_action counter = {
+ .type = IBV_FLOW_SPEC_ACTION_COUNT,
+ .size = size,
+ };
+#endif
+
+ if (!flow->counter) {
+ flow->counter = flow_verbs_counter_new(dev, count->shared,
+ count->id);
+ if (!flow->counter)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot get counter"
+ " context.");
+ }
+ *action_flags |= MLX5_FLOW_ACTION_COUNT;
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
+ counter.counter_set_handle = flow->counter->cs->handle;
+ flow_verbs_spec_add(dev_flow, &counter, size);
+#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ counter.counters = flow->counter->cs;
+ flow_verbs_spec_add(dev_flow, &counter, size);
+#endif
+ return 0;
+}
+
+/**
+ * Internal validation function. For validating both actions and items.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
+ int tunnel = 0;
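+ /* 0xff means no L3 item has constrained the next protocol yet. */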
+ uint8_t next_protocol = 0xff;
+
+ if (items == NULL)
+ return -1;
+ ret = mlx5_flow_validate_attributes(dev, attr, error);
+ if (ret < 0)
+ return ret;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int ret = 0;
+
+ tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_validate_item_vlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &rte_flow_item_tcp_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_validate_item_vxlan_gpe(items,
+ item_flags,
+ dev, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_validate_item_gre(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_validate_item_mpls(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ if (next_protocol != 0xff &&
+ next_protocol != IPPROTO_MPLS)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ item_flags |= MLX5_FLOW_LAYER_MPLS;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = mlx5_flow_validate_action_flag(action_flags,
+ attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = mlx5_flow_validate_action_mark(actions,
+ action_flags,
+ attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_validate_action_drop(action_flags,
+ attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(actions,
+ action_flags, dev,
+ attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mlx5_flow_validate_action_rss(actions,
+ action_flags, dev,
+ attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_validate_action_count(dev, attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "no fate action is found");
+ return 0;
+}
+
+/**
+ * Calculate the size in bytes needed for the action part of the Verbs
+ * flow. In addition, return bit-fields with all the detected actions, in
+ * order to avoid another iteration over the actions.
+ *
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] action_flags
+ * Pointer to the detected actions.
+ *
+ * @return
+ * The size of the memory needed for all actions.
+ */
+static int
+flow_verbs_get_actions_and_size(const struct rte_flow_action actions[],
+ uint64_t *action_flags)
+{
+ int size = 0;
+ uint64_t detected_actions = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ size += sizeof(struct ibv_flow_spec_action_tag);
+ detected_actions |= MLX5_FLOW_ACTION_FLAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ size += sizeof(struct ibv_flow_spec_action_tag);
+ detected_actions |= MLX5_FLOW_ACTION_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ size += sizeof(struct ibv_flow_spec_action_drop);
+ detected_actions |= MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ detected_actions |= MLX5_FLOW_ACTION_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ detected_actions |= MLX5_FLOW_ACTION_RSS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
+ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ size += sizeof(struct ibv_flow_spec_counter_action);
+#endif
+ detected_actions |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ default:
+ break;
+ }
+ }
+ *action_flags = detected_actions;
+ return size;
+}
+
+/**
+ * Calculate the size in bytes needed for the item part of the Verbs
+ * flow. In addition, return bit-fields with all the detected items, in
+ * order to avoid another iteration over the items.
+ *
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in, out] item_flags
+ * Pointer to the detected items.
+ *
+ * @return
+ * The size of the memory needed for all items.
+ */
+static int
+flow_verbs_get_items_and_size(const struct rte_flow_item items[],
+ uint64_t *item_flags)
+{
+ int size = 0;
+ uint64_t detected_items = 0;
+
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int tunnel = !!(detected_items & MLX5_FLOW_LAYER_TUNNEL);
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ size += sizeof(struct ibv_flow_spec_eth);
+ detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ size += sizeof(struct ibv_flow_spec_eth);
+ detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ size += sizeof(struct ibv_flow_spec_ipv4_ext);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ size += sizeof(struct ibv_flow_spec_ipv6);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ size += sizeof(struct ibv_flow_spec_tcp_udp);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ size += sizeof(struct ibv_flow_spec_tcp_udp);
+ detected_items |= tunnel ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ size += sizeof(struct ibv_flow_spec_tunnel);
+ detected_items |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ size += sizeof(struct ibv_flow_spec_tunnel);
+ detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ size += sizeof(struct ibv_flow_spec_gre);
+ detected_items |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ size += sizeof(struct ibv_flow_spec_mpls);
+ detected_items |= MLX5_FLOW_LAYER_MPLS;
+ break;
+#else
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ size += sizeof(struct ibv_flow_spec_tunnel);
+ detected_items |= MLX5_FLOW_LAYER_TUNNEL;
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ *item_flags = detected_items;
+ return size;
+}
+
+/**
+ * Internal preparation function. Allocate mlx5_flow with the required size.
+ * The required size is calculated based on the actions and items. This function
+ * also returns the detected actions and items for later use.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] item_flags
+ * Pointer to bit mask of all items detected.
+ * @param[out] action_flags
+ * Pointer to bit mask of all actions detected.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
+ * is set.
+ */
+static struct mlx5_flow *
+flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ uint64_t *item_flags,
+ uint64_t *action_flags,
+ struct rte_flow_error *error)
+{
+ uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+ struct mlx5_flow *flow;
+
+ size += flow_verbs_get_actions_and_size(actions, action_flags);
+ size += flow_verbs_get_items_and_size(items, item_flags);
+ flow = rte_calloc(__func__, 1, size, 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create flow");
+ return NULL;
+ }
+ flow->verbs.attr = (void *)(flow + 1);
+ flow->verbs.specs =
+ (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
+ return flow;
+}
+
+/**
+ * Fill the flow with verb spec.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5 flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
+ uint64_t priority = attr->priority;
+ struct priv *priv = dev->data->dev_private;
+
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = priv->config.flow_prio - 1;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ int ret;
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ flow_verbs_translate_action_flag(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ flow_verbs_translate_action_mark(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ flow_verbs_translate_action_drop(&action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ flow_verbs_translate_action_queue(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ flow_verbs_translate_action_rss(actions,
+ &action_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_verbs_translate_action_count(dev,
+ actions,
+ &action_flags,
+ dev_flow,
+ error);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ /* Device flow should have action flags by flow_drv_prepare(). */
+ assert(dev_flow->flow->actions == action_flags);
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_verbs_translate_item_eth(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ flow_verbs_translate_item_vlan(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ flow_verbs_translate_item_ipv4(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ flow_verbs_translate_item_ipv6(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ flow_verbs_translate_item_udp(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ flow_verbs_translate_item_tcp(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ flow_verbs_translate_item_vxlan(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ flow_verbs_translate_item_vxlan_gpe(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ flow_verbs_translate_item_gre(items, &item_flags,
+ dev_flow);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ flow_verbs_translate_item_mpls(items, &item_flags,
+ dev_flow);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "item not supported");
+ }
+ }
+ dev_flow->verbs.attr->priority =
+ mlx5_flow_adjust_priority(dev, priority,
+ dev_flow->verbs.attr->priority);
+ return 0;
+}
+
+/**
+ * Remove the flow from the NIC but keep it in memory.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow_verbs *verbs;
+ struct mlx5_flow *dev_flow;
+
+ if (!flow)
+ return;
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ verbs = &dev_flow->verbs;
+ if (verbs->flow) {
+ claim_zero(mlx5_glue->destroy_flow(verbs->flow));
+ verbs->flow = NULL;
+ }
+ if (verbs->hrxq) {
+ if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ if (flow->counter) {
+ flow_verbs_counter_release(flow->counter);
+ flow->counter = NULL;
+ }
+}
+
+/**
+ * Remove the flow from the NIC and the memory.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_flow *dev_flow;
+
+ if (!flow)
+ return;
+ flow_verbs_remove(dev, flow);
+ while (!LIST_EMPTY(&flow->dev_flows)) {
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ LIST_REMOVE(dev_flow, next);
+ rte_free(dev_flow);
+ }
+}
+
+/**
+ * Apply the flow to the NIC.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_verbs *verbs;
+ struct mlx5_flow *dev_flow;
+ int err;
+
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ verbs = &dev_flow->verbs;
+ if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+ verbs->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!verbs->hrxq) {
+ rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get drop hash queue");
+ goto error;
+ }
+ } else {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_hrxq_get(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num);
+ if (!hrxq)
+ hrxq = mlx5_hrxq_new(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num,
+ !!(dev_flow->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ verbs->hrxq = hrxq;
+ }
+ verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
+ verbs->attr);
+ if (!verbs->flow) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "hardware refuses to create flow");
+ goto error;
+ }
+ }
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ verbs = &dev_flow->verbs;
+ if (verbs->hrxq) {
+ if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
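flow_verbs_apply() above follows the usual acquire-then-roll-back idiom: everything claimed so far is released on failure, and the error code is saved across the cleanup so the caller still sees the original cause. A tiny stand-alone sketch of that idiom, with a stand-in resource type (not the real hrxq objects) and plain errno instead of rte_errno:

#include <errno.h>
#include <stdio.h>

struct resource { int acquired; };

static int
acquire(struct resource *r, int fail)
{
	if (fail) {
		errno = ENOMEM;
		return -1;
	}
	r->acquired = 1;
	return 0;
}

static void
release(struct resource *r)
{
	r->acquired = 0;
}

static int
apply_all(struct resource *res, int n, int fail_at)
{
	int err;
	int i;

	for (i = 0; i < n; i++)
		if (acquire(&res[i], i == fail_at))
			goto error;
	return 0;
error:
	err = errno;               /* save the original cause... */
	while (i--)
		release(&res[i]);  /* ...release what was acquired... */
	errno = err;               /* ...and restore it for the caller */
	return -errno;
}

int
main(void)
{
	struct resource res[3] = { { 0 }, { 0 }, { 0 } };
	int ret = apply_all(res, 3, 2);

	printf("ret=%d errno=%d\n", ret, errno);
	return 0;
}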
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+static int
+flow_verbs_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ int ret = -EINVAL;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_verbs_counter_query(dev, flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ return ret;
+}
+
+const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
+ .validate = flow_verbs_validate,
+ .prepare = flow_verbs_prepare,
+ .translate = flow_verbs_translate,
+ .apply = flow_verbs_apply,
+ .remove = flow_verbs_remove,
+ .destroy = flow_verbs_destroy,
+ .query = flow_verbs_query,
+};
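These callbacks form the Verbs backend behind the generic mlx5 flow API: a flow normally goes through validate, prepare, translate and apply, with remove/destroy on teardown. A hedged sketch of how such an ops table is typically driven; the names here are illustrative, not the actual mlx5 flow dispatcher:

#include <stddef.h>
#include <stdio.h>

/* Illustrative ops table; the real one is struct mlx5_flow_driver_ops. */
struct flow_ops {
	int (*validate)(void *flow);
	int (*translate)(void *flow);
	int (*apply)(void *flow);
	void (*remove)(void *flow);
};

static int ok(void *flow) { (void)flow; return 0; }
static void noop(void *flow) { (void)flow; }

static int
create_flow(const struct flow_ops *ops, void *flow)
{
	if (ops->validate(flow) || ops->translate(flow))
		return -1;
	if (ops->apply(flow)) {
		ops->remove(flow);   /* roll back on failure */
		return -1;
	}
	return 0;
}

int
main(void)
{
	const struct flow_ops verbs_ops = { ok, ok, ok, noop };

	printf("create: %d\n", create_flow(&verbs_ops, NULL));
	return 0;
}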
diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c
index 84f9492a..1afb114f 100644
--- a/drivers/net/mlx5/mlx5_glue.c
+++ b/drivers/net/mlx5/mlx5_glue.c
@@ -215,7 +215,7 @@ static struct ibv_counter_set *
mlx5_glue_create_counter_set(struct ibv_context *context,
struct ibv_counter_set_init_attr *init_attr)
{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
(void)context;
(void)init_attr;
return NULL;
@@ -227,7 +227,7 @@ mlx5_glue_create_counter_set(struct ibv_context *context,
static int
mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)
{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
(void)cs;
return ENOTSUP;
#else
@@ -240,7 +240,7 @@ mlx5_glue_describe_counter_set(struct ibv_context *context,
uint16_t counter_set_id,
struct ibv_counter_set_description *cs_desc)
{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
(void)context;
(void)counter_set_id;
(void)cs_desc;
@@ -254,7 +254,7 @@ static int
mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
struct ibv_counter_set_data *cs_data)
{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
(void)query_attr;
(void)cs_data;
return ENOTSUP;
@@ -263,6 +263,62 @@ mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
#endif
}
+static struct ibv_counters *
+mlx5_glue_create_counters(struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)context;
+ (void)init_attr;
+ return NULL;
+#else
+ return ibv_create_counters(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counters(struct ibv_counters *counters)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counters(counters);
+#endif
+}
+
+static int
+mlx5_glue_attach_counters(struct ibv_counters *counters,
+ struct ibv_counter_attach_attr *attr,
+ struct ibv_flow *flow)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ (void)attr;
+ (void)flow;
+ return ENOTSUP;
+#else
+ return ibv_attach_counters_point_flow(counters, attr, flow);
+#endif
+}
+
+static int
+mlx5_glue_query_counters(struct ibv_counters *counters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ (void)counters_value;
+ (void)ncounters;
+ (void)flags;
+ return ENOTSUP;
+#else
+ return ibv_read_counters(counters, counters_value, ncounters, flags);
+#endif
+}
+
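Each glue wrapper above follows the same pattern: when rdma-core lacks the feature (the corresponding HAVE_* macro is unset), the wrapper compiles to a stub that swallows its arguments and reports ENOTSUP or NULL, so callers never carry the #ifdef themselves. A minimal sketch of the pattern with a made-up feature macro:

#include <errno.h>
#include <stdio.h>

/* HAVE_FAKE_FEATURE is a made-up macro for illustration only; the #else
 * branch is not compiled unless it is defined. */
static int
glue_do_something(int arg)
{
#ifndef HAVE_FAKE_FEATURE
	(void)arg;          /* feature absent: swallow args, report ENOTSUP */
	return ENOTSUP;
#else
	return real_do_something(arg);
#endif
}

int
main(void)
{
	printf("ret=%d\n", glue_do_something(42));
	return 0;
}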
static void
mlx5_glue_ack_async_event(struct ibv_async_event *event)
{
@@ -346,6 +402,48 @@ mlx5_glue_dv_create_qp(struct ibv_context *context,
#endif
}
+static struct mlx5dv_flow_matcher *
+mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,
+ struct mlx5dv_flow_matcher_attr *matcher_attr)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ return mlx5dv_create_flow_matcher(context, matcher_attr);
+#else
+ (void)context;
+ (void)matcher_attr;
+ return NULL;
+#endif
+}
+
+static struct ibv_flow *
+mlx5_glue_dv_create_flow(struct mlx5dv_flow_matcher *matcher,
+ struct mlx5dv_flow_match_parameters *match_value,
+ size_t num_actions,
+ struct mlx5dv_flow_action_attr *actions_attr)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ return mlx5dv_create_flow(matcher, match_value,
+ num_actions, actions_attr);
+#else
+ (void)matcher;
+ (void)match_value;
+ (void)num_actions;
+ (void)actions_attr;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dv_destroy_flow_matcher(struct mlx5dv_flow_matcher *matcher)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ return mlx5dv_destroy_flow_matcher(matcher);
+#else
+ (void)matcher;
+ return 0;
+#endif
+}
+
alignas(RTE_CACHE_LINE_SIZE)
const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.version = MLX5_GLUE_VERSION,
@@ -382,6 +480,10 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.destroy_counter_set = mlx5_glue_destroy_counter_set,
.describe_counter_set = mlx5_glue_describe_counter_set,
.query_counter_set = mlx5_glue_query_counter_set,
+ .create_counters = mlx5_glue_create_counters,
+ .destroy_counters = mlx5_glue_destroy_counters,
+ .attach_counters = mlx5_glue_attach_counters,
+ .query_counters = mlx5_glue_query_counters,
.ack_async_event = mlx5_glue_ack_async_event,
.get_async_event = mlx5_glue_get_async_event,
.port_state_str = mlx5_glue_port_state_str,
@@ -392,4 +494,7 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.dv_set_context_attr = mlx5_glue_dv_set_context_attr,
.dv_init_obj = mlx5_glue_dv_init_obj,
.dv_create_qp = mlx5_glue_dv_create_qp,
+ .dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,
+ .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
+ .dv_create_flow = mlx5_glue_dv_create_flow,
};
diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h
index e584d367..44bfefed 100644
--- a/drivers/net/mlx5/mlx5_glue.h
+++ b/drivers/net/mlx5/mlx5_glue.h
@@ -23,7 +23,7 @@
#define MLX5_GLUE_VERSION ""
#endif
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
struct ibv_counter_set;
struct ibv_counter_set_data;
struct ibv_counter_set_description;
@@ -31,6 +31,12 @@ struct ibv_counter_set_init_attr;
struct ibv_query_counter_set_attr;
#endif
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+struct ibv_counters;
+struct ibv_counters_init_attr;
+struct ibv_counter_attach_attr;
+#endif
+
#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
struct mlx5dv_qp_init_attr;
#endif
@@ -39,6 +45,13 @@ struct mlx5dv_qp_init_attr;
struct mlx5dv_wq_init_attr;
#endif
+#ifndef HAVE_IBV_FLOW_DV_SUPPORT
+struct mlx5dv_flow_matcher;
+struct mlx5dv_flow_matcher_attr;
+struct mlx5dv_flow_action_attr;
+struct mlx5dv_flow_match_parameters;
+#endif
+
/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
struct mlx5_glue {
const char *version;
@@ -99,6 +112,17 @@ struct mlx5_glue {
struct ibv_counter_set_description *cs_desc);
int (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,
struct ibv_counter_set_data *cs_data);
+ struct ibv_counters *(*create_counters)
+ (struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr);
+ int (*destroy_counters)(struct ibv_counters *counters);
+ int (*attach_counters)(struct ibv_counters *counters,
+ struct ibv_counter_attach_attr *attr,
+ struct ibv_flow *flow);
+ int (*query_counters)(struct ibv_counters *counters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags);
void (*ack_async_event)(struct ibv_async_event *event);
int (*get_async_event)(struct ibv_context *context,
struct ibv_async_event *event);
@@ -122,6 +146,14 @@ struct mlx5_glue {
(struct ibv_context *context,
struct ibv_qp_init_attr_ex *qp_init_attr_ex,
struct mlx5dv_qp_init_attr *dv_qp_init_attr);
+ struct mlx5dv_flow_matcher *(*dv_create_flow_matcher)
+ (struct ibv_context *context,
+ struct mlx5dv_flow_matcher_attr *matcher_attr);
+ int (*dv_destroy_flow_matcher)(struct mlx5dv_flow_matcher *matcher);
+ struct ibv_flow *(*dv_create_flow)(struct mlx5dv_flow_matcher *matcher,
+ struct mlx5dv_flow_match_parameters *match_value,
+ size_t num_actions,
+ struct mlx5dv_flow_action_attr *actions_attr);
};
const struct mlx5_glue *mlx5_glue;
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 12ee37f5..672a4761 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -49,7 +49,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
struct ifreq request;
int ret;
- ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request, 0);
+ ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
if (ret)
return ret;
memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 1d1bcb5f..f4b15d3f 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -277,6 +277,23 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
uintptr_t end = 0;
uint32_t idx = 0;
+ /* MR for external memory doesn't have memseg list. */
+ if (mr->msl == NULL) {
+ struct ibv_mr *ibv_mr = mr->ibv_mr;
+
+ assert(mr->ms_bmp_n == 1);
+ assert(mr->ms_n == 1);
+ assert(base_idx == 0);
+ /*
+ * Can't search it from memseg list but get it directly from
+ * verbs MR as there's only one chunk.
+ */
+ entry->start = (uintptr_t)ibv_mr->addr;
+ entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ /* Returning 1 ends iteration. */
+ return 1;
+ }
for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
if (rte_bitmap_get(mr->ms_bmp, idx)) {
const struct rte_memseg_list *msl;
@@ -811,6 +828,7 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
mr = mr_lookup_dev_list(dev, &entry, start);
if (mr == NULL)
continue;
+ assert(mr->msl); /* Can't be external memory. */
ms = rte_mem_virt2memseg((void *)start, msl);
assert(ms != NULL);
assert(msl->page_sz == ms->hugepage_sz);
@@ -1061,6 +1079,139 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
(void *)mr_ctrl, mr_ctrl->cur_gen);
}
+/**
+ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
+ *
+ * An externally allocated chunk is registered and an MR is created for the
+ * chunk. The MR object is added to the global list. If the memseg list of an
+ * MR object (mr->msl) is null, the MR object can be regarded as externally
+ * allocated memory.
+ *
+ * Once external memory is registered, it should be static. If the memory is
+ * freed and the virtual address range gets different physical memory mapped
+ * again, it may cause a crash on the device due to a stale translation entry.
+ * The PMD can't track the free event of the external memory for now.
+ */
+static void
+mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ struct rte_eth_dev *dev = data->dev;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
+ struct mlx5_mr *mr = NULL;
+ uintptr_t addr = (uintptr_t)memhdr->addr;
+ size_t len = memhdr->len;
+ struct mlx5_mr_cache entry;
+ uint32_t lkey;
+
+ /* If already registered, it should return. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_lookup_dev(dev, &entry, addr);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ if (lkey != UINT32_MAX)
+ return;
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (mr == NULL) {
+ DRV_LOG(WARNING,
+ "port %u unable to allocate memory for a new MR of"
+ " mempool (%s).",
+ dev->data->port_id, mp->name);
+ data->ret = -1;
+ return;
+ }
+ DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
+ dev->data->port_id, mem_idx, mp->name);
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ DRV_LOG(WARNING,
+ "port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_free(mr);
+ data->ret = -1;
+ return;
+ }
+ mr->msl = NULL; /* Mark it as external memory. */
+ mr->ms_bmp = NULL;
+ mr->ms_n = 1;
+ mr->ms_bmp_n = 1;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DRV_LOG(DEBUG,
+ "port %u MR CREATED (%p) for external memory %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Insert to the local cache table. */
+ mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
+}
+
+/**
+ * Register MRs for all memory chunks of a Mempool with externally allocated
+ * memory and fill the local cache.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static uint32_t
+mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
+ return data.ret;
+}
+
+/**
+ * Register MRs for all memory chunks of a Mempool with externally allocated
+ * memory and search the LKey of the address to return.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ * @param mp
+ * Pointer to registering Mempool where addr belongs.
+ *
+ * @return
+ * LKey for address on success, UINT32_MAX on failure.
+ */
+uint32_t
+mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+ struct rte_mempool *mp)
+{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq_ctrl->priv;
+
+ mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
+ return mlx5_tx_addr2mr_bh(txq, addr);
+}
+
/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
@@ -1104,6 +1255,10 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
};
rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
+ if (data.ret < 0 && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
+ }
return data.ret;
}
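The hunk above wires in the fallback: regular memseg-based registration is tried first, and when it fails with ENXIO (the mempool is not backed by DPDK memsegs), the chunks are registered one by one as external memory. A rough stand-alone sketch of that decision; the functions are stand-ins for the real mlx5_mr_update_mp_cb()/mlx5_mr_update_ext_mp() paths, and plain errno stands in for rte_errno:

#include <errno.h>
#include <stdio.h>

static int
update_from_memsegs(const char *mp_name)
{
	(void)mp_name;
	errno = ENXIO;      /* pretend the mempool has no memseg backing */
	return -1;
}

static int
update_external_chunks(const char *mp_name)
{
	printf("registering %s chunk by chunk\n", mp_name);
	return 0;
}

static int
update_mp(const char *mp_name)
{
	int ret = update_from_memsegs(mp_name);

	if (ret < 0 && errno == ENXIO)
		/* Mempool may have externally allocated memory. */
		return update_external_chunks(mp_name);
	return ret;
}

int
main(void)
{
	return update_mp("ext_mp");
}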
diff --git a/drivers/net/mlx5/mlx5_nl_flow.c b/drivers/net/mlx5/mlx5_nl_flow.c
deleted file mode 100644
index a1c8c340..00000000
--- a/drivers/net/mlx5/mlx5_nl_flow.c
+++ /dev/null
@@ -1,1248 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd
- */
-
-#include <assert.h>
-#include <errno.h>
-#include <libmnl/libmnl.h>
-#include <linux/if_ether.h>
-#include <linux/netlink.h>
-#include <linux/pkt_cls.h>
-#include <linux/pkt_sched.h>
-#include <linux/rtnetlink.h>
-#include <linux/tc_act/tc_gact.h>
-#include <linux/tc_act/tc_mirred.h>
-#include <netinet/in.h>
-#include <stdalign.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/socket.h>
-
-#include <rte_byteorder.h>
-#include <rte_errno.h>
-#include <rte_ether.h>
-#include <rte_flow.h>
-
-#include "mlx5.h"
-#include "mlx5_autoconf.h"
-
-#ifdef HAVE_TC_ACT_VLAN
-
-#include <linux/tc_act/tc_vlan.h>
-
-#else /* HAVE_TC_ACT_VLAN */
-
-#define TCA_VLAN_ACT_POP 1
-#define TCA_VLAN_ACT_PUSH 2
-#define TCA_VLAN_ACT_MODIFY 3
-#define TCA_VLAN_PARMS 2
-#define TCA_VLAN_PUSH_VLAN_ID 3
-#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
-#define TCA_VLAN_PAD 5
-#define TCA_VLAN_PUSH_VLAN_PRIORITY 6
-
-struct tc_vlan {
- tc_gen;
- int v_action;
-};
-
-#endif /* HAVE_TC_ACT_VLAN */
-
-/* Normally found in linux/netlink.h. */
-#ifndef NETLINK_CAP_ACK
-#define NETLINK_CAP_ACK 10
-#endif
-
-/* Normally found in linux/pkt_sched.h. */
-#ifndef TC_H_MIN_INGRESS
-#define TC_H_MIN_INGRESS 0xfff2u
-#endif
-
-/* Normally found in linux/pkt_cls.h. */
-#ifndef TCA_CLS_FLAGS_SKIP_SW
-#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
-#endif
-#ifndef HAVE_TCA_FLOWER_ACT
-#define TCA_FLOWER_ACT 3
-#endif
-#ifndef HAVE_TCA_FLOWER_FLAGS
-#define TCA_FLOWER_FLAGS 22
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
-#define TCA_FLOWER_KEY_ETH_TYPE 8
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
-#define TCA_FLOWER_KEY_ETH_DST 4
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
-#define TCA_FLOWER_KEY_ETH_DST_MASK 5
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
-#define TCA_FLOWER_KEY_ETH_SRC 6
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
-#define TCA_FLOWER_KEY_ETH_SRC_MASK 7
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
-#define TCA_FLOWER_KEY_IP_PROTO 9
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
-#define TCA_FLOWER_KEY_IPV4_SRC 10
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
-#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
-#define TCA_FLOWER_KEY_IPV4_DST 12
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
-#define TCA_FLOWER_KEY_IPV4_DST_MASK 13
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
-#define TCA_FLOWER_KEY_IPV6_SRC 14
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
-#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
-#define TCA_FLOWER_KEY_IPV6_DST 16
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
-#define TCA_FLOWER_KEY_IPV6_DST_MASK 17
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
-#define TCA_FLOWER_KEY_TCP_SRC 18
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
-#define TCA_FLOWER_KEY_TCP_SRC_MASK 35
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
-#define TCA_FLOWER_KEY_TCP_DST 19
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
-#define TCA_FLOWER_KEY_TCP_DST_MASK 36
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
-#define TCA_FLOWER_KEY_UDP_SRC 20
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
-#define TCA_FLOWER_KEY_UDP_SRC_MASK 37
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
-#define TCA_FLOWER_KEY_UDP_DST 21
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
-#define TCA_FLOWER_KEY_UDP_DST_MASK 38
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
-#define TCA_FLOWER_KEY_VLAN_ID 23
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
-#define TCA_FLOWER_KEY_VLAN_PRIO 24
-#endif
-#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
-#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
-#endif
-
-/** Parser state definitions for mlx5_nl_flow_trans[]. */
-enum mlx5_nl_flow_trans {
- INVALID,
- BACK,
- ATTR,
- PATTERN,
- ITEM_VOID,
- ITEM_PORT_ID,
- ITEM_ETH,
- ITEM_VLAN,
- ITEM_IPV4,
- ITEM_IPV6,
- ITEM_TCP,
- ITEM_UDP,
- ACTIONS,
- ACTION_VOID,
- ACTION_PORT_ID,
- ACTION_DROP,
- ACTION_OF_POP_VLAN,
- ACTION_OF_PUSH_VLAN,
- ACTION_OF_SET_VLAN_VID,
- ACTION_OF_SET_VLAN_PCP,
- END,
-};
-
-#define TRANS(...) (const enum mlx5_nl_flow_trans []){ __VA_ARGS__, INVALID, }
-
-#define PATTERN_COMMON \
- ITEM_VOID, ITEM_PORT_ID, ACTIONS
-#define ACTIONS_COMMON \
- ACTION_VOID, ACTION_OF_POP_VLAN, ACTION_OF_PUSH_VLAN, \
- ACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP
-#define ACTIONS_FATE \
- ACTION_PORT_ID, ACTION_DROP
-
-/** Parser state transitions used by mlx5_nl_flow_transpose(). */
-static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = {
- [INVALID] = NULL,
- [BACK] = NULL,
- [ATTR] = TRANS(PATTERN),
- [PATTERN] = TRANS(ITEM_ETH, PATTERN_COMMON),
- [ITEM_VOID] = TRANS(BACK),
- [ITEM_PORT_ID] = TRANS(BACK),
- [ITEM_ETH] = TRANS(ITEM_IPV4, ITEM_IPV6, ITEM_VLAN, PATTERN_COMMON),
- [ITEM_VLAN] = TRANS(ITEM_IPV4, ITEM_IPV6, PATTERN_COMMON),
- [ITEM_IPV4] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
- [ITEM_IPV6] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
- [ITEM_TCP] = TRANS(PATTERN_COMMON),
- [ITEM_UDP] = TRANS(PATTERN_COMMON),
- [ACTIONS] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
- [ACTION_VOID] = TRANS(BACK),
- [ACTION_PORT_ID] = TRANS(ACTION_VOID, END),
- [ACTION_DROP] = TRANS(ACTION_VOID, END),
- [ACTION_OF_POP_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
- [ACTION_OF_PUSH_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
- [ACTION_OF_SET_VLAN_VID] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
- [ACTION_OF_SET_VLAN_PCP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
- [END] = NULL,
-};
-
-/** Empty masks for known item types. */
-static const union {
- struct rte_flow_item_port_id port_id;
- struct rte_flow_item_eth eth;
- struct rte_flow_item_vlan vlan;
- struct rte_flow_item_ipv4 ipv4;
- struct rte_flow_item_ipv6 ipv6;
- struct rte_flow_item_tcp tcp;
- struct rte_flow_item_udp udp;
-} mlx5_nl_flow_mask_empty;
-
-/** Supported masks for known item types. */
-static const struct {
- struct rte_flow_item_port_id port_id;
- struct rte_flow_item_eth eth;
- struct rte_flow_item_vlan vlan;
- struct rte_flow_item_ipv4 ipv4;
- struct rte_flow_item_ipv6 ipv6;
- struct rte_flow_item_tcp tcp;
- struct rte_flow_item_udp udp;
-} mlx5_nl_flow_mask_supported = {
- .port_id = {
- .id = 0xffffffff,
- },
- .eth = {
- .type = RTE_BE16(0xffff),
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- },
- .vlan = {
- /* PCP and VID only, no DEI. */
- .tci = RTE_BE16(0xefff),
- .inner_type = RTE_BE16(0xffff),
- },
- .ipv4.hdr = {
- .next_proto_id = 0xff,
- .src_addr = RTE_BE32(0xffffffff),
- .dst_addr = RTE_BE32(0xffffffff),
- },
- .ipv6.hdr = {
- .proto = 0xff,
- .src_addr =
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .dst_addr =
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- },
- .tcp.hdr = {
- .src_port = RTE_BE16(0xffff),
- .dst_port = RTE_BE16(0xffff),
- },
- .udp.hdr = {
- .src_port = RTE_BE16(0xffff),
- .dst_port = RTE_BE16(0xffff),
- },
-};
-
-/**
- * Retrieve mask for pattern item.
- *
- * This function does basic sanity checks on a pattern item in order to
- * return the most appropriate mask for it.
- *
- * @param[in] item
- * Item specification.
- * @param[in] mask_default
- * Default mask for pattern item as specified by the flow API.
- * @param[in] mask_supported
- * Mask fields supported by the implementation.
- * @param[in] mask_empty
- * Empty mask to return when there is no specification.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * Either @p item->mask or one of the mask parameters on success, NULL
- * otherwise and rte_errno is set.
- */
-static const void *
-mlx5_nl_flow_item_mask(const struct rte_flow_item *item,
- const void *mask_default,
- const void *mask_supported,
- const void *mask_empty,
- size_t mask_size,
- struct rte_flow_error *error)
-{
- const uint8_t *mask;
- size_t i;
-
- /* item->last and item->mask cannot exist without item->spec. */
- if (!item->spec && (item->mask || item->last)) {
- rte_flow_error_set
- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
- "\"mask\" or \"last\" field provided without a"
- " corresponding \"spec\"");
- return NULL;
- }
- /* No spec, no mask, no problem. */
- if (!item->spec)
- return mask_empty;
- mask = item->mask ? item->mask : mask_default;
- assert(mask);
- /*
- * Single-pass check to make sure that:
- * - Mask is supported, no bits are set outside mask_supported.
- * - Both item->spec and item->last are included in mask.
- */
- for (i = 0; i != mask_size; ++i) {
- if (!mask[i])
- continue;
- if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
- ((const uint8_t *)mask_supported)[i]) {
- rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask, "unsupported field found in \"mask\"");
- return NULL;
- }
- if (item->last &&
- (((const uint8_t *)item->spec)[i] & mask[i]) !=
- (((const uint8_t *)item->last)[i] & mask[i])) {
- rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST,
- item->last,
- "range between \"spec\" and \"last\" not"
- " comprised in \"mask\"");
- return NULL;
- }
- }
- return mask;
-}
-
-/**
- * Transpose flow rule description to rtnetlink message.
- *
- * This function transposes a flow rule description to a traffic control
- * (TC) filter creation message ready to be sent over Netlink.
- *
- * Target interface is specified as the first entry of the @p ptoi table.
- * Subsequent entries enable this function to resolve other DPDK port IDs
- * found in the flow rule.
- *
- * @param[out] buf
- * Output message buffer. May be NULL when @p size is 0.
- * @param size
- * Size of @p buf. Message may be truncated if not large enough.
- * @param[in] ptoi
- * DPDK port ID to network interface index translation table. This table
- * is terminated by an entry with a zero ifindex value.
- * @param[in] attr
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification.
- * @param[in] actions
- * Associated actions.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * A positive value representing the exact size of the message in bytes
- * regardless of the @p size parameter on success, a negative errno value
- * otherwise and rte_errno is set.
- */
-int
-mlx5_nl_flow_transpose(void *buf,
- size_t size,
- const struct mlx5_nl_flow_ptoi *ptoi,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action *actions,
- struct rte_flow_error *error)
-{
- alignas(struct nlmsghdr)
- uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)];
- const struct rte_flow_item *item;
- const struct rte_flow_action *action;
- unsigned int n;
- uint32_t act_index_cur;
- bool in_port_id_set;
- bool eth_type_set;
- bool vlan_present;
- bool vlan_eth_type_set;
- bool ip_proto_set;
- struct nlattr *na_flower;
- struct nlattr *na_flower_act;
- struct nlattr *na_vlan_id;
- struct nlattr *na_vlan_priority;
- const enum mlx5_nl_flow_trans *trans;
- const enum mlx5_nl_flow_trans *back;
-
- if (!size)
- goto error_nobufs;
-init:
- item = pattern;
- action = actions;
- n = 0;
- act_index_cur = 0;
- in_port_id_set = false;
- eth_type_set = false;
- vlan_present = false;
- vlan_eth_type_set = false;
- ip_proto_set = false;
- na_flower = NULL;
- na_flower_act = NULL;
- na_vlan_id = NULL;
- na_vlan_priority = NULL;
- trans = TRANS(ATTR);
- back = trans;
-trans:
- switch (trans[n++]) {
- union {
- const struct rte_flow_item_port_id *port_id;
- const struct rte_flow_item_eth *eth;
- const struct rte_flow_item_vlan *vlan;
- const struct rte_flow_item_ipv4 *ipv4;
- const struct rte_flow_item_ipv6 *ipv6;
- const struct rte_flow_item_tcp *tcp;
- const struct rte_flow_item_udp *udp;
- } spec, mask;
- union {
- const struct rte_flow_action_port_id *port_id;
- const struct rte_flow_action_of_push_vlan *of_push_vlan;
- const struct rte_flow_action_of_set_vlan_vid *
- of_set_vlan_vid;
- const struct rte_flow_action_of_set_vlan_pcp *
- of_set_vlan_pcp;
- } conf;
- struct nlmsghdr *nlh;
- struct tcmsg *tcm;
- struct nlattr *act_index;
- struct nlattr *act;
- unsigned int i;
-
- case INVALID:
- if (item->type)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- item, "unsupported pattern item combination");
- else if (action->type)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- action, "unsupported action combination");
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "flow rule lacks some kind of fate action");
- case BACK:
- trans = back;
- n = 0;
- goto trans;
- case ATTR:
- /*
- * Supported attributes: no groups, some priorities and
- * ingress only. Don't care about transfer as it is the
- * caller's problem.
- */
- if (attr->group)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- attr, "groups are not supported");
- if (attr->priority > 0xfffe)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- attr, "lowest priority level is 0xfffe");
- if (!attr->ingress)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- attr, "only ingress is supported");
- if (attr->egress)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- attr, "egress is not supported");
- if (size < mnl_nlmsg_size(sizeof(*tcm)))
- goto error_nobufs;
- nlh = mnl_nlmsg_put_header(buf);
- nlh->nlmsg_type = 0;
- nlh->nlmsg_flags = 0;
- nlh->nlmsg_seq = 0;
- tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
- tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = ptoi[0].ifindex;
- /*
- * Let kernel pick a handle by default. A predictable handle
- * can be set by the caller on the resulting buffer through
- * mlx5_nl_flow_brand().
- */
- tcm->tcm_handle = 0;
- tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
- /*
- * Priority cannot be zero to prevent the kernel from
- * picking one automatically.
- */
- tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
- RTE_BE16(ETH_P_ALL));
- break;
- case PATTERN:
- if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower"))
- goto error_nobufs;
- na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS);
- if (!na_flower)
- goto error_nobufs;
- if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS,
- TCA_CLS_FLAGS_SKIP_SW))
- goto error_nobufs;
- break;
- case ITEM_VOID:
- if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
- goto trans;
- ++item;
- break;
- case ITEM_PORT_ID:
- if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID)
- goto trans;
- mask.port_id = mlx5_nl_flow_item_mask
- (item, &rte_flow_item_port_id_mask,
- &mlx5_nl_flow_mask_supported.port_id,
- &mlx5_nl_flow_mask_empty.port_id,
- sizeof(mlx5_nl_flow_mask_supported.port_id), error);
- if (!mask.port_id)
- return -rte_errno;
- if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) {
- in_port_id_set = 1;
- ++item;
- break;
- }
- spec.port_id = item->spec;
- if (mask.port_id->id && mask.port_id->id != 0xffffffff)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask.port_id,
- "no support for partial mask on"
- " \"id\" field");
- if (!mask.port_id->id)
- i = 0;
- else
- for (i = 0; ptoi[i].ifindex; ++i)
- if (ptoi[i].port_id == spec.port_id->id)
- break;
- if (!ptoi[i].ifindex)
- return rte_flow_error_set
- (error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
- spec.port_id,
- "missing data to convert port ID to ifindex");
- tcm = mnl_nlmsg_get_payload(buf);
- if (in_port_id_set &&
- ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
- spec.port_id,
- "cannot match traffic for several port IDs"
- " through a single flow rule");
- tcm->tcm_ifindex = ptoi[i].ifindex;
- in_port_id_set = 1;
- ++item;
- break;
- case ITEM_ETH:
- if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
- goto trans;
- mask.eth = mlx5_nl_flow_item_mask
- (item, &rte_flow_item_eth_mask,
- &mlx5_nl_flow_mask_supported.eth,
- &mlx5_nl_flow_mask_empty.eth,
- sizeof(mlx5_nl_flow_mask_supported.eth), error);
- if (!mask.eth)
- return -rte_errno;
- if (mask.eth == &mlx5_nl_flow_mask_empty.eth) {
- ++item;
- break;
- }
- spec.eth = item->spec;
- if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask.eth,
- "no support for partial mask on"
- " \"type\" field");
- if (mask.eth->type) {
- if (!mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_ETH_TYPE,
- spec.eth->type))
- goto error_nobufs;
- eth_type_set = 1;
- }
- if ((!is_zero_ether_addr(&mask.eth->dst) &&
- (!mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_ETH_DST,
- ETHER_ADDR_LEN,
- spec.eth->dst.addr_bytes) ||
- !mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_ETH_DST_MASK,
- ETHER_ADDR_LEN,
- mask.eth->dst.addr_bytes))) ||
- (!is_zero_ether_addr(&mask.eth->src) &&
- (!mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_ETH_SRC,
- ETHER_ADDR_LEN,
- spec.eth->src.addr_bytes) ||
- !mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_ETH_SRC_MASK,
- ETHER_ADDR_LEN,
- mask.eth->src.addr_bytes))))
- goto error_nobufs;
- ++item;
- break;
- case ITEM_VLAN:
- if (item->type != RTE_FLOW_ITEM_TYPE_VLAN)
- goto trans;
- mask.vlan = mlx5_nl_flow_item_mask
- (item, &rte_flow_item_vlan_mask,
- &mlx5_nl_flow_mask_supported.vlan,
- &mlx5_nl_flow_mask_empty.vlan,
- sizeof(mlx5_nl_flow_mask_supported.vlan), error);
- if (!mask.vlan)
- return -rte_errno;
- if (!eth_type_set &&
- !mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_ETH_TYPE,
- RTE_BE16(ETH_P_8021Q)))
- goto error_nobufs;
- eth_type_set = 1;
- vlan_present = 1;
- if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) {
- ++item;
- break;
- }
- spec.vlan = item->spec;
- if ((mask.vlan->tci & RTE_BE16(0xe000) &&
- (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) ||
- (mask.vlan->tci & RTE_BE16(0x0fff) &&
- (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) ||
- (mask.vlan->inner_type &&
- mask.vlan->inner_type != RTE_BE16(0xffff)))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask.vlan,
- "no support for partial masks on"
- " \"tci\" (PCP and VID parts) and"
- " \"inner_type\" fields");
- if (mask.vlan->inner_type) {
- if (!mnl_attr_put_u16_check
- (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
- spec.vlan->inner_type))
- goto error_nobufs;
- vlan_eth_type_set = 1;
- }
- if ((mask.vlan->tci & RTE_BE16(0xe000) &&
- !mnl_attr_put_u8_check
- (buf, size, TCA_FLOWER_KEY_VLAN_PRIO,
- (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) ||
- (mask.vlan->tci & RTE_BE16(0x0fff) &&
- !mnl_attr_put_u16_check
- (buf, size, TCA_FLOWER_KEY_VLAN_ID,
- rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff)))))
- goto error_nobufs;
- ++item;
- break;
- case ITEM_IPV4:
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
- goto trans;
- mask.ipv4 = mlx5_nl_flow_item_mask
- (item, &rte_flow_item_ipv4_mask,
- &mlx5_nl_flow_mask_supported.ipv4,
- &mlx5_nl_flow_mask_empty.ipv4,
- sizeof(mlx5_nl_flow_mask_supported.ipv4), error);
- if (!mask.ipv4)
- return -rte_errno;
- if ((!eth_type_set || !vlan_eth_type_set) &&
- !mnl_attr_put_u16_check(buf, size,
- vlan_present ?
- TCA_FLOWER_KEY_VLAN_ETH_TYPE :
- TCA_FLOWER_KEY_ETH_TYPE,
- RTE_BE16(ETH_P_IP)))
- goto error_nobufs;
- eth_type_set = 1;
- vlan_eth_type_set = 1;
- if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) {
- ++item;
- break;
- }
- spec.ipv4 = item->spec;
- if (mask.ipv4->hdr.next_proto_id &&
- mask.ipv4->hdr.next_proto_id != 0xff)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask.ipv4,
- "no support for partial mask on"
- " \"hdr.next_proto_id\" field");
- if (mask.ipv4->hdr.next_proto_id) {
- if (!mnl_attr_put_u8_check
- (buf, size, TCA_FLOWER_KEY_IP_PROTO,
- spec.ipv4->hdr.next_proto_id))
- goto error_nobufs;
- ip_proto_set = 1;
- }
- if ((mask.ipv4->hdr.src_addr &&
- (!mnl_attr_put_u32_check(buf, size,
- TCA_FLOWER_KEY_IPV4_SRC,
- spec.ipv4->hdr.src_addr) ||
- !mnl_attr_put_u32_check(buf, size,
- TCA_FLOWER_KEY_IPV4_SRC_MASK,
- mask.ipv4->hdr.src_addr))) ||
- (mask.ipv4->hdr.dst_addr &&
- (!mnl_attr_put_u32_check(buf, size,
- TCA_FLOWER_KEY_IPV4_DST,
- spec.ipv4->hdr.dst_addr) ||
- !mnl_attr_put_u32_check(buf, size,
- TCA_FLOWER_KEY_IPV4_DST_MASK,
- mask.ipv4->hdr.dst_addr))))
- goto error_nobufs;
- ++item;
- break;
- case ITEM_IPV6:
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV6)
- goto trans;
- mask.ipv6 = mlx5_nl_flow_item_mask
- (item, &rte_flow_item_ipv6_mask,
- &mlx5_nl_flow_mask_supported.ipv6,
- &mlx5_nl_flow_mask_empty.ipv6,
- sizeof(mlx5_nl_flow_mask_supported.ipv6), error);
- if (!mask.ipv6)
- return -rte_errno;
- if ((!eth_type_set || !vlan_eth_type_set) &&
- !mnl_attr_put_u16_check(buf, size,
- vlan_present ?
- TCA_FLOWER_KEY_VLAN_ETH_TYPE :
- TCA_FLOWER_KEY_ETH_TYPE,
- RTE_BE16(ETH_P_IPV6)))
- goto error_nobufs;
- eth_type_set = 1;
- vlan_eth_type_set = 1;
- if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) {
- ++item;
- break;
- }
- spec.ipv6 = item->spec;
- if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask.ipv6,
- "no support for partial mask on"
- " \"hdr.proto\" field");
- if (mask.ipv6->hdr.proto) {
- if (!mnl_attr_put_u8_check
- (buf, size, TCA_FLOWER_KEY_IP_PROTO,
- spec.ipv6->hdr.proto))
- goto error_nobufs;
- ip_proto_set = 1;
- }
- if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) &&
- (!mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_IPV6_SRC,
- sizeof(spec.ipv6->hdr.src_addr),
- spec.ipv6->hdr.src_addr) ||
- !mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_IPV6_SRC_MASK,
- sizeof(mask.ipv6->hdr.src_addr),
- mask.ipv6->hdr.src_addr))) ||
- (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr) &&
- (!mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_IPV6_DST,
- sizeof(spec.ipv6->hdr.dst_addr),
- spec.ipv6->hdr.dst_addr) ||
- !mnl_attr_put_check(buf, size,
- TCA_FLOWER_KEY_IPV6_DST_MASK,
- sizeof(mask.ipv6->hdr.dst_addr),
- mask.ipv6->hdr.dst_addr))))
- goto error_nobufs;
- ++item;
- break;
- case ITEM_TCP:
- if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
- goto trans;
- mask.tcp = mlx5_nl_flow_item_mask
- (item, &rte_flow_item_tcp_mask,
- &mlx5_nl_flow_mask_supported.tcp,
- &mlx5_nl_flow_mask_empty.tcp,
- sizeof(mlx5_nl_flow_mask_supported.tcp), error);
- if (!mask.tcp)
- return -rte_errno;
- if (!ip_proto_set &&
- !mnl_attr_put_u8_check(buf, size,
- TCA_FLOWER_KEY_IP_PROTO,
- IPPROTO_TCP))
- goto error_nobufs;
- if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) {
- ++item;
- break;
- }
- spec.tcp = item->spec;
- if ((mask.tcp->hdr.src_port &&
- mask.tcp->hdr.src_port != RTE_BE16(0xffff)) ||
- (mask.tcp->hdr.dst_port &&
- mask.tcp->hdr.dst_port != RTE_BE16(0xffff)))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask.tcp,
- "no support for partial masks on"
- " \"hdr.src_port\" and \"hdr.dst_port\""
- " fields");
- if ((mask.tcp->hdr.src_port &&
- (!mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_TCP_SRC,
- spec.tcp->hdr.src_port) ||
- !mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_TCP_SRC_MASK,
- mask.tcp->hdr.src_port))) ||
- (mask.tcp->hdr.dst_port &&
- (!mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_TCP_DST,
- spec.tcp->hdr.dst_port) ||
- !mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_TCP_DST_MASK,
- mask.tcp->hdr.dst_port))))
- goto error_nobufs;
- ++item;
- break;
- case ITEM_UDP:
- if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
- goto trans;
- mask.udp = mlx5_nl_flow_item_mask
- (item, &rte_flow_item_udp_mask,
- &mlx5_nl_flow_mask_supported.udp,
- &mlx5_nl_flow_mask_empty.udp,
- sizeof(mlx5_nl_flow_mask_supported.udp), error);
- if (!mask.udp)
- return -rte_errno;
- if (!ip_proto_set &&
- !mnl_attr_put_u8_check(buf, size,
- TCA_FLOWER_KEY_IP_PROTO,
- IPPROTO_UDP))
- goto error_nobufs;
- if (mask.udp == &mlx5_nl_flow_mask_empty.udp) {
- ++item;
- break;
- }
- spec.udp = item->spec;
- if ((mask.udp->hdr.src_port &&
- mask.udp->hdr.src_port != RTE_BE16(0xffff)) ||
- (mask.udp->hdr.dst_port &&
- mask.udp->hdr.dst_port != RTE_BE16(0xffff)))
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
- mask.udp,
- "no support for partial masks on"
- " \"hdr.src_port\" and \"hdr.dst_port\""
- " fields");
- if ((mask.udp->hdr.src_port &&
- (!mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_UDP_SRC,
- spec.udp->hdr.src_port) ||
- !mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_UDP_SRC_MASK,
- mask.udp->hdr.src_port))) ||
- (mask.udp->hdr.dst_port &&
- (!mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_UDP_DST,
- spec.udp->hdr.dst_port) ||
- !mnl_attr_put_u16_check(buf, size,
- TCA_FLOWER_KEY_UDP_DST_MASK,
- mask.udp->hdr.dst_port))))
- goto error_nobufs;
- ++item;
- break;
- case ACTIONS:
- if (item->type != RTE_FLOW_ITEM_TYPE_END)
- goto trans;
- assert(na_flower);
- assert(!na_flower_act);
- na_flower_act =
- mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT);
- if (!na_flower_act)
- goto error_nobufs;
- act_index_cur = 1;
- break;
- case ACTION_VOID:
- if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
- goto trans;
- ++action;
- break;
- case ACTION_PORT_ID:
- if (action->type != RTE_FLOW_ACTION_TYPE_PORT_ID)
- goto trans;
- conf.port_id = action->conf;
- if (conf.port_id->original)
- i = 0;
- else
- for (i = 0; ptoi[i].ifindex; ++i)
- if (ptoi[i].port_id == conf.port_id->id)
- break;
- if (!ptoi[i].ifindex)
- return rte_flow_error_set
- (error, ENODEV, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
- conf.port_id,
- "missing data to convert port ID to ifindex");
- act_index =
- mnl_attr_nest_start_check(buf, size, act_index_cur++);
- if (!act_index ||
- !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred"))
- goto error_nobufs;
- act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
- if (!act)
- goto error_nobufs;
- if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS,
- sizeof(struct tc_mirred),
- &(struct tc_mirred){
- .action = TC_ACT_STOLEN,
- .eaction = TCA_EGRESS_REDIR,
- .ifindex = ptoi[i].ifindex,
- }))
- goto error_nobufs;
- mnl_attr_nest_end(buf, act);
- mnl_attr_nest_end(buf, act_index);
- ++action;
- break;
- case ACTION_DROP:
- if (action->type != RTE_FLOW_ACTION_TYPE_DROP)
- goto trans;
- act_index =
- mnl_attr_nest_start_check(buf, size, act_index_cur++);
- if (!act_index ||
- !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact"))
- goto error_nobufs;
- act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
- if (!act)
- goto error_nobufs;
- if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS,
- sizeof(struct tc_gact),
- &(struct tc_gact){
- .action = TC_ACT_SHOT,
- }))
- goto error_nobufs;
- mnl_attr_nest_end(buf, act);
- mnl_attr_nest_end(buf, act_index);
- ++action;
- break;
- case ACTION_OF_POP_VLAN:
- if (action->type != RTE_FLOW_ACTION_TYPE_OF_POP_VLAN)
- goto trans;
- conf.of_push_vlan = NULL;
- i = TCA_VLAN_ACT_POP;
- goto action_of_vlan;
- case ACTION_OF_PUSH_VLAN:
- if (action->type != RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
- goto trans;
- conf.of_push_vlan = action->conf;
- i = TCA_VLAN_ACT_PUSH;
- goto action_of_vlan;
- case ACTION_OF_SET_VLAN_VID:
- if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
- goto trans;
- conf.of_set_vlan_vid = action->conf;
- if (na_vlan_id)
- goto override_na_vlan_id;
- i = TCA_VLAN_ACT_MODIFY;
- goto action_of_vlan;
- case ACTION_OF_SET_VLAN_PCP:
- if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
- goto trans;
- conf.of_set_vlan_pcp = action->conf;
- if (na_vlan_priority)
- goto override_na_vlan_priority;
- i = TCA_VLAN_ACT_MODIFY;
- goto action_of_vlan;
-action_of_vlan:
- act_index =
- mnl_attr_nest_start_check(buf, size, act_index_cur++);
- if (!act_index ||
- !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan"))
- goto error_nobufs;
- act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
- if (!act)
- goto error_nobufs;
- if (!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS,
- sizeof(struct tc_vlan),
- &(struct tc_vlan){
- .action = TC_ACT_PIPE,
- .v_action = i,
- }))
- goto error_nobufs;
- if (i == TCA_VLAN_ACT_POP) {
- mnl_attr_nest_end(buf, act);
- mnl_attr_nest_end(buf, act_index);
- ++action;
- break;
- }
- if (i == TCA_VLAN_ACT_PUSH &&
- !mnl_attr_put_u16_check(buf, size,
- TCA_VLAN_PUSH_VLAN_PROTOCOL,
- conf.of_push_vlan->ethertype))
- goto error_nobufs;
- na_vlan_id = mnl_nlmsg_get_payload_tail(buf);
- if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0))
- goto error_nobufs;
- na_vlan_priority = mnl_nlmsg_get_payload_tail(buf);
- if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0))
- goto error_nobufs;
- mnl_attr_nest_end(buf, act);
- mnl_attr_nest_end(buf, act_index);
- if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
-override_na_vlan_id:
- na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
- *(uint16_t *)mnl_attr_get_payload(na_vlan_id) =
- rte_be_to_cpu_16
- (conf.of_set_vlan_vid->vlan_vid);
- } else if (action->type ==
- RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
-override_na_vlan_priority:
- na_vlan_priority->nla_type =
- TCA_VLAN_PUSH_VLAN_PRIORITY;
- *(uint8_t *)mnl_attr_get_payload(na_vlan_priority) =
- conf.of_set_vlan_pcp->vlan_pcp;
- }
- ++action;
- break;
- case END:
- if (item->type != RTE_FLOW_ITEM_TYPE_END ||
- action->type != RTE_FLOW_ACTION_TYPE_END)
- goto trans;
- if (na_flower_act)
- mnl_attr_nest_end(buf, na_flower_act);
- if (na_flower)
- mnl_attr_nest_end(buf, na_flower);
- nlh = buf;
- return nlh->nlmsg_len;
- }
- back = trans;
- trans = mlx5_nl_flow_trans[trans[n - 1]];
- n = 0;
- goto trans;
-error_nobufs:
- if (buf != buf_tmp) {
- buf = buf_tmp;
- size = sizeof(buf_tmp);
- goto init;
- }
- return rte_flow_error_set
- (error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "generated TC message is too large");
-}
-
-/**
- * Brand rtnetlink buffer with unique handle.
- *
- * This handle should be unique for a given network interface to avoid
- * collisions.
- *
- * @param buf
- * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
- * @param handle
- * Unique 32-bit handle to use.
- */
-void
-mlx5_nl_flow_brand(void *buf, uint32_t handle)
-{
- struct tcmsg *tcm = mnl_nlmsg_get_payload(buf);
-
- tcm->tcm_handle = handle;
-}
-
-/**
- * Send Netlink message with acknowledgment.
- *
- * @param nl
- * Libmnl socket to use.
- * @param nlh
- * Message to send. This function always raises the NLM_F_ACK flag before
- * sending.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_nl_flow_nl_ack(struct mnl_socket *nl, struct nlmsghdr *nlh)
-{
- alignas(struct nlmsghdr)
- uint8_t ans[mnl_nlmsg_size(sizeof(struct nlmsgerr)) +
- nlh->nlmsg_len - sizeof(*nlh)];
- uint32_t seq = random();
- int ret;
-
- nlh->nlmsg_flags |= NLM_F_ACK;
- nlh->nlmsg_seq = seq;
- ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
- if (ret != -1)
- ret = mnl_socket_recvfrom(nl, ans, sizeof(ans));
- if (ret != -1)
- ret = mnl_cb_run
- (ans, ret, seq, mnl_socket_get_portid(nl), NULL, NULL);
- if (!ret)
- return 0;
- rte_errno = errno;
- return -rte_errno;
-}
-
-/**
- * Create a Netlink flow rule.
- *
- * @param nl
- * Libmnl socket to use.
- * @param buf
- * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
- struct rte_flow_error *error)
-{
- struct nlmsghdr *nlh = buf;
-
- nlh->nlmsg_type = RTM_NEWTFILTER;
- nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
- if (!mlx5_nl_flow_nl_ack(nl, nlh))
- return 0;
- return rte_flow_error_set
- (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "netlink: failed to create TC flow rule");
-}
-
-/**
- * Destroy a Netlink flow rule.
- *
- * @param nl
- * Libmnl socket to use.
- * @param buf
- * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
- struct rte_flow_error *error)
-{
- struct nlmsghdr *nlh = buf;
-
- nlh->nlmsg_type = RTM_DELTFILTER;
- nlh->nlmsg_flags = NLM_F_REQUEST;
- if (!mlx5_nl_flow_nl_ack(nl, nlh))
- return 0;
- return rte_flow_error_set
- (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "netlink: failed to destroy TC flow rule");
-}
-
-/**
- * Initialize ingress qdisc of a given network interface.
- *
- * @param nl
- * Libmnl socket of the @p NETLINK_ROUTE kind.
- * @param ifindex
- * Index of network interface to initialize.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
- struct rte_flow_error *error)
-{
- struct nlmsghdr *nlh;
- struct tcmsg *tcm;
- alignas(struct nlmsghdr)
- uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
-
- /* Destroy existing ingress qdisc and everything attached to it. */
- nlh = mnl_nlmsg_put_header(buf);
- nlh->nlmsg_type = RTM_DELQDISC;
- nlh->nlmsg_flags = NLM_F_REQUEST;
- tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
- tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = ifindex;
- tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
- tcm->tcm_parent = TC_H_INGRESS;
- /* Ignore errors when qdisc is already absent. */
- if (mlx5_nl_flow_nl_ack(nl, nlh) &&
- rte_errno != EINVAL && rte_errno != ENOENT)
- return rte_flow_error_set
- (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "netlink: failed to remove ingress qdisc");
- /* Create fresh ingress qdisc. */
- nlh = mnl_nlmsg_put_header(buf);
- nlh->nlmsg_type = RTM_NEWQDISC;
- nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
- tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
- tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = ifindex;
- tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
- tcm->tcm_parent = TC_H_INGRESS;
- mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
- if (mlx5_nl_flow_nl_ack(nl, nlh))
- return rte_flow_error_set
- (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "netlink: failed to create ingress qdisc");
- return 0;
-}
-
-/**
- * Create and configure a libmnl socket for Netlink flow rules.
- *
- * @return
- * A valid libmnl socket object pointer on success, NULL otherwise and
- * rte_errno is set.
- */
-struct mnl_socket *
-mlx5_nl_flow_socket_create(void)
-{
- struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
-
- if (nl) {
- mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
- sizeof(int));
- if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
- return nl;
- }
- rte_errno = errno;
- if (nl)
- mnl_socket_close(nl);
- return NULL;
-}
-
-/**
- * Destroy a libmnl socket.
- */
-void
-mlx5_nl_flow_socket_destroy(struct mnl_socket *nl)
-{
- mnl_socket_close(nl);
-}
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 0870d32f..29742b13 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -159,7 +159,7 @@ struct mlx5_wqe_eth_seg_small {
uint8_t cs_flags;
uint8_t rsvd1;
uint16_t mss;
- uint32_t rsvd2;
+ uint32_t flow_table_metadata;
uint16_t inline_hdr_sz;
uint8_t inline_hdr[2];
} __rte_aligned(MLX5_WQE_DWORD_SIZE);
@@ -280,6 +280,226 @@ struct mlx5_cqe {
/* CQE format value. */
#define MLX5_COMPRESSED 0x3
+/* The fields of a packet to be modified. */
+enum mlx5_modificaiton_field {
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+};
+
+/* Modification sub command. */
+struct mlx5_modification_cmd {
+ union {
+ uint32_t data0;
+ struct {
+ unsigned int bits:5;
+ unsigned int rsvd0:3;
+ unsigned int src_offset:5; /* Start bit offset. */
+ unsigned int rsvd1:3;
+ unsigned int src_field:12;
+ unsigned int type:4;
+ };
+ };
+ union {
+ uint32_t data1;
+ uint8_t data[4];
+ struct {
+ unsigned int rsvd2:8;
+ unsigned int dst_offset:8;
+ unsigned int dst_field:12;
+ unsigned int rsvd3:4;
+ };
+ };
+};
+
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \
+ (&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \
+ (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \
+ __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
+#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
+ (__mlx5_bit_off(typ, fld) & 0xf))
+#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_DB(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* Insert a value into a field of a structure. */
+#define MLX5_SET(typ, p, fld, v) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
+ __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | \
+ (((_v) & __mlx5_mask(typ, fld)) << \
+ __mlx5_dw_bit_off(typ, fld))); \
+ } while (0)
+#define MLX5_GET16(typ, p, fld) \
+ ((rte_be_to_cpu_16(*((__be16 *)(p) + \
+ __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
+ __mlx5_mask16(typ, fld))
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 reserved_at_0[0x8];
+ u8 source_sqn[0x18];
+ u8 reserved_at_20[0x10];
+ u8 source_port[0x10];
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
+ u8 gre_protocol[0x10];
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+ u8 vxlan_vni[0x18];
+ u8 reserved_at_b8[0x8];
+ u8 reserved_at_c0[0x20];
+ u8 reserved_at_e0[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+ u8 reserved_at_100[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
+ u8 frag[0x1];
+ u8 ip_version[0x4];
+ u8 tcp_flags[0x9];
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+ u8 reserved_at_c0[0x20];
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+};
+
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+ u8 reserved_at_80[0x100];
+ u8 metadata_reg_a[0x20];
+ u8 reserved_at_1a0[0x60];
+};
+
+/* Flow matcher. */
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+ u8 reserved_at_800[0x800];
+};
+
+enum {
+ MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
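The block added above mirrors the kernel-style mlx5 "ifc" conventions: each mlx5_ifc_*_bits layout encodes field positions in bits, and MLX5_SET()/MLX5_GET16() perform the big-endian read-modify-write at the computed dword/word offset. A minimal usage sketch (the buffer name and the EtherType value are illustrative, not part of the patch):

	uint32_t match_buf[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
	void *headers = MLX5_ADDR_OF(fte_match_param, match_buf, outer_headers);

	/* Match on outer EtherType == IPv4 (0x0800). */
	MLX5_SET(fte_match_set_lyr_2_4, headers, ethertype, 0x0800);
	/* Read the 16-bit field back; masking and byte order are handled. */
	uint16_t etype = MLX5_GET16(fte_match_set_lyr_2_4, headers, ethertype);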
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1f7bfd44..ed993ea6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -388,7 +388,6 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_JUMBO_FRAME);
- offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
if (config->hw_fcs_strip)
offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
@@ -1438,7 +1437,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.crc_present = 0;
- if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2d14f8a6..24a054d5 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -523,6 +523,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
uint32_t swp_offsets = 0;
uint8_t swp_types = 0;
+ rte_be32_t metadata;
uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
@@ -566,6 +567,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
cs_flags = txq_ol_cksum_to_cs(buf);
txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
+ /* Copy metadata from mbuf if valid */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
@@ -781,7 +785,7 @@ next_pkt:
swp_offsets,
cs_flags | (swp_types << 8) |
(rte_cpu_to_be_16(tso_segsz) << 16),
- 0,
+ metadata,
(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
};
} else {
@@ -795,7 +799,7 @@ next_pkt:
wqe->eseg = (rte_v128u32_t){
swp_offsets,
cs_flags | (swp_types << 8),
- 0,
+ metadata,
(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
};
}
@@ -861,7 +865,7 @@ mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.rsvd0 = 0;
mpw->wqe->eseg.rsvd1 = 0;
- mpw->wqe->eseg.rsvd2 = 0;
+ mpw->wqe->eseg.flow_table_metadata = 0;
mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
MLX5_OPCODE_TSO);
@@ -948,6 +952,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint32_t length;
unsigned int segs_n = buf->nb_segs;
uint32_t cs_flags;
+ rte_be32_t metadata;
/*
* Make sure there is enough room to store this packet and
@@ -964,6 +969,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
max_elts -= segs_n;
--pkts_n;
cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Copy metadata from mbuf if valid */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Retrieve packet information. */
length = PKT_LEN(buf);
assert(length);
@@ -971,6 +979,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
((mpw.len != length) ||
(segs_n != 1) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags)))
mlx5_mpw_close(txq, &mpw);
if (mpw.state == MLX5_MPW_STATE_CLOSED) {
@@ -984,6 +993,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
}
/* Multi-segment packets must be alone in their MPW. */
assert((segs_n == 1) || (mpw.pkts_n == 0));
@@ -1082,7 +1092,7 @@ mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
mpw->wqe->eseg.cs_flags = 0;
mpw->wqe->eseg.rsvd0 = 0;
mpw->wqe->eseg.rsvd1 = 0;
- mpw->wqe->eseg.rsvd2 = 0;
+ mpw->wqe->eseg.flow_table_metadata = 0;
inl = (struct mlx5_wqe_inl_small *)
(((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
mpw->data.raw = (uint8_t *)&inl->raw;
@@ -1172,6 +1182,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
uint32_t length;
unsigned int segs_n = buf->nb_segs;
uint8_t cs_flags;
+ rte_be32_t metadata;
/*
* Make sure there is enough room to store this packet and
@@ -1193,18 +1204,23 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
*/
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Copy metadata from mbuf if valid */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if packet differs. */
if (mpw.state == MLX5_MPW_STATE_OPENED) {
if ((mpw.len != length) ||
(segs_n != 1) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags))
mlx5_mpw_close(txq, &mpw);
} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
if ((mpw.len != length) ||
(segs_n != 1) ||
(length > inline_room) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags)) {
mlx5_mpw_inline_close(txq, &mpw);
inline_room =
@@ -1224,12 +1240,14 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
} else {
if (unlikely(max_wqe < wqe_inl_n))
break;
max_wqe -= wqe_inl_n;
mlx5_mpw_inline_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
}
}
/* Multi-segment packets must be alone in their MPW. */
@@ -1461,6 +1479,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
unsigned int do_inline = 0; /* Whether inline is possible. */
uint32_t length;
uint8_t cs_flags;
+ rte_be32_t metadata;
/* Multi-segmented packet is handled in slow-path outside. */
assert(NB_SEGS(buf) == 1);
@@ -1468,6 +1487,9 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
if (max_elts - j == 0)
break;
cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Copy metadata from mbuf if valid */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if:
@@ -1482,6 +1504,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
(length <= txq->inline_max_packet_sz &&
inl_pad + sizeof(inl_hdr) + length >
mpw_room) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags))
max_wqe -= mlx5_empw_close(txq, &mpw);
}
@@ -1505,6 +1528,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
sizeof(inl_hdr) + length <= mpw_room &&
!txq->mpw_hdr_dseg;
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
} else {
/* Evaluate whether the next packet can be inlined.
* Inlininig is possible when:
@@ -2097,7 +2121,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
unsigned int i = 0;
- uint16_t rq_ci = rxq->rq_ci;
+ uint32_t rq_ci = rxq->rq_ci;
uint16_t consumed_strd = rxq->consumed_strd;
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
@@ -2324,7 +2348,7 @@ removed_rx_burst(void *dpdk_txq __rte_unused,
* (e.g. mlx5_rxtx_vec_sse.c for x86).
*/
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
@@ -2332,7 +2356,7 @@ mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
@@ -2340,7 +2364,7 @@ mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
@@ -2348,25 +2372,25 @@ mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
return 0;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
return -ENOTSUP;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
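The Tx paths changed above copy the mbuf's tx_metadata into the ESEG flow_table_metadata field only when PKT_TX_METADATA is set in ol_flags. A minimal sketch of how an application would tag a packet for this path (the helper name is illustrative); the Tx queue must also be configured with DEV_TX_OFFLOAD_MATCH_METADATA for the value to be honoured:

	static void
	tag_packet(struct rte_mbuf *m, rte_be32_t meta)
	{
		m->tx_metadata = meta;           /* copied into the WQE ESEG */
		m->ol_flags |= PKT_TX_METADATA;  /* marks tx_metadata as valid */
	}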
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 48ed2b20..1db468c3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -97,10 +97,10 @@ struct mlx5_rxq_data {
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
- uint16_t rq_ci;
+ uint32_t rq_ci;
uint16_t consumed_strd; /* Number of consumed strides in WQE. */
- uint16_t rq_pi;
- uint16_t cq_ci;
+ uint32_t rq_pi;
+ uint32_t cq_ci;
struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
volatile void *wqes;
@@ -363,6 +363,8 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+ struct rte_mempool *mp);
/**
* Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
@@ -607,6 +609,24 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
+ * cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx5_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
+/**
* Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
* as mempool is pre-configured and static.
*
@@ -664,7 +684,20 @@ mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
return mlx5_tx_addr2mr_bh(txq, addr);
}
-#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+static __rte_always_inline uint32_t
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
+
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ if (rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+ }
+ return lkey;
+}
/**
* Ring TX queue doorbell and flush the update if requested.
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 0a4aed8f..1453f4ff 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -40,7 +40,8 @@
#endif
/**
- * Count the number of packets having same ol_flags and calculate cs_flags.
+ * Count the number of packets having same ol_flags and same metadata (if
+ * PKT_TX_METADATA is set in ol_flags), and calculate cs_flags.
*
* @param pkts
* Pointer to array of packets.
@@ -48,26 +49,45 @@
* Number of packets.
* @param cs_flags
* Pointer of flags to be returned.
+ * @param metadata
+ * Pointer of metadata to be returned.
+ * @param txq_offloads
+ * Offloads enabled on Tx queue
*
* @return
- * Number of packets having same ol_flags.
+ * Number of packets having same ol_flags and metadata, if relevant.
*/
static inline unsigned int
-txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags,
+ rte_be32_t *metadata, const uint64_t txq_offloads)
{
unsigned int pos;
- const uint64_t ol_mask =
+ const uint64_t cksum_ol_mask =
PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
+ rte_be32_t p0_metadata, pn_metadata;
if (!pkts_n)
return 0;
- /* Count the number of packets having same ol_flags. */
- for (pos = 1; pos < pkts_n; ++pos)
- if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
+ p0_metadata = pkts[0]->ol_flags & PKT_TX_METADATA ?
+ pkts[0]->tx_metadata : 0;
+ /* Count the number of packets having same offload parameters. */
+ for (pos = 1; pos < pkts_n; ++pos) {
+ /* Check if packet has same checksum flags. */
+ if ((txq_offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP) &&
+ ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & cksum_ol_mask))
break;
+ /* Check if packet has same metadata. */
+ if (txq_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
+ pn_metadata = pkts[pos]->ol_flags & PKT_TX_METADATA ?
+ pkts[pos]->tx_metadata : 0;
+ if (pn_metadata != p0_metadata)
+ break;
+ }
+ }
*cs_flags = txq_ol_cksum_to_cs(pkts[0]);
+ *metadata = p0_metadata;
return pos;
}
@@ -96,7 +116,7 @@ mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t ret;
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, 0, 0);
nb_tx += ret;
if (!ret)
break;
@@ -127,6 +147,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint8_t cs_flags = 0;
uint16_t n;
uint16_t ret;
+ rte_be32_t metadata = 0;
/* Transmit multi-seg packets in the head of pkts list. */
if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
@@ -137,9 +158,12 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
n = txq_count_contig_single_seg(&pkts[nb_tx], n);
- if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
- n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
+ if (txq->offloads & (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP |
+ DEV_TX_OFFLOAD_MATCH_METADATA))
+ n = txq_calc_offload(&pkts[nb_tx], n,
+ &cs_flags, &metadata,
+ txq->offloads);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags, metadata);
nb_tx += ret;
if (!ret)
break;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index fb884f92..fda7004e 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -22,6 +22,7 @@
/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+ DEV_TX_OFFLOAD_MATCH_METADATA | \
DEV_TX_OFFLOAD_MULTI_SEGS)
/*
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index b37b7381..0b729f18 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -201,13 +201,15 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
* Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
* @param cs_flags
* Checksum offload flags to be written in the descriptor.
+ * @param metadata
+ * Metadata value to be written in the descriptor.
*
* @return
* Number of packets successfully transmitted (<= pkts_n).
*/
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t cs_flags)
+ uint8_t cs_flags, rte_be32_t metadata)
{
struct rte_mbuf **elts;
uint16_t elts_head = txq->elts_head;
@@ -293,11 +295,8 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
- vst1q_u8((void *)(t_wqe + 1),
- ((uint8x16_t) { 0, 0, 0, 0,
- cs_flags, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 }));
+ vst1q_u32((void *)(t_wqe + 1),
+ ((uint32x4_t) { 0, cs_flags, metadata, 0 }));
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += pkts_n;
#endif
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 54b3783c..e0f95f92 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -202,13 +202,15 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
* Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
* @param cs_flags
* Checksum offload flags to be written in the descriptor.
+ * @param metadata
+ * Metadata value to be written in the descriptor.
*
* @return
* Number of packets successfully transmitted (<= pkts_n).
*/
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t cs_flags)
+ uint8_t cs_flags, rte_be32_t metadata)
{
struct rte_mbuf **elts;
uint16_t elts_head = txq->elts_head;
@@ -292,11 +294,7 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
_mm_store_si128(t_wqe, ctrl);
/* Fill ESEG in the header. */
- _mm_store_si128(t_wqe + 1,
- _mm_set_epi8(0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, cs_flags,
- 0, 0, 0, 0));
+ _mm_store_si128(t_wqe + 1, _mm_set_epi32(0, metadata, cs_flags, 0));
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += pkts_n;
#endif
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index a3a52291..00106171 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -3,8 +3,6 @@
* Copyright 2016 Mellanox Technologies, Ltd
*/
-#define _GNU_SOURCE
-
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 91f3d474..a14d1e49 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -17,14 +17,6 @@
#include "mlx5_rxtx.h"
#include "mlx5_defs.h"
-struct mlx5_counter_ctrl {
- /* Name of the counter. */
- char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
- /* Name of the counter on the device table. */
- char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
- uint32_t ib:1; /**< Nonzero for IB counters. */
-};
-
static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
{
.dpdk_name = "rx_port_unicast_bytes",
@@ -115,6 +107,23 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
.dpdk_name = "rx_bytes_phy",
.ctr_name = "rx_bytes_phy",
},
+ /* Representor only */
+ {
+ .dpdk_name = "rx_packets",
+ .ctr_name = "vport_rx_packets",
+ },
+ {
+ .dpdk_name = "rx_bytes",
+ .ctr_name = "vport_rx_bytes",
+ },
+ {
+ .dpdk_name = "tx_packets",
+ .ctr_name = "vport_tx_packets",
+ },
+ {
+ .dpdk_name = "tx_bytes",
+ .ctr_name = "vport_tx_bytes",
+ },
};
static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
@@ -146,19 +155,19 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
ifr.ifr_data = (caddr_t)et_stats;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(WARNING,
"port %u unable to read statistic values from device",
dev->data->port_id);
return ret;
}
- for (i = 0; i != xstats_n; ++i) {
- if (mlx5_counters_init[i].ib) {
+ for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) {
+ if (xstats_ctrl->info[i].ib) {
FILE *file;
MKSTR(path, "%s/ports/1/hw_counters/%s",
priv->ibdev_path,
- mlx5_counters_init[i].ctr_name);
+ xstats_ctrl->info[i].ctr_name);
file = fopen(path, "rb");
if (file) {
@@ -194,7 +203,7 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (caddr_t)&drvinfo;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(WARNING, "port %u unable to query number of statistics",
dev->data->port_id);
@@ -222,6 +231,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
unsigned int str_sz;
int ret;
+ /* Reset the count so values are not aggregated on each re-init. */
+ xstats_ctrl->mlx5_stats_n = 0;
ret = mlx5_ethtool_get_stats_n(dev);
if (ret < 0) {
DRV_LOG(WARNING, "port %u no extended statistics available",
@@ -229,7 +240,6 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
return;
}
dev_stats_n = ret;
- xstats_ctrl->stats_n = dev_stats_n;
/* Allocate memory to grab stat names and values. */
str_sz = dev_stats_n * ETH_GSTRING_LEN;
strings = (struct ethtool_gstrings *)
@@ -244,14 +254,12 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
strings->string_set = ETH_SS_STATS;
strings->len = dev_stats_n;
ifr.ifr_data = (caddr_t)strings;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DRV_LOG(WARNING, "port %u unable to get statistic names",
dev->data->port_id);
goto free;
}
- for (j = 0; j != xstats_n; ++j)
- xstats_ctrl->dev_table_idx[j] = dev_stats_n;
for (i = 0; i != dev_stats_n; ++i) {
const char *curr_string = (const char *)
&strings->data[i * ETH_GSTRING_LEN];
@@ -259,24 +267,25 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
for (j = 0; j != xstats_n; ++j) {
if (!strcmp(mlx5_counters_init[j].ctr_name,
curr_string)) {
- xstats_ctrl->dev_table_idx[j] = i;
+ unsigned int idx = xstats_ctrl->mlx5_stats_n++;
+
+ xstats_ctrl->dev_table_idx[idx] = i;
+ xstats_ctrl->info[idx] = mlx5_counters_init[j];
break;
}
}
}
- for (j = 0; j != xstats_n; ++j) {
- if (mlx5_counters_init[j].ib)
- continue;
- if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
- DRV_LOG(WARNING,
- "port %u counter \"%s\" is not recognized",
- dev->data->port_id,
- mlx5_counters_init[j].dpdk_name);
- goto free;
+ /* Add IB counters. */
+ for (i = 0; i != xstats_n; ++i) {
+ if (mlx5_counters_init[i].ib) {
+ unsigned int idx = xstats_ctrl->mlx5_stats_n++;
+
+ xstats_ctrl->info[idx] = mlx5_counters_init[i];
}
}
+ assert(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
+ xstats_ctrl->stats_n = dev_stats_n;
/* Copy to base at first time. */
- assert(xstats_n <= MLX5_MAX_XSTATS);
ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
if (ret)
DRV_LOG(ERR, "port %u cannot read device counters: %s",
@@ -306,9 +315,10 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
struct priv *priv = dev->data->dev_private;
unsigned int i;
uint64_t counters[n];
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n;
- if (n >= xstats_n && stats) {
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ if (n >= mlx5_stats_n && stats) {
int stats_n;
int ret;
@@ -320,12 +330,12 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
ret = mlx5_read_dev_counters(dev, counters);
if (ret)
return ret;
- for (i = 0; i != xstats_n; ++i) {
+ for (i = 0; i != mlx5_stats_n; ++i) {
stats[i].id = i;
stats[i].value = (counters[i] - xstats_ctrl->base[i]);
}
}
- return xstats_n;
+ return mlx5_stats_n;
}
/**
@@ -441,7 +451,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
unsigned int i;
- unsigned int n = xstats_n;
+ unsigned int n = xstats_ctrl->mlx5_stats_n;
uint64_t counters[n];
int ret;
@@ -481,14 +491,17 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
unsigned int i;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int mlx5_xstats_n = xstats_ctrl->mlx5_stats_n;
- if (n >= xstats_n && xstats_names) {
- for (i = 0; i != xstats_n; ++i) {
+ if (n >= mlx5_xstats_n && xstats_names) {
+ for (i = 0; i != mlx5_xstats_n; ++i) {
strncpy(xstats_names[i].name,
- mlx5_counters_init[i].dpdk_name,
+ xstats_ctrl->info[i].dpdk_name,
RTE_ETH_XSTATS_NAME_SIZE);
xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
}
}
- return xstats_n;
+ return mlx5_xstats_n;
}
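Since the counter table is now built per port at init time, the set of extended statistics can differ between ports (representors additionally expose the vport_* counters). A hedged sketch of the usual two-step retrieval on the application side (port_id and the VLA sizing are illustrative):

	int n = rte_eth_xstats_get(port_id, NULL, 0);	/* query count only */
	struct rte_eth_xstat xstats[n];
	struct rte_eth_xstat_name names[n];

	rte_eth_xstats_get_names(port_id, names, n);
	rte_eth_xstats_get(port_id, xstats, n);		/* values since last reset */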
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index f9bc4739..b01bd675 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -120,7 +120,6 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
DEV_TX_OFFLOAD_UDP_TNL_TSO);
}
-
if (config->tunnel_en) {
if (config->hw_csum)
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
@@ -128,6 +127,10 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
}
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (config->dv_flow_en)
+ offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
+#endif
return offloads;
}
diff --git a/drivers/net/mvneta/Makefile b/drivers/net/mvneta/Makefile
new file mode 100644
index 00000000..05a0487c
--- /dev/null
+++ b/drivers/net/mvneta/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvneta.a
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvneta_version.map
+
+# external library dependencies
+CFLAGS += -I$(RTE_SDK)/drivers/common/mvep
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_SIZE=64
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
+LDLIBS += -lrte_bus_vdev -lrte_common_mvep
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_MVNETA_PMD) += mvneta_ethdev.c mvneta_rxtx.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mvneta/meson.build b/drivers/net/mvneta/meson.build
new file mode 100644
index 00000000..c0b1bce0
--- /dev/null
+++ b/drivers/net/mvneta/meson.build
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += [
+ '-DMVCONF_TYPES_PUBLIC',
+ '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC',
+ '-DMVCONF_DMA_PHYS_ADDR_T_SIZE=64'
+ ]
+endif
+
+sources = files(
+ 'mvneta_ethdev.c',
+ 'mvneta_rxtx.c'
+)
+
+deps += ['cfgfile', 'common_mvep']
diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c
new file mode 100644
index 00000000..2d766645
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_ethdev.c
@@ -0,0 +1,987 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_kvargs.h>
+#include <rte_bus_vdev.h>
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <rte_mvep_common.h>
+
+#include "mvneta_rxtx.h"
+
+
+#define MVNETA_IFACE_NAME_ARG "iface"
+
+#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CHECKSUM)
+
+/** Port Tx offload capabilities. */
+#define MVNETA_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define MVNETA_PKT_SIZE_MAX (16382 - MV_MH_SIZE) /* 9700B */
+#define MVNETA_DEFAULT_MTU 1500
+
+#define MVNETA_MAC_ADDRS_MAX 256 /*16 UC, 256 IP, 256 MC/BC */
+/** Maximum length of a match string */
+#define MVNETA_MATCH_LEN 16
+
+int mvneta_logtype;
+
+static const char * const valid_args[] = {
+ MVNETA_IFACE_NAME_ARG,
+ NULL
+};
+
+struct mvneta_ifnames {
+ const char *names[NETA_NUM_ETH_PPIO];
+ int idx;
+};
+
+static int mvneta_dev_num;
+
+/**
+ * Deinitialize packet processor.
+ */
+static void
+mvneta_neta_deinit(void)
+{
+ neta_deinit();
+}
+
+/**
+ * Initialize packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_neta_init(void)
+{
+ return neta_init();
+}
+
+/**
+ * Callback used by rte_kvargs_process() during argument parsing.
+ *
+ * @param key
+ * Pointer to the parsed key (unused).
+ * @param value
+ * Pointer to the parsed value.
+ * @param extra_args
+ * Pointer to the extra arguments which contains address of the
+ * table of pointers to parsed interface names.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mvneta_ifnames_get(const char *key __rte_unused, const char *value,
+ void *extra_args)
+{
+ struct mvneta_ifnames *ifnames = extra_args;
+
+ ifnames->names[ifnames->idx++] = value;
+
+ return 0;
+}
+
+/**
+ * Ethernet device configuration.
+ *
+ * Prepare the driver for a given number of TX and RX queues and
+ * configure RSS if supported.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_dev_configure(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct neta_ppio_params *ppio_params;
+
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
+ MVNETA_LOG(INFO, "Unsupported RSS and rx multi queue mode %d",
+ dev->data->dev_conf.rxmode.mq_mode);
+ if (dev->data->nb_rx_queues > 1)
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.split_hdr_size) {
+ MVNETA_LOG(INFO, "Split headers not supported");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ MRVL_NETA_ETH_HDRS_LEN;
+
+ if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ priv->multiseg = 1;
+
+ ppio_params = &priv->ppio_params;
+ ppio_params->outqs_params.num_outqs = dev->data->nb_tx_queues;
+ /* Default: 1 TC, no QoS supported. */
+ ppio_params->inqs_params.num_tcs = 1;
+ ppio_params->inqs_params.tcs_params[0].pkt_offset = MRVL_NETA_PKT_OFFS;
+ priv->ppio_id = dev->data->port_id;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ * @param info
+ * Info structure output buffer.
+ */
+static void
+mvneta_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *info)
+{
+ info->speed_capa = ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G;
+
+ info->max_rx_queues = MRVL_NETA_RXQ_MAX;
+ info->max_tx_queues = MRVL_NETA_TXQ_MAX;
+ info->max_mac_addrs = MVNETA_MAC_ADDRS_MAX;
+
+ info->rx_desc_lim.nb_max = MRVL_NETA_RXD_MAX;
+ info->rx_desc_lim.nb_min = MRVL_NETA_RXD_MIN;
+ info->rx_desc_lim.nb_align = MRVL_NETA_RXD_ALIGN;
+
+ info->tx_desc_lim.nb_max = MRVL_NETA_TXD_MAX;
+ info->tx_desc_lim.nb_min = MRVL_NETA_TXD_MIN;
+ info->tx_desc_lim.nb_align = MRVL_NETA_TXD_ALIGN;
+
+ info->rx_offload_capa = MVNETA_RX_OFFLOADS;
+ info->rx_queue_offload_capa = MVNETA_RX_OFFLOADS;
+
+ info->tx_offload_capa = MVNETA_TX_OFFLOADS;
+ info->tx_queue_offload_capa = MVNETA_TX_OFFLOADS;
+
+ /* By default packets are dropped if no descriptors are available */
+ info->default_rxconf.rx_drop_en = 1;
+ /* Deferred tx queue start is not supported */
+ info->default_txconf.tx_deferred_start = 0;
+ info->default_txconf.offloads = 0;
+
+ info->max_rx_pktlen = MVNETA_PKT_SIZE_MAX;
+}
+
+/**
+ * Return supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ *
+ * @return
+ * Const pointer to the table with supported packet types.
+ */
+static const uint32_t *
+mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP
+ };
+
+ return ptypes;
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * Setting the MTU affects hardware MRU (packets larger than the MRU
+ * will be dropped).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mtu
+ * New MTU.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ uint16_t mbuf_data_size = 0; /* SW buffer size */
+ uint16_t mru;
+ int ret;
+
+ mru = MRVL_NETA_MTU_TO_MRU(mtu);
+ /*
+ * min_rx_buf_size is equal to mbuf data size
+ * if pmd didn't set it differently
+ */
+ mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+ /* Prevent PMD from:
+ * - setting mru greater than the mbuf size resulting in
+ * hw and sw buffer size mismatch
+ * - setting mtu that requires the support of scattered packets
+ * when this feature has not been enabled/supported so far.
+ */
+ if (!dev->data->scattered_rx &&
+ (mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) {
+ mru = mbuf_data_size - MRVL_NETA_PKT_OFFS;
+ mtu = MRVL_NETA_MRU_TO_MTU(mru);
+ MVNETA_LOG(WARNING, "MTU too big, max MTU possible limitted by"
+ " current mbuf size: %u. Set MTU to %u, MRU to %u",
+ mbuf_data_size, mtu, mru);
+ }
+
+ if (mtu < ETHER_MIN_MTU || mru > MVNETA_PKT_SIZE_MAX) {
+ MVNETA_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
+ return -EINVAL;
+ }
+
+ dev->data->mtu = mtu;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
+
+ if (!priv->ppio)
+ /* It is OK. New MTU will be set later on mvneta_dev_start */
+ return 0;
+
+ ret = neta_ppio_set_mru(priv->ppio, mru);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to change MRU");
+ return ret;
+ }
+
+ ret = neta_ppio_set_mtu(priv->ppio, mtu);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to change MTU");
+ return ret;
+ }
+ MVNETA_LOG(INFO, "MTU changed to %u, MRU = %u", mtu, mru);
+
+ return 0;
+}
+
+/**
+ * DPDK callback to bring the link up.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return 0;
+
+ return neta_ppio_enable(priv->ppio);
+}
+
+/**
+ * DPDK callback to bring the link down.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return 0;
+
+ return neta_ppio_disable(priv->ppio);
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mvneta_dev_start(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ char match[MVNETA_MATCH_LEN];
+ int ret = 0, i;
+
+ if (priv->ppio)
+ return mvneta_dev_set_link_up(dev);
+
+ snprintf(match, sizeof(match), "%s", dev->data->name);
+ priv->ppio_params.match = match;
+ priv->ppio_params.inqs_params.mtu = dev->data->mtu;
+
+ ret = neta_ppio_init(&priv->ppio_params, &priv->ppio);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to init ppio");
+ return ret;
+ }
+ priv->ppio_id = priv->ppio->port_id;
+
+ /*
+ * In case there are some stale uc/mc MAC addresses, flush them
+ * here. It cannot be done during mvneta_dev_close() as port information
+ * is already gone at that point (due to neta_ppio_deinit() in
+ * mvneta_dev_stop()).
+ */
+ if (!priv->uc_mc_flushed) {
+ ret = neta_ppio_flush_mac_addrs(priv->ppio, 0, 1);
+ if (ret) {
+ MVNETA_LOG(ERR,
+ "Failed to flush uc/mc filter list");
+ goto out;
+ }
+ priv->uc_mc_flushed = 1;
+ }
+
+ ret = mvneta_alloc_rx_bufs(dev);
+ if (ret)
+ goto out;
+
+ ret = mvneta_mtu_set(dev, dev->data->mtu);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to set MTU %d", dev->data->mtu);
+ goto out;
+ }
+
+ ret = mvneta_dev_set_link_up(dev);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to set link up");
+ goto out;
+ }
+
+ /* start tx queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ mvneta_set_tx_function(dev);
+
+ return 0;
+
+out:
+ MVNETA_LOG(ERR, "Failed to start device");
+ neta_ppio_deinit(priv->ppio);
+ return ret;
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_dev_stop(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ if (!priv->ppio)
+ return;
+
+ mvneta_dev_set_link_down(dev);
+ mvneta_flush_queues(dev);
+ neta_ppio_deinit(priv->ppio);
+
+ priv->ppio = NULL;
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_dev_close(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int i;
+
+ if (priv->ppio)
+ mvneta_dev_stop(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ mvneta_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ mvneta_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ /*
+ * TODO
+ * once MUSDK provides necessary API use it here
+ */
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct ethtool_cmd edata;
+ struct ifreq req;
+ int ret, fd, link_up;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ edata.cmd = ETHTOOL_GSET;
+
+ strcpy(req.ifr_name, dev->data->name);
+ req.ifr_data = (void *)&edata;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd == -1)
+ return -EFAULT;
+ ret = ioctl(fd, SIOCETHTOOL, &req);
+ if (ret == -1) {
+ close(fd);
+ return -EFAULT;
+ }
+
+ close(fd);
+
+ switch (ethtool_cmd_speed(&edata)) {
+ case SPEED_10:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+ break;
+ case SPEED_100:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case SPEED_1000:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case SPEED_2500:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+ default:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ }
+
+ dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
+ ETH_LINK_HALF_DUPLEX;
+ dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
+ ETH_LINK_FIXED;
+
+ neta_ppio_get_link_state(priv->ppio, &link_up);
+ dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret, en;
+
+ if (!priv->ppio)
+ return;
+
+ neta_ppio_get_promisc(priv->ppio, &en);
+ if (en) {
+ MVNETA_LOG(INFO, "Promiscuous already enabled");
+ return;
+ }
+
+ ret = neta_ppio_set_promisc(priv->ppio, 1);
+ if (ret)
+ MVNETA_LOG(ERR, "Failed to enable promiscuous mode");
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret, en;
+
+ if (!priv->ppio)
+ return;
+
+ neta_ppio_get_promisc(priv->ppio, &en);
+ if (!en) {
+ MVNETA_LOG(INFO, "Promiscuous already disabled");
+ return;
+ }
+
+ ret = neta_ppio_set_promisc(priv->ppio, 0);
+ if (ret)
+ MVNETA_LOG(ERR, "Failed to disable promiscuous mode");
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+static void
+mvneta_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ if (!priv->ppio)
+ return;
+
+ ret = neta_ppio_remove_mac_addr(priv->ppio,
+ dev->data->mac_addrs[index].addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf),
+ &dev->data->mac_addrs[index]);
+ MVNETA_LOG(ERR, "Failed to remove mac %s", buf);
+ }
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (unused).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ if (index == 0)
+ /* For setting index 0, mvneta_mac_addr_set() should be used. */
+ return -1;
+
+ if (!priv->ppio)
+ return 0;
+
+ ret = neta_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ MVNETA_LOG(ERR, "Failed to add mac %s", buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ */
+static int
+mvneta_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return -EINVAL;
+
+ ret = neta_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ MVNETA_LOG(ERR, "Failed to set mac to %s", buf);
+ }
+ return 0;
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct neta_ppio_statistics ppio_stats;
+ unsigned int ret;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ ret = neta_ppio_get_statistics(priv->ppio, &ppio_stats);
+ if (unlikely(ret)) {
+ MVNETA_LOG(ERR, "Failed to update port statistics");
+ return ret;
+ }
+
+ stats->ipackets += ppio_stats.rx_packets +
+ ppio_stats.rx_broadcast_packets +
+ ppio_stats.rx_multicast_packets -
+ priv->prev_stats.ipackets;
+ stats->opackets += ppio_stats.tx_packets +
+ ppio_stats.tx_broadcast_packets +
+ ppio_stats.tx_multicast_packets -
+ priv->prev_stats.opackets;
+ stats->ibytes += ppio_stats.rx_bytes - priv->prev_stats.ibytes;
+ stats->obytes += ppio_stats.tx_bytes - priv->prev_stats.obytes;
+ stats->imissed += ppio_stats.rx_discard +
+ ppio_stats.rx_overrun -
+ priv->prev_stats.imissed;
+
+ stats->ierrors = ppio_stats.rx_packets_err +
+ ppio_stats.rx_errors +
+ ppio_stats.rx_crc_error -
+ priv->prev_stats.ierrors;
+ stats->oerrors = ppio_stats.tx_errors - priv->prev_stats.oerrors;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mvneta_stats_reset(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ unsigned int ret;
+
+ if (!priv->ppio)
+ return;
+
+ ret = mvneta_stats_get(dev, &priv->prev_stats);
+ if (unlikely(ret))
+ RTE_LOG(ERR, PMD, "Failed to reset port statistics");
+}
+
+
+static const struct eth_dev_ops mvneta_ops = {
+ .dev_configure = mvneta_dev_configure,
+ .dev_start = mvneta_dev_start,
+ .dev_stop = mvneta_dev_stop,
+ .dev_set_link_up = mvneta_dev_set_link_up,
+ .dev_set_link_down = mvneta_dev_set_link_down,
+ .dev_close = mvneta_dev_close,
+ .link_update = mvneta_link_update,
+ .promiscuous_enable = mvneta_promiscuous_enable,
+ .promiscuous_disable = mvneta_promiscuous_disable,
+ .mac_addr_remove = mvneta_mac_addr_remove,
+ .mac_addr_add = mvneta_mac_addr_add,
+ .mac_addr_set = mvneta_mac_addr_set,
+ .mtu_set = mvneta_mtu_set,
+ .stats_get = mvneta_stats_get,
+ .stats_reset = mvneta_stats_reset,
+ .dev_infos_get = mvneta_dev_infos_get,
+ .dev_supported_ptypes_get = mvneta_dev_supported_ptypes_get,
+ .rxq_info_get = mvneta_rxq_info_get,
+ .txq_info_get = mvneta_txq_info_get,
+ .rx_queue_setup = mvneta_rx_queue_setup,
+ .rx_queue_release = mvneta_rx_queue_release,
+ .tx_queue_setup = mvneta_tx_queue_setup,
+ .tx_queue_release = mvneta_tx_queue_release,
+};
+
+/**
+ * Create device representing Ethernet port.
+ *
+ * @param vdev
+ * Pointer to the backing vdev device.
+ * @param name
+ * Pointer to the port's name.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
+{
+ int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
+ struct rte_eth_dev *eth_dev;
+ struct mvneta_priv *priv;
+ struct ifreq req;
+
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return -ENOMEM;
+
+ priv = rte_zmalloc_socket(name, sizeof(*priv), 0, rte_socket_id());
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ eth_dev->data->dev_private = priv;
+
+ eth_dev->data->mac_addrs =
+ rte_zmalloc("mac_addrs",
+ ETHER_ADDR_LEN * MVNETA_MAC_ADDRS_MAX, 0);
+ if (!eth_dev->data->mac_addrs) {
+ MVNETA_LOG(ERR, "Failed to allocate space for eth addrs");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memset(&req, 0, sizeof(req));
+ strcpy(req.ifr_name, name);
+ ret = ioctl(fd, SIOCGIFHWADDR, &req);
+ if (ret)
+ goto out_free;
+
+ memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
+ req.ifr_addr.sa_data, ETHER_ADDR_LEN);
+
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->device = &vdev->device;
+ eth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;
+ mvneta_set_tx_function(eth_dev);
+ eth_dev->dev_ops = &mvneta_ops;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+out_free:
+ rte_eth_dev_release_port(eth_dev);
+
+ return ret;
+}
+
+/**
+ * Cleanup previously created device representing Ethernet port.
+ *
+ * @param eth_dev
+ * Pointer to the corresponding rte_eth_dev structure.
+ */
+static void
+mvneta_eth_dev_destroy(struct rte_eth_dev *eth_dev)
+{
+ rte_eth_dev_release_port(eth_dev);
+}
+
+/**
+ * Cleanup previously created device representing Ethernet port.
+ *
+ * @param name
+ * Pointer to the port name.
+ */
+static void
+mvneta_eth_dev_destroy_name(const char *name)
+{
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev)
+ return;
+
+ mvneta_eth_dev_destroy(eth_dev);
+}
+
+/**
+ * DPDK callback to register the virtual device.
+ *
+ * @param vdev
+ * Pointer to the virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mvneta_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_kvargs *kvlist;
+ struct mvneta_ifnames ifnames;
+ int ret = -EINVAL;
+ uint32_t i, ifnum;
+ const char *params;
+
+ params = rte_vdev_device_args(vdev);
+ if (!params)
+ return -EINVAL;
+
+ kvlist = rte_kvargs_parse(params, valid_args);
+ if (!kvlist)
+ return -EINVAL;
+
+ ifnum = rte_kvargs_count(kvlist, MVNETA_IFACE_NAME_ARG);
+ if (ifnum > RTE_DIM(ifnames.names))
+ goto out_free_kvlist;
+
+ ifnames.idx = 0;
+ rte_kvargs_process(kvlist, MVNETA_IFACE_NAME_ARG,
+ mvneta_ifnames_get, &ifnames);
+
+ /*
+ * The system initialization below should be done only once,
+ * when the first device is probed.
+ */
+ if (mvneta_dev_num)
+ goto init_devices;
+
+ MVNETA_LOG(INFO, "Perform MUSDK initializations");
+
+ ret = rte_mvep_init(MVEP_MOD_T_NETA, kvlist);
+ if (ret)
+ goto out_free_kvlist;
+
+ ret = mvneta_neta_init();
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to init NETA!");
+ rte_mvep_deinit(MVEP_MOD_T_NETA);
+ goto out_free_kvlist;
+ }
+
+init_devices:
+ for (i = 0; i < ifnum; i++) {
+ MVNETA_LOG(INFO, "Creating %s", ifnames.names[i]);
+ ret = mvneta_eth_dev_create(vdev, ifnames.names[i]);
+ if (ret)
+ goto out_cleanup;
+ }
+ mvneta_dev_num += ifnum;
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+out_cleanup:
+ for (; i > 0; i--)
+ mvneta_eth_dev_destroy_name(ifnames.names[i]);
+
+ if (mvneta_dev_num == 0) {
+ mvneta_neta_deinit();
+ rte_mvep_deinit(MVEP_MOD_T_NETA);
+ }
+out_free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/**
+ * DPDK callback to remove virtual device.
+ *
+ * @param vdev
+ * Pointer to the removed virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mvneta_remove(struct rte_vdev_device *vdev)
+{
+ int i;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (!name)
+ return -EINVAL;
+
+ MVNETA_LOG(INFO, "Removing %s", name);
+
+ RTE_ETH_FOREACH_DEV(i) {
+ if (rte_eth_devices[i].device != &vdev->device)
+ continue;
+
+ mvneta_eth_dev_destroy(&rte_eth_devices[i]);
+ mvneta_dev_num--;
+ }
+
+ if (mvneta_dev_num == 0) {
+ MVNETA_LOG(INFO, "Perform MUSDK deinit");
+ mvneta_neta_deinit();
+ rte_mvep_deinit(MVEP_MOD_T_NETA);
+ }
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_mvneta_drv = {
+ .probe = rte_pmd_mvneta_probe,
+ .remove = rte_pmd_mvneta_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_mvneta, pmd_mvneta_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_mvneta, "iface=<ifc>");
+
+RTE_INIT(mvneta_init_log)
+{
+ mvneta_logtype = rte_log_register("pmd.net.mvneta");
+ if (mvneta_logtype >= 0)
+ rte_log_set_level(mvneta_logtype, RTE_LOG_NOTICE);
+}
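The driver registers only the "iface" devargs key (see RTE_PMD_REGISTER_PARAM_STRING above), one entry per MUSDK-controlled netdev. A hedged equivalent of passing --vdev=net_mvneta,iface=eth0,iface=eth2 on the EAL command line, done through programmatic hot-plug; the interface names are placeholders:

	#include <rte_bus_vdev.h>

	/* "eth0"/"eth2" stand in for the kernel netdevs handed over to MUSDK. */
	int ret = rte_vdev_init("net_mvneta", "iface=eth0,iface=eth2");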
diff --git a/drivers/net/mvneta/mvneta_ethdev.h b/drivers/net/mvneta/mvneta_ethdev.h
new file mode 100644
index 00000000..101b0a81
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_ethdev.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MVNETA_ETHDEV_H_
+#define _MVNETA_ETHDEV_H_
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+/*
+ * container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include <drivers/mv_neta.h>
+#include <drivers/mv_neta_ppio.h>
+
+/** Packet offset inside RX buffer. */
+#define MRVL_NETA_PKT_OFFS 64
+
+/** Maximum number of rx/tx queues per port */
+#define MRVL_NETA_RXQ_MAX 8
+#define MRVL_NETA_TXQ_MAX 8
+
+/** Minimum/maximum number of descriptors in tx queue */
+#define MRVL_NETA_TXD_MIN 16
+#define MRVL_NETA_TXD_MAX 2048
+
+/** Tx queue descriptors alignment in B */
+#define MRVL_NETA_TXD_ALIGN 32
+
+/** Minimum/maximum number of descriptors in rx queue */
+#define MRVL_NETA_RXD_MIN 16
+#define MRVL_NETA_RXD_MAX 2048
+
+/** Rx queue descriptors alignment in B */
+#define MRVL_NETA_RXD_ALIGN 32
+
+#define MRVL_NETA_VLAN_TAG_LEN 4
+#define MRVL_NETA_ETH_HDRS_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
+ MRVL_NETA_VLAN_TAG_LEN)
+
+#define MRVL_NETA_HDRS_LEN (MV_MH_SIZE + MRVL_NETA_ETH_HDRS_LEN)
+#define MRVL_NETA_MTU_TO_MRU(mtu) ((mtu) + MRVL_NETA_HDRS_LEN)
+#define MRVL_NETA_MRU_TO_MTU(mru) ((mru) - MRVL_NETA_HDRS_LEN)
+
+
+struct mvneta_priv {
+ /* Hot fields, used in fast path. */
+ struct neta_ppio *ppio; /**< Port handler pointer */
+
+ uint8_t pp_id;
+ uint8_t ppio_id; /* ppio port id */
+ uint8_t uc_mc_flushed;
+ uint8_t multiseg;
+
+ struct neta_ppio_params ppio_params;
+
+ uint64_t rate_max;
+ struct rte_eth_stats prev_stats;
+};
+
+/** Current log type. */
+extern int mvneta_logtype;
+
+#define MVNETA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, mvneta_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#endif /* _MVNETA_ETHDEV_H_ */
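The MTU/MRU conversion macros above only add or subtract the fixed per-frame overhead (Marvell header plus Ethernet header, CRC and one VLAN tag). A minimal standalone sketch of that arithmetic, assuming MV_MH_SIZE is 2 bytes as in MUSDK; all macro names are local to the example:

#include <assert.h>
#include <stdint.h>

#define MV_MH_SIZE      2                        /* assumed MUSDK Marvell-header size */
#define VLAN_TAG_LEN    4
#define ETH_HDRS_LEN    (14 + 4 + VLAN_TAG_LEN)  /* Ethernet hdr + CRC + VLAN tag */
#define HDRS_LEN        (MV_MH_SIZE + ETH_HDRS_LEN)
#define MTU_TO_MRU(mtu) ((mtu) + HDRS_LEN)
#define MRU_TO_MTU(mru) ((mru) - HDRS_LEN)

int main(void)
{
	uint16_t mtu = 1500;

	/* A 1500-byte MTU maps to a 1524-byte MRU, and the mapping is invertible. */
	assert(MTU_TO_MRU(mtu) == 1524);
	assert(MRU_TO_MTU(MTU_TO_MRU(mtu)) == mtu);
	return 0;
}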
diff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c
new file mode 100644
index 00000000..62caa684
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_rxtx.c
@@ -0,0 +1,1030 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include "mvneta_rxtx.h"
+
+#define MVNETA_PKT_EFFEC_OFFS (MRVL_NETA_PKT_OFFS + MV_MH_SIZE)
+
+#define MRVL_NETA_DEFAULT_TC 0
+
+/** Maximum number of descriptors in shadow queue. Must be power of 2 */
+#define MRVL_NETA_TX_SHADOWQ_SIZE MRVL_NETA_TXD_MAX
+
+/** Shadow queue size mask (since shadow queue size is power of 2) */
+#define MRVL_NETA_TX_SHADOWQ_MASK (MRVL_NETA_TX_SHADOWQ_SIZE - 1)
+
+/** Minimum number of sent buffers to release from shadow queue to BM */
+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN 16
+
+/** Maximum number of sent buffers to release from shadow queue to BM */
+#define MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX 64
+
+#define MVNETA_COOKIE_ADDR_INVALID ~0ULL
+#define MVNETA_COOKIE_HIGH_ADDR_SHIFT (sizeof(neta_cookie_t) * 8)
+#define MVNETA_COOKIE_HIGH_ADDR_MASK (~0ULL << MVNETA_COOKIE_HIGH_ADDR_SHIFT)
+
+#define MVNETA_SET_COOKIE_HIGH_ADDR(addr) { \
+ if (unlikely(cookie_addr_high == MVNETA_COOKIE_ADDR_INVALID)) \
+ cookie_addr_high = \
+ (uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK;\
+}
+
+#define MVNETA_CHECK_COOKIE_HIGH_ADDR(addr) \
+ ((likely(cookie_addr_high == \
+ ((uint64_t)(addr) & MVNETA_COOKIE_HIGH_ADDR_MASK))) ? 1 : 0)
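The cookie handed to the NETA buffer manager holds only the low bits of the mbuf virtual address (32 bits, assuming a 32-bit neta_cookie_t as the shift above suggests), so the driver caches the common high bits once and ORs them back in on the receive side. A hedged, standalone sketch of that split and reassembly; all names are local to the example:

#include <assert.h>
#include <stdint.h>

#define COOKIE_ADDR_INVALID (~0ULL)
#define COOKIE_SHIFT        32
#define COOKIE_HIGH_MASK    (~0ULL << COOKIE_SHIFT)

static uint64_t high_bits = COOKIE_ADDR_INVALID;

/* Store: keep only the low 32 bits, remember the high part once. */
static uint32_t addr_to_cookie(uint64_t addr)
{
	if (high_bits == COOKIE_ADDR_INVALID)
		high_bits = addr & COOKIE_HIGH_MASK;
	/* All mbufs must share the same high bits for this scheme to work. */
	assert((addr & COOKIE_HIGH_MASK) == high_bits);
	return (uint32_t)addr;
}

/* Load: OR the cached high bits back in to recover the full pointer. */
static uint64_t cookie_to_addr(uint32_t cookie)
{
	return high_bits | cookie;
}

int main(void)
{
	uint64_t addr = 0x0000ffff12345678ULL;

	assert(cookie_to_addr(addr_to_cookie(addr)) == addr);
	return 0;
}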
+
+struct mvneta_rxq {
+ struct mvneta_priv *priv;
+ struct rte_mempool *mp;
+ int queue_id;
+ int port_id;
+ int size;
+ int cksum_enabled;
+ uint64_t bytes_recv;
+ uint64_t drop_mac;
+ uint64_t pkts_processed;
+};
+
+/*
+ * A shadow queue keeps per-buffer bookkeeping information for packets handed
+ * to the hardware, so that sent buffers can later be harvested back to the
+ * mempool.
+ */
+struct mvneta_shadow_txq {
+ int head; /* write index - used when sending buffers */
+ int tail; /* read index - used when releasing buffers */
+ u16 size; /* queue occupied size */
+ struct neta_buff_inf ent[MRVL_NETA_TX_SHADOWQ_SIZE]; /* q entries */
+};
+
+struct mvneta_txq {
+ struct mvneta_priv *priv;
+ int queue_id;
+ int port_id;
+ uint64_t bytes_sent;
+ struct mvneta_shadow_txq shadow_txq;
+ int tx_deferred_start;
+};
+
+static uint64_t cookie_addr_high = MVNETA_COOKIE_ADDR_INVALID;
+static uint16_t rx_desc_free_thresh = MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN;
+
+static inline int
+mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num)
+{
+ struct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
+ struct neta_buff_inf entries[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX];
+ int i, ret;
+ uint16_t nb_desc = *num;
+
+ ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc);
+ if (ret) {
+ MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc);
+ *num = 0;
+ return -1;
+ }
+
+ MVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]);
+
+ for (i = 0; i < nb_desc; i++) {
+ if (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) {
+ MVNETA_LOG(ERR,
+ "mbuf virt high addr 0x%lx out of range 0x%lx",
+ (uint64_t)mbufs[i] >> 32,
+ cookie_addr_high >> 32);
+ *num = 0;
+ goto out;
+ }
+ entries[i].addr = rte_mbuf_data_iova_default(mbufs[i]);
+ entries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i];
+ }
+ neta_ppio_inq_put_buffs(priv->ppio, rxq->queue_id, entries, num);
+
+out:
+ for (i = *num; i < nb_desc; i++)
+ rte_pktmbuf_free(mbufs[i]);
+
+ return 0;
+}
+
+/**
+ * Allocate buffers from mempool
+ * and store addresses in rx descriptors.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static inline int
+mvneta_buffs_alloc(struct mvneta_priv *priv, struct mvneta_rxq *rxq, int *num)
+{
+ uint16_t nb_desc, nb_desc_burst, sent = 0;
+ int ret = 0;
+
+ nb_desc = *num;
+
+ do {
+ nb_desc_burst =
+ (nb_desc < MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX) ?
+ nb_desc : MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX;
+
+ ret = mvneta_buffs_refill(priv, rxq, &nb_desc_burst);
+ if (unlikely(ret || !nb_desc_burst))
+ break;
+
+ sent += nb_desc_burst;
+ nb_desc -= nb_desc_burst;
+
+ } while (nb_desc);
+
+ *num = sent;
+
+ return ret;
+}
+
+static inline void
+mvneta_fill_shadowq(struct mvneta_shadow_txq *sq, struct rte_mbuf *buf)
+{
+ sq->ent[sq->head].cookie = (uint64_t)buf;
+ sq->ent[sq->head].addr = buf ?
+ rte_mbuf_data_iova_default(buf) : 0;
+
+ sq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ sq->size++;
+}
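Since MRVL_NETA_TX_SHADOWQ_SIZE is a power of two, advancing or rewinding the shadow queue indices is just an AND with the size-minus-one mask instead of a modulo. A small standalone illustration of the wrap-around (queue size shrunk for brevity):

#include <assert.h>

#define SHADOWQ_SIZE 16                          /* must be a power of two */
#define SHADOWQ_MASK (SHADOWQ_SIZE - 1)

int main(void)
{
	unsigned int head = SHADOWQ_SIZE - 1;    /* last slot */

	head = (head + 1) & SHADOWQ_MASK;        /* advance: wraps back to 0 */
	assert(head == 0);

	/* Rewind by one, as done when the HW accepted fewer descriptors. */
	head = (SHADOWQ_SIZE + head - 1) & SHADOWQ_MASK;
	assert(head == SHADOWQ_SIZE - 1);
	return 0;
}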
+
+static inline void
+mvneta_fill_desc(struct neta_ppio_desc *desc, struct rte_mbuf *buf)
+{
+ neta_ppio_outq_desc_reset(desc);
+ neta_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
+ neta_ppio_outq_desc_set_pkt_offset(desc, 0);
+ neta_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
+}
+
+/**
+ * Release already sent buffers to mempool.
+ *
+ * @param ppio
+ * Pointer to the port structure.
+ * @param sq
+ * Pointer to the shadow queue.
+ * @param qid
+ * Queue id number.
+ */
+static inline void
+mvneta_sent_buffers_free(struct neta_ppio *ppio,
+ struct mvneta_shadow_txq *sq, int qid)
+{
+ struct neta_buff_inf *entry;
+ uint16_t nb_done = 0;
+ int i;
+ int tail = sq->tail;
+
+ neta_ppio_get_num_outq_done(ppio, qid, &nb_done);
+
+ if (nb_done > sq->size) {
+ MVNETA_LOG(ERR, "nb_done: %d, sq->size %d",
+ nb_done, sq->size);
+ return;
+ }
+
+ for (i = 0; i < nb_done; i++) {
+ entry = &sq->ent[tail];
+
+ if (unlikely(!entry->addr)) {
+ MVNETA_LOG(DEBUG,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!",
+ tail, (u64)entry->cookie,
+ (u64)entry->addr);
+ tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ continue;
+ }
+
+ struct rte_mbuf *mbuf;
+
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | entry->cookie);
+ rte_pktmbuf_free(mbuf);
+ tail = (tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ }
+
+ sq->tail = tail;
+ sq->size -= nb_done;
+}
+
+/**
+ * Return packet type information and l3/l4 offsets.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ * @param l3_offset
+ * l3 packet offset.
+ * @param l4_offset
+ * l4 packet offset.
+ *
+ * @return
+ * Packet type information.
+ */
+static inline uint64_t
+mvneta_desc_to_packet_type_and_offset(struct neta_ppio_desc *desc,
+ uint8_t *l3_offset, uint8_t *l4_offset)
+{
+ enum neta_inq_l3_type l3_type;
+ enum neta_inq_l4_type l4_type;
+ uint64_t packet_type;
+
+ neta_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
+ neta_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
+
+ packet_type = RTE_PTYPE_L2_ETHER;
+
+ if (NETA_RXD_GET_VLAN_INFO(desc))
+ packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+
+ switch (l3_type) {
+ case NETA_INQ_L3_TYPE_IPV4_BAD:
+ case NETA_INQ_L3_TYPE_IPV4_OK:
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ break;
+ case NETA_INQ_L3_TYPE_IPV6:
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ break;
+ default:
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ MVNETA_LOG(DEBUG, "Failed to recognize l3 packet type");
+ break;
+ }
+
+ switch (l4_type) {
+ case NETA_INQ_L4_TYPE_TCP:
+ packet_type |= RTE_PTYPE_L4_TCP;
+ break;
+ case NETA_INQ_L4_TYPE_UDP:
+ packet_type |= RTE_PTYPE_L4_UDP;
+ break;
+ default:
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ MVNETA_LOG(DEBUG, "Failed to recognize l4 packet type");
+ break;
+ }
+
+ return packet_type;
+}
+
+/**
+ * Prepare offload information.
+ *
+ * @param ol_flags
+ * Offload flags.
+ * @param packet_type
+ * Packet type bitfield.
+ * @param l3_type
+ * Pointer to the neta_outq_l3_type enum.
+ * @param l4_type
+ * Pointer to the neta_outq_l4_type enum.
+ * @param gen_l3_cksum
+ * Will be set to 1 in case l3 checksum is computed.
+ * @param gen_l4_cksum
+ * Will be set to 1 in case l4 checksum is computed.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static inline int
+mvneta_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
+ enum neta_outq_l3_type *l3_type,
+ enum neta_outq_l4_type *l4_type,
+ int *gen_l3_cksum,
+ int *gen_l4_cksum)
+{
+ /*
+ * Based on ol_flags prepare information
+	 * for neta_ppio_outq_desc_set_proto_info(), which sets up the
+	 * descriptor for checksum offloading.
+ */
+ if (ol_flags & PKT_TX_IPV4) {
+ *l3_type = NETA_OUTQ_L3_TYPE_IPV4;
+ *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *l3_type = NETA_OUTQ_L3_TYPE_IPV6;
+ /* no checksum for ipv6 header */
+ *gen_l3_cksum = 0;
+ } else {
+		/* if the packet is neither IPv4 nor IPv6, stop processing */
+ return -1;
+ }
+
+ ol_flags &= PKT_TX_L4_MASK;
+ if ((packet_type & RTE_PTYPE_L4_TCP) &&
+ ol_flags == PKT_TX_TCP_CKSUM) {
+ *l4_type = NETA_OUTQ_L4_TYPE_TCP;
+ *gen_l4_cksum = 1;
+ } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
+ ol_flags == PKT_TX_UDP_CKSUM) {
+ *l4_type = NETA_OUTQ_L4_TYPE_UDP;
+ *gen_l4_cksum = 1;
+ } else {
+ *l4_type = NETA_OUTQ_L4_TYPE_OTHER;
+ /* no checksum for other type */
+ *gen_l4_cksum = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Get offload information from the received packet descriptor.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ *
+ * @return
+ * Mbuf offload flags.
+ */
+static inline uint64_t
+mvneta_desc_to_ol_flags(struct neta_ppio_desc *desc)
+{
+ uint64_t flags;
+ enum neta_inq_desc_status status;
+
+ status = neta_ppio_inq_desc_get_l3_pkt_error(desc);
+ if (unlikely(status != NETA_DESC_ERR_OK))
+ flags = PKT_RX_IP_CKSUM_BAD;
+ else
+ flags = PKT_RX_IP_CKSUM_GOOD;
+
+ status = neta_ppio_inq_desc_get_l4_pkt_error(desc);
+ if (unlikely(status != NETA_DESC_ERR_OK))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ return flags;
+}
+
+/**
+ * DPDK callback for transmit.
+ *
+ * @param txq
+ * Generic pointer to the transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mvneta_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct mvneta_txq *q = txq;
+ struct mvneta_shadow_txq *sq;
+ struct neta_ppio_desc descs[nb_pkts];
+
+ int i, ret, bytes_sent = 0;
+ uint16_t num, sq_free_size;
+ uint64_t addr;
+
+ sq = &q->shadow_txq;
+ if (unlikely(!nb_pkts || !q->priv->ppio))
+ return 0;
+
+ if (sq->size)
+ mvneta_sent_buffers_free(q->priv->ppio,
+ sq, q->queue_id);
+
+ sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;
+ if (unlikely(nb_pkts > sq_free_size)) {
+ MVNETA_LOG(DEBUG,
+ "No room in shadow queue for %d packets! %d packets will be sent.",
+ nb_pkts, sq_free_size);
+ nb_pkts = sq_free_size;
+ }
+
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+ int gen_l3_cksum, gen_l4_cksum;
+ enum neta_outq_l3_type l3_type;
+ enum neta_outq_l4_type l4_type;
+
+ /* Fill first mbuf info in shadow queue */
+ mvneta_fill_shadowq(sq, mbuf);
+ mvneta_fill_desc(&descs[i], mbuf);
+
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+
+ ret = mvneta_prepare_proto_info(mbuf->ol_flags,
+ mbuf->packet_type,
+ &l3_type, &l4_type,
+ &gen_l3_cksum,
+ &gen_l4_cksum);
+ if (unlikely(ret))
+ continue;
+
+ neta_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
+ mbuf->l2_len,
+ mbuf->l2_len + mbuf->l3_len,
+ gen_l3_cksum, gen_l4_cksum);
+ }
+ num = nb_pkts;
+ neta_ppio_send(q->priv->ppio, q->queue_id, descs, &nb_pkts);
+
+
+ /* number of packets that were not sent */
+ if (unlikely(num > nb_pkts)) {
+ for (i = nb_pkts; i < num; i++) {
+ sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) &
+ MRVL_NETA_TX_SHADOWQ_MASK;
+ addr = cookie_addr_high | sq->ent[sq->head].cookie;
+ bytes_sent -=
+ rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
+ }
+ sq->size -= num - nb_pkts;
+ }
+
+ q->bytes_sent += bytes_sent;
+
+ return nb_pkts;
+}
+
+/** DPDK callback for S/G transmit.
+ *
+ * @param txq
+ * Generic pointer to the transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mvneta_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct mvneta_txq *q = txq;
+ struct mvneta_shadow_txq *sq;
+ struct neta_ppio_desc descs[nb_pkts * NETA_PPIO_DESC_NUM_FRAGS];
+ struct neta_ppio_sg_pkts pkts;
+ uint8_t frags[nb_pkts];
+ int i, j, ret, bytes_sent = 0;
+ int tail, tail_first;
+ uint16_t num, sq_free_size;
+ uint16_t nb_segs, total_descs = 0;
+ uint64_t addr;
+
+ sq = &q->shadow_txq;
+ pkts.frags = frags;
+ pkts.num = 0;
+
+ if (unlikely(!q->priv->ppio))
+ return 0;
+
+ if (sq->size)
+ mvneta_sent_buffers_free(q->priv->ppio,
+ sq, q->queue_id);
+ /* Save shadow queue free size */
+ sq_free_size = MRVL_NETA_TX_SHADOWQ_SIZE - sq->size - 1;
+
+ tail = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+ struct rte_mbuf *seg = NULL;
+ int gen_l3_cksum, gen_l4_cksum;
+ enum neta_outq_l3_type l3_type;
+ enum neta_outq_l4_type l4_type;
+
+ nb_segs = mbuf->nb_segs;
+ total_descs += nb_segs;
+
+ /*
+ * Check if total_descs does not exceed
+ * shadow queue free size
+ */
+ if (unlikely(total_descs > sq_free_size)) {
+ total_descs -= nb_segs;
+ MVNETA_LOG(DEBUG,
+ "No room in shadow queue for %d packets! "
+ "%d packets will be sent.",
+ nb_pkts, i);
+ break;
+ }
+
+
+ /* Check if nb_segs does not exceed the max nb of desc per
+ * fragmented packet
+ */
+ if (unlikely(nb_segs > NETA_PPIO_DESC_NUM_FRAGS)) {
+ total_descs -= nb_segs;
+ MVNETA_LOG(ERR,
+ "Too many segments. Packet won't be sent.");
+ break;
+ }
+
+ pkts.frags[pkts.num] = nb_segs;
+ pkts.num++;
+ tail_first = tail;
+
+ seg = mbuf;
+ for (j = 0; j < nb_segs - 1; j++) {
+ /* For the subsequent segments, set shadow queue
+ * buffer to NULL
+ */
+ mvneta_fill_shadowq(sq, NULL);
+ mvneta_fill_desc(&descs[tail], seg);
+
+ tail++;
+ seg = seg->next;
+ }
+ /* Put first mbuf info in last shadow queue entry */
+ mvneta_fill_shadowq(sq, mbuf);
+ /* Update descriptor with last segment */
+ mvneta_fill_desc(&descs[tail++], seg);
+
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+
+ ret = mvneta_prepare_proto_info(mbuf->ol_flags,
+ mbuf->packet_type,
+ &l3_type, &l4_type,
+ &gen_l3_cksum,
+ &gen_l4_cksum);
+ if (unlikely(ret))
+ continue;
+
+ neta_ppio_outq_desc_set_proto_info(&descs[tail_first],
+ l3_type, l4_type,
+ mbuf->l2_len,
+ mbuf->l2_len + mbuf->l3_len,
+ gen_l3_cksum, gen_l4_cksum);
+ }
+ num = total_descs;
+ neta_ppio_send_sg(q->priv->ppio, q->queue_id, descs, &total_descs,
+ &pkts);
+
+ /* number of packets that were not sent */
+ if (unlikely(num > total_descs)) {
+ for (i = total_descs; i < num; i++) {
+ sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE +
+ sq->head - 1) &
+ MRVL_NETA_TX_SHADOWQ_MASK;
+ addr = sq->ent[sq->head].cookie;
+ if (addr) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | addr);
+ bytes_sent -= rte_pktmbuf_pkt_len(mbuf);
+ }
+ }
+ sq->size -= num - total_descs;
+ nb_pkts = pkts.num;
+ }
+
+ q->bytes_sent += bytes_sent;
+
+ return nb_pkts;
+}
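In the scatter-gather transmit path every mbuf segment consumes one hardware descriptor, while the per-packet segment counts are reported to MUSDK through the frags array. A standalone sketch of how packet and descriptor counts relate (segment counts chosen for the example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t nb_segs[3] = {1, 3, 2};          /* segments per packet */
	uint8_t frags[3];
	uint16_t total_descs = 0, num_pkts = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		frags[num_pkts++] = nb_segs[i];  /* one frags[] entry per packet */
		total_descs += nb_segs[i];       /* one descriptor per segment */
	}
	/* Three packets expand into six hardware descriptors. */
	assert(num_pkts == 3 && total_descs == 6);
	(void)frags;
	return 0;
}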
+
+/**
+ * Set tx burst function according to offload flag
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mvneta_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (priv->multiseg) {
+ MVNETA_LOG(INFO, "Using multi-segment tx callback");
+ dev->tx_pkt_burst = mvneta_tx_sg_pkt_burst;
+ } else {
+ MVNETA_LOG(INFO, "Using single-segment tx callback");
+ dev->tx_pkt_burst = mvneta_tx_pkt_burst;
+ }
+}
+
+/**
+ * DPDK callback for receive.
+ *
+ * @param rxq
+ * Generic pointer to the receive queue.
+ * @param rx_pkts
+ * Array to store received packets.
+ * @param nb_pkts
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received.
+ */
+uint16_t
+mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct mvneta_rxq *q = rxq;
+ struct neta_ppio_desc descs[nb_pkts];
+ int i, ret, rx_done = 0, rx_dropped = 0;
+
+ if (unlikely(!q || !q->priv->ppio))
+ return 0;
+
+ ret = neta_ppio_recv(q->priv->ppio, q->queue_id,
+ descs, &nb_pkts);
+
+ if (unlikely(ret < 0)) {
+ MVNETA_LOG(ERR, "Failed to receive packets");
+ return 0;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf;
+ uint8_t l3_offset, l4_offset;
+ enum neta_inq_desc_status status;
+ uint64_t addr;
+
+ addr = cookie_addr_high |
+ neta_ppio_inq_desc_get_cookie(&descs[i]);
+ mbuf = (struct rte_mbuf *)addr;
+
+ rte_pktmbuf_reset(mbuf);
+
+ /* drop packet in case of mac, overrun or resource error */
+ status = neta_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
+ if (unlikely(status != NETA_DESC_ERR_OK)) {
+ /* Release the mbuf to the mempool since
+ * it won't be transferred to tx path
+ */
+ rte_pktmbuf_free(mbuf);
+ q->drop_mac++;
+ rx_dropped++;
+ continue;
+ }
+
+ mbuf->data_off += MVNETA_PKT_EFFEC_OFFS;
+ mbuf->pkt_len = neta_ppio_inq_desc_get_pkt_len(&descs[i]);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = q->port_id;
+ mbuf->packet_type =
+ mvneta_desc_to_packet_type_and_offset(&descs[i],
+ &l3_offset,
+ &l4_offset);
+ mbuf->l2_len = l3_offset;
+ mbuf->l3_len = l4_offset - l3_offset;
+
+ if (likely(q->cksum_enabled))
+ mbuf->ol_flags = mvneta_desc_to_ol_flags(&descs[i]);
+
+ rx_pkts[rx_done++] = mbuf;
+ q->bytes_recv += mbuf->pkt_len;
+ }
+ q->pkts_processed += rx_done + rx_dropped;
+
+ if (q->pkts_processed > rx_desc_free_thresh) {
+ int buf_to_refill = rx_desc_free_thresh;
+
+ ret = mvneta_buffs_alloc(q->priv, q, &buf_to_refill);
+ if (ret)
+ MVNETA_LOG(ERR, "Refill failed");
+ q->pkts_processed -= buf_to_refill;
+ }
+
+ return rx_done;
+}
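The receive burst only refills the NETA input queue after at least rx_desc_free_thresh packets have been consumed, so the cost of the bulk mbuf allocation is amortized across several bursts. A tiny standalone model of that counter, using the minimum release burst size defined above as the threshold:

#include <assert.h>
#include <stdint.h>

#define FREE_THRESH 16   /* MRVL_NETA_BUF_RELEASE_BURST_SIZE_MIN */

int main(void)
{
	uint64_t pkts_processed = 0;
	unsigned int burst, refills = 0;

	/* Process 40 packets, 10 per burst. */
	for (burst = 0; burst < 4; burst++) {
		pkts_processed += 10;
		if (pkts_processed > FREE_THRESH) {
			/* a real driver would refill FREE_THRESH buffers here */
			pkts_processed -= FREE_THRESH;
			refills++;
		}
	}
	/* 40 received packets trigger two 16-buffer refills. */
	assert(refills == 2);
	return 0;
}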
+
+/**
+ * DPDK callback to configure the receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Threshold parameters (unused).
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+int
+mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_rxconf *conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct mvneta_rxq *rxq;
+ uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
+ uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;
+
+ if (frame_size < max_rx_pkt_len) {
+ MVNETA_LOG(ERR,
+ "Mbuf size must be increased to %u bytes to hold up "
+ "to %u bytes of data.",
+ buf_size + max_rx_pkt_len - frame_size,
+ max_rx_pkt_len);
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ MVNETA_LOG(INFO, "Setting max rx pkt len to %u",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ }
+
+ if (dev->data->rx_queues[idx]) {
+ rte_free(dev->data->rx_queues[idx]);
+ dev->data->rx_queues[idx] = NULL;
+ }
+
+ rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
+ if (!rxq)
+ return -ENOMEM;
+
+ rxq->priv = priv;
+ rxq->mp = mp;
+ rxq->cksum_enabled = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->queue_id = idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->size = desc;
+ rx_desc_free_thresh = RTE_MIN(rx_desc_free_thresh, (desc / 2));
+ priv->ppio_params.inqs_params.tcs_params[MRVL_NETA_DEFAULT_TC].size =
+ desc;
+
+ dev->data->rx_queues[idx] = rxq;
+
+ return 0;
+}
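The rx queue setup clamps max_rx_pkt_len to what a single mbuf can hold after headroom and the effective packet offset are reserved. A standalone sketch with assumed typical values (a 2048-byte data room, the default 128-byte RTE_PKTMBUF_HEADROOM, and a 66-byte effective offset from MRVL_NETA_PKT_OFFS plus a 2-byte MV_MH_SIZE):

#include <assert.h>
#include <stdint.h>

#define PKTMBUF_HEADROOM 128   /* DPDK default RTE_PKTMBUF_HEADROOM */
#define PKT_EFFEC_OFFS   66    /* MRVL_NETA_PKT_OFFS (64) + assumed MV_MH_SIZE (2) */

int main(void)
{
	uint32_t buf_size = 2048;         /* mempool data room size */
	uint32_t max_rx_pkt_len = 9000;   /* requested jumbo frame length */
	uint32_t frame_size = buf_size - PKTMBUF_HEADROOM - PKT_EFFEC_OFFS;

	/* 2048 - 128 - 66 = 1854 bytes of packet data fit in one mbuf... */
	assert(frame_size == 1854);

	/* ...so the requested 9000-byte limit is clamped, as in the setup above. */
	if (frame_size < max_rx_pkt_len)
		max_rx_pkt_len = frame_size;
	assert(max_rx_pkt_len == 1854);
	return 0;
}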
+
+/**
+ * DPDK callback to configure the transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Transmit queue index.
+ * @param desc
+ * Number of descriptors to configure in the queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Tx queue configuration parameters.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+int
+mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ struct mvneta_txq *txq;
+
+ if (dev->data->tx_queues[idx]) {
+ rte_free(dev->data->tx_queues[idx]);
+ dev->data->tx_queues[idx] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->priv = priv;
+ txq->queue_id = idx;
+ txq->port_id = dev->data->port_id;
+ txq->tx_deferred_start = conf->tx_deferred_start;
+ dev->data->tx_queues[idx] = txq;
+
+ priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
+ priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to release the transmit queue.
+ *
+ * @param txq
+ * Generic transmit queue pointer.
+ */
+void
+mvneta_tx_queue_release(void *txq)
+{
+ struct mvneta_txq *q = txq;
+
+ if (!q)
+ return;
+
+ rte_free(q);
+}
+
+/**
+ * Return mbufs to mempool.
+ *
+ * @param desc
+ * Array of rx descriptors
+ * @param num
+ * Number of descriptors in the array
+ */
+static void
+mvneta_recv_buffs_free(struct neta_ppio_desc *desc, uint16_t num)
+{
+ uint64_t addr;
+	uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ if (desc) {
+ addr = cookie_addr_high |
+ neta_ppio_inq_desc_get_cookie(desc);
+ if (addr)
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ desc++;
+ }
+ }
+}
+
+int
+mvneta_alloc_rx_bufs(struct rte_eth_dev *dev)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+ int ret = 0, i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mvneta_rxq *rxq = dev->data->rx_queues[i];
+ int num = rxq->size;
+
+ ret = mvneta_buffs_alloc(priv, rxq, &num);
+ if (ret || num != rxq->size) {
+ rte_free(rxq);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Flush single receive queue.
+ *
+ * @param rxq
+ * Pointer to rx queue structure.
+ */
+static void
+mvneta_rx_queue_flush(struct mvneta_rxq *rxq)
+{
+ struct neta_ppio_desc *descs;
+ struct neta_buff_inf *bufs;
+ uint16_t num;
+ int ret, i;
+
+ descs = rte_malloc("rxdesc", MRVL_NETA_RXD_MAX * sizeof(*descs), 0);
+ bufs = rte_malloc("buffs", MRVL_NETA_RXD_MAX * sizeof(*bufs), 0);
+
+ do {
+ num = MRVL_NETA_RXD_MAX;
+ ret = neta_ppio_recv(rxq->priv->ppio,
+ rxq->queue_id,
+ descs, &num);
+ mvneta_recv_buffs_free(descs, num);
+ } while (ret == 0 && num);
+
+ rxq->pkts_processed = 0;
+
+ num = MRVL_NETA_RXD_MAX;
+
+ neta_ppio_inq_get_all_buffs(rxq->priv->ppio, rxq->queue_id, bufs, &num);
+ MVNETA_LOG(INFO, "freeing %u unused bufs.", num);
+
+ for (i = 0; i < num; i++) {
+ uint64_t addr;
+ if (bufs[i].cookie) {
+ addr = cookie_addr_high | bufs[i].cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ }
+ }
+
+ rte_free(descs);
+ rte_free(bufs);
+}
+
+/**
+ * Flush single transmit queue.
+ *
+ * @param txq
+ * Pointer to tx queue structure
+ */
+static void
+mvneta_tx_queue_flush(struct mvneta_txq *txq)
+{
+ struct mvneta_shadow_txq *sq = &txq->shadow_txq;
+
+ if (sq->size)
+ mvneta_sent_buffers_free(txq->priv->ppio, sq,
+ txq->queue_id);
+
+ /* free the rest of them */
+ while (sq->tail != sq->head) {
+ uint64_t addr = cookie_addr_high |
+ sq->ent[sq->tail].cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ sq->tail = (sq->tail + 1) & MRVL_NETA_TX_SHADOWQ_MASK;
+ }
+ memset(sq, 0, sizeof(*sq));
+}
+
+void
+mvneta_flush_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ MVNETA_LOG(INFO, "Flushing rx queues");
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mvneta_rxq *rxq = dev->data->rx_queues[i];
+
+ mvneta_rx_queue_flush(rxq);
+ }
+
+ MVNETA_LOG(INFO, "Flushing tx queues");
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mvneta_txq *txq = dev->data->tx_queues[i];
+
+ mvneta_tx_queue_flush(txq);
+ }
+}
+
+/**
+ * DPDK callback to release the receive queue.
+ *
+ * @param rxq
+ * Generic receive queue pointer.
+ */
+void
+mvneta_rx_queue_release(void *rxq)
+{
+ struct mvneta_rxq *q = rxq;
+
+ if (!q)
+ return;
+
+ /* If dev_stop was called already, mbufs are already
+ * returned to mempool and ppio is deinitialized.
+ * Skip this step.
+ */
+
+ if (q->priv->ppio)
+ mvneta_rx_queue_flush(q);
+
+ rte_free(rxq);
+}
+
+/**
+ * DPDK callback to get information about specific receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Receive queue index.
+ * @param qinfo
+ * Receive queue information structure.
+ */
+void
+mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct mvneta_rxq *q = dev->data->rx_queues[rx_queue_id];
+
+ qinfo->mp = q->mp;
+ qinfo->nb_desc = q->size;
+}
+
+/**
+ * DPDK callback to get information about specific transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param tx_queue_id
+ * Transmit queue index.
+ * @param qinfo
+ * Transmit queue information structure.
+ */
+void
+mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct mvneta_priv *priv = dev->data->dev_private;
+
+ qinfo->nb_desc =
+ priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+}
diff --git a/drivers/net/mvneta/mvneta_rxtx.h b/drivers/net/mvneta/mvneta_rxtx.h
new file mode 100644
index 00000000..cc291901
--- /dev/null
+++ b/drivers/net/mvneta/mvneta_rxtx.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MVNETA_RXTX_H_
+#define _MVNETA_RXTX_H_
+
+#include "mvneta_ethdev.h"
+
+int mvneta_alloc_rx_bufs(struct rte_eth_dev *dev);
+
+void mvneta_flush_queues(struct rte_eth_dev *dev);
+
+void mvneta_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void mvneta_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+void mvneta_set_tx_function(struct rte_eth_dev *dev);
+
+uint16_t
+mvneta_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+int
+mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_rxconf *conf __rte_unused,
+ struct rte_mempool *mp);
+int
+mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf);
+
+void mvneta_rx_queue_release(void *rxq);
+void mvneta_tx_queue_release(void *txq);
+
+#endif /* _MVNETA_RXTX_H_ */
diff --git a/drivers/net/mvneta/rte_pmd_mvneta_version.map b/drivers/net/mvneta/rte_pmd_mvneta_version.map
new file mode 100644
index 00000000..24bd5cdb
--- /dev/null
+++ b/drivers/net/mvneta/rte_pmd_mvneta_version.map
@@ -0,0 +1,3 @@
+DPDK_18.11 {
+ local: *;
+};
diff --git a/drivers/net/mvpp2/Makefile b/drivers/net/mvpp2/Makefile
index 492aef97..661d2cda 100644
--- a/drivers/net/mvpp2/Makefile
+++ b/drivers/net/mvpp2/Makefile
@@ -23,6 +23,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_mvpp2_version.map
# external library dependencies
+CFLAGS += -I$(RTE_SDK)/drivers/common/mvep
CFLAGS += -I$(LIBMUSDK_PATH)/include
CFLAGS += -DMVCONF_TYPES_PUBLIC
CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
@@ -32,11 +33,13 @@ LDLIBS += -L$(LIBMUSDK_PATH)/lib
LDLIBS += -lmusdk
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
-LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_bus_vdev -lrte_common_mvep
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_qos.c
SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_mtr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_tm.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mvpp2/meson.build b/drivers/net/mvpp2/meson.build
index e1398895..70ef2d64 100644
--- a/drivers/net/mvpp2/meson.build
+++ b/drivers/net/mvpp2/meson.build
@@ -19,7 +19,9 @@ endif
sources = files(
'mrvl_ethdev.c',
'mrvl_flow.c',
- 'mrvl_qos.c'
+ 'mrvl_qos.c',
+ 'mrvl_mtr.c',
+ 'mrvl_tm.c'
)
-deps += ['cfgfile']
+deps += ['cfgfile', 'common_mvep']
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index a2d0576e..ab4c14e5 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -10,15 +10,6 @@
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
-/* Unluckily, container_of is defined by both DPDK and MUSDK,
- * we'll declare only one version.
- *
- * Note that it is not used in this PMD anyway.
- */
-#ifdef container_of
-#undef container_of
-#endif
-
#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
@@ -29,8 +20,12 @@
#include <sys/stat.h>
#include <sys/types.h>
+#include <rte_mvep_common.h>
#include "mrvl_ethdev.h"
#include "mrvl_qos.h"
+#include "mrvl_flow.h"
+#include "mrvl_mtr.h"
+#include "mrvl_tm.h"
/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
@@ -59,23 +54,18 @@
#define MRVL_ARP_LENGTH 28
#define MRVL_COOKIE_ADDR_INVALID ~0ULL
-
-#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
-#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
-
-/* Memory size (in bytes) for MUSDK dma buffers */
-#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+#define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_CRC_STRIP | \
DEV_RX_OFFLOAD_CHECKSUM)
/** Port Tx offloads capabilities */
#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
static const char * const valid_args[] = {
MRVL_IFACE_NAME_ARG,
@@ -86,13 +76,12 @@ static const char * const valid_args[] = {
static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
- MRVL_MUSDK_BPOOLS_RESERVED,
- MRVL_MUSDK_BPOOLS_RESERVED
+ [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
};
-struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
-int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
-uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
+static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
+static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
int mrvl_logtype;
@@ -116,7 +105,9 @@ struct mrvl_shadow_txq {
int head; /* write index - used when sending buffers */
int tail; /* read index - used when releasing buffers */
u16 size; /* queue occupied size */
- u16 num_to_release; /* number of buffers sent, that can be released */
+	u16 num_to_release; /* number of descriptors sent that can be
+ * released
+ */
struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
@@ -148,6 +139,12 @@ static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
struct pp2_hif *hif, unsigned int core_id,
struct mrvl_shadow_txq *sq, int qid, int force);
+static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+static uint16_t mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+
#define MRVL_XSTATS_TBL_ENTRY(name) { \
#name, offsetof(struct pp2_ppio_statistics, name), \
sizeof(((struct pp2_ppio_statistics *)0)->name) \
@@ -174,6 +171,31 @@ static struct {
MRVL_XSTATS_TBL_ENTRY(tx_errors)
};
+static inline void
+mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf)
+{
+ sq->ent[sq->head].buff.cookie = (uint64_t)buf;
+ sq->ent[sq->head].buff.addr = buf ?
+ rte_mbuf_data_iova_default(buf) : 0;
+
+ sq->ent[sq->head].bpool =
+ (unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS ||
+ buf->refcnt > 1)) ? NULL :
+ mrvl_port_to_bpool_lookup[buf->port];
+
+ sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size++;
+}
+
+static inline void
+mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf)
+{
+ pp2_ppio_outq_desc_reset(desc);
+ pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
+ pp2_ppio_outq_desc_set_pkt_offset(desc, 0);
+ pp2_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
+}
+
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
@@ -253,6 +275,27 @@ out:
}
/**
+ * Set tx burst function according to offload flag
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (priv->multiseg) {
+ RTE_LOG(INFO, PMD, "Using multi-segment tx callback\n");
+ dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst;
+ } else {
+ RTE_LOG(INFO, PMD, "Using single-segment tx callback\n");
+ dev->tx_pkt_burst = mrvl_tx_pkt_burst;
+ }
+}
+
+/**
* Configure rss based on dpdk rss configuration.
*
* @param priv
@@ -307,6 +350,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
+ if (priv->ppio) {
+ MRVL_LOG(INFO, "Device reconfiguration is not supported");
+ return -EINVAL;
+ }
+
if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
@@ -314,14 +362,6 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- /* KEEP_CRC offload flag is not supported by PMD
- * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
- */
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
- MRVL_LOG(INFO, "L2 CRC stripping is always enabled in hw");
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
if (dev->data->dev_conf.rxmode.split_hdr_size) {
MRVL_LOG(INFO, "Split headers not supported");
return -EINVAL;
@@ -329,7 +369,10 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ MRVL_PP2_ETH_HDRS_LEN;
+
+ if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ priv->multiseg = 1;
ret = mrvl_configure_rxqs(priv, dev->data->port_id,
dev->data->nb_rx_queues);
@@ -345,6 +388,10 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
priv->ppio_params.maintain_stats = 1;
priv->nb_rx_queues = dev->data->nb_rx_queues;
+ ret = mrvl_tm_init(dev);
+ if (ret < 0)
+ return ret;
+
if (dev->data->nb_rx_queues == 1 &&
dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
@@ -375,21 +422,55 @@ static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct mrvl_priv *priv = dev->data->dev_private;
- /* extra MV_MH_SIZE bytes are required for Marvell tag */
- uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint16_t mru;
+ uint16_t mbuf_data_size = 0; /* SW buffer size */
int ret;
- if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
+ mru = MRVL_PP2_MTU_TO_MRU(mtu);
+ /*
+ * min_rx_buf_size is equal to mbuf data size
+	 * if the PMD didn't set it differently
+ */
+ mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+ /* Prevent PMD from:
+ * - setting mru greater than the mbuf size resulting in
+ * hw and sw buffer size mismatch
+ * - setting mtu that requires the support of scattered packets
+ * when this feature has not been enabled/supported so far
+ * (TODO check scattered_rx flag here once scattered RX is supported).
+ */
+ if (mru + MRVL_PKT_OFFS > mbuf_data_size) {
+ mru = mbuf_data_size - MRVL_PKT_OFFS;
+ mtu = MRVL_PP2_MRU_TO_MTU(mru);
+ MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted "
+ "by current mbuf size: %u. Set MTU to %u, MRU to %u",
+ mbuf_data_size, mtu, mru);
+ }
+
+ if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
+ MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
return -EINVAL;
+ }
+
+ dev->data->mtu = mtu;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
if (!priv->ppio)
return 0;
ret = pp2_ppio_set_mru(priv->ppio, mru);
- if (ret)
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to change MRU");
return ret;
+ }
- return pp2_ppio_set_mtu(priv->ppio, mtu);
+ ret = pp2_ppio_set_mtu(priv->ppio, mtu);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to change MTU");
+ return ret;
+ }
+
+ return 0;
}
/**
@@ -528,6 +609,9 @@ mrvl_dev_start(struct rte_eth_dev *dev)
char match[MRVL_MATCH_LEN];
int ret = 0, i, def_init_size;
+ if (priv->ppio)
+ return mrvl_dev_set_link_up(dev);
+
snprintf(match, sizeof(match), "ppio-%d:%d",
priv->pp_id, priv->ppio_id);
priv->ppio_params.match = match;
@@ -597,9 +681,13 @@ mrvl_dev_start(struct rte_eth_dev *dev)
}
priv->vlan_flushed = 1;
}
+ ret = mrvl_mtu_set(dev, dev->data->mtu);
+ if (ret)
+ MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu);
/* For default QoS config, don't start classifier. */
- if (mrvl_qos_cfg) {
+ if (mrvl_qos_cfg &&
+ mrvl_qos_cfg->port[dev->data->port_id].use_global_defaults == 0) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
MRVL_LOG(ERR, "Failed to setup QoS mapping");
@@ -631,6 +719,10 @@ mrvl_dev_start(struct rte_eth_dev *dev)
goto out;
}
+ mrvl_flow_init(dev);
+ mrvl_mtr_init(dev);
+ mrvl_set_tx_function(dev);
+
return 0;
out:
MRVL_LOG(ERR, "Failed to start device");
@@ -752,28 +844,7 @@ mrvl_flush_bpool(struct rte_eth_dev *dev)
static void
mrvl_dev_stop(struct rte_eth_dev *dev)
{
- struct mrvl_priv *priv = dev->data->dev_private;
-
mrvl_dev_set_link_down(dev);
- mrvl_flush_rx_queues(dev);
- mrvl_flush_tx_shadow_queues(dev);
- if (priv->cls_tbl) {
- pp2_cls_tbl_deinit(priv->cls_tbl);
- priv->cls_tbl = NULL;
- }
- if (priv->qos_tbl) {
- pp2_cls_qos_tbl_deinit(priv->qos_tbl);
- priv->qos_tbl = NULL;
- }
- if (priv->ppio)
- pp2_ppio_deinit(priv->ppio);
- priv->ppio = NULL;
-
- /* policer must be released after ppio deinitialization */
- if (priv->policer) {
- pp2_cls_plcr_deinit(priv->policer);
- priv->policer = NULL;
- }
}
/**
@@ -788,6 +859,11 @@ mrvl_dev_close(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
size_t i;
+ mrvl_flush_rx_queues(dev);
+ mrvl_flush_tx_shadow_queues(dev);
+ mrvl_flow_deinit(dev);
+ mrvl_mtr_deinit(dev);
+
for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
struct pp2_ppio_tc_params *tc_params =
&priv->ppio_params.inqs_params.tcs_params[i];
@@ -798,7 +874,29 @@ mrvl_dev_close(struct rte_eth_dev *dev)
}
}
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
+
+ if (priv->qos_tbl) {
+ pp2_cls_qos_tbl_deinit(priv->qos_tbl);
+ priv->qos_tbl = NULL;
+ }
+
mrvl_flush_bpool(dev);
+ mrvl_tm_deinit(dev);
+
+ if (priv->ppio) {
+ pp2_ppio_deinit(priv->ppio);
+ priv->ppio = NULL;
+ }
+
+ /* policer must be released after ppio deinitialization */
+ if (priv->default_policer) {
+ pp2_cls_plcr_deinit(priv->default_policer);
+ priv->default_policer = NULL;
+ }
}
/**
@@ -1337,7 +1435,6 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
/* By default packets are dropped if no descriptors are available */
info->default_rxconf.rx_drop_en = 1;
- info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP;
info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}
@@ -1356,6 +1453,8 @@ mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L3_IPV4_EXT,
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
@@ -1492,7 +1591,7 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
entries[i].buff.addr =
rte_mbuf_data_iova_default(mbufs[i]);
- entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
+ entries[i].buff.cookie = (uint64_t)mbufs[i];
entries[i].bpool = bpool;
}
@@ -1537,8 +1636,8 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
{
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_rxq *rxq;
- uint32_t min_size,
- max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
+ uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
uint64_t offloads;
@@ -1553,15 +1652,16 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -EFAULT;
}
- min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
- MRVL_PKT_EFFEC_OFFS;
- if (min_size < max_rx_pkt_len) {
- MRVL_LOG(ERR,
- "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
- max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
- MRVL_PKT_EFFEC_OFFS,
+ frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
+ if (frame_size < max_rx_pkt_len) {
+ MRVL_LOG(WARNING,
+ "Mbuf size must be increased to %u bytes to hold up "
+ "to %u bytes of data.",
+ buf_size + max_rx_pkt_len - frame_size,
max_rx_pkt_len);
- return -EINVAL;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ MRVL_LOG(INFO, "Setting max rx pkt len to %u",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
}
if (dev->data->rx_queues[idx]) {
@@ -1867,6 +1967,44 @@ mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
}
}
+/**
+ * DPDK callback to get rte_mtr callbacks.
+ *
+ * @param dev
+ * Pointer to the device structure.
+ * @param ops
+ * Pointer to pass the mtr ops.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
+{
+ *(const void **)ops = &mrvl_mtr_ops;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to get rte_tm callbacks.
+ *
+ * @param dev
+ * Pointer to the device structure.
+ * @param ops
+ * Pointer to pass the tm ops.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
+{
+ *(const void **)ops = &mrvl_tm_ops;
+
+ return 0;
+}
+
static const struct eth_dev_ops mrvl_ops = {
.dev_configure = mrvl_dev_configure,
.dev_start = mrvl_dev_start,
@@ -1904,6 +2042,8 @@ static const struct eth_dev_ops mrvl_ops = {
.rss_hash_update = mrvl_rss_hash_update,
.rss_hash_conf_get = mrvl_rss_hash_conf_get,
.filter_ctrl = mrvl_eth_filter_ctrl,
+ .mtr_ops_get = mrvl_mtr_ops_get,
+ .tm_ops_get = mrvl_tm_ops_get,
};
/**
@@ -1925,13 +2065,27 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
{
enum pp2_inq_l3_type l3_type;
enum pp2_inq_l4_type l4_type;
+ enum pp2_inq_vlan_tag vlan_tag;
uint64_t packet_type;
pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
+ pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
packet_type = RTE_PTYPE_L2_ETHER;
+ switch (vlan_tag) {
+ case PP2_INQ_VLAN_TAG_SINGLE:
+ packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ break;
+ case PP2_INQ_VLAN_TAG_DOUBLE:
+ case PP2_INQ_VLAN_TAG_TRIPLE:
+ packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
+ break;
+ default:
+ break;
+ }
+
switch (l3_type) {
case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
packet_type |= RTE_PTYPE_L3_IPV4;
@@ -2073,7 +2227,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (unlikely(status != PP2_DESC_ERR_OK)) {
struct pp2_buff_inf binf = {
.addr = rte_mbuf_data_iova_default(mbuf),
- .cookie = (pp2_cookie_t)(uint64_t)mbuf,
+ .cookie = (uint64_t)mbuf,
};
pp2_bpool_put_buff(hif, bpool, &binf);
@@ -2334,22 +2488,8 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_mbuf_prefetch_part2(pref_pkt_hdr);
}
- sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
- sq->ent[sq->head].buff.addr =
- rte_mbuf_data_iova_default(mbuf);
- sq->ent[sq->head].bpool =
- (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
- mbuf->refcnt > 1)) ? NULL :
- mrvl_port_to_bpool_lookup[mbuf->port];
- sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
- sq->size++;
-
- pp2_ppio_outq_desc_reset(&descs[i]);
- pp2_ppio_outq_desc_set_phys_addr(&descs[i],
- rte_pktmbuf_iova(mbuf));
- pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
- pp2_ppio_outq_desc_set_pkt_len(&descs[i],
- rte_pktmbuf_pkt_len(mbuf));
+ mrvl_fill_shadowq(sq, mbuf);
+ mrvl_fill_desc(&descs[i], mbuf);
bytes_sent += rte_pktmbuf_pkt_len(mbuf);
/*
@@ -2387,6 +2527,152 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkts;
}
+/** DPDK callback for S/G transmit.
+ *
+ * @param txq
+ * Generic pointer transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct mrvl_txq *q = txq;
+ struct mrvl_shadow_txq *sq;
+ struct pp2_hif *hif;
+ struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS];
+ struct pp2_ppio_sg_pkts pkts;
+ uint8_t frags[nb_pkts];
+ unsigned int core_id = rte_lcore_id();
+ int i, j, ret, bytes_sent = 0;
+ int tail, tail_first;
+ uint16_t num, sq_free_size;
+ uint16_t nb_segs, total_descs = 0;
+ uint64_t addr;
+
+ hif = mrvl_get_hif(q->priv, core_id);
+ sq = &q->shadow_txqs[core_id];
+ pkts.frags = frags;
+ pkts.num = 0;
+
+ if (unlikely(!q->priv->ppio || !hif))
+ return 0;
+
+ if (sq->size)
+ mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
+ sq, q->queue_id, 0);
+
+ /* Save shadow queue free size */
+ sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
+
+ tail = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+ struct rte_mbuf *seg = NULL;
+ int gen_l3_cksum, gen_l4_cksum;
+ enum pp2_outq_l3_type l3_type;
+ enum pp2_outq_l4_type l4_type;
+
+ nb_segs = mbuf->nb_segs;
+ tail_first = tail;
+ total_descs += nb_segs;
+
+ /*
+ * Check if total_descs does not exceed
+ * shadow queue free size
+ */
+ if (unlikely(total_descs > sq_free_size)) {
+ total_descs -= nb_segs;
+ RTE_LOG(DEBUG, PMD,
+ "No room in shadow queue for %d packets! "
+ "%d packets will be sent.\n",
+ nb_pkts, i);
+ break;
+ }
+
+ /* Check if nb_segs does not exceed the max nb of desc per
+ * fragmented packet
+ */
+ if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) {
+ total_descs -= nb_segs;
+ RTE_LOG(ERR, PMD,
+ "Too many segments. Packet won't be sent.\n");
+ break;
+ }
+
+ if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+ struct rte_mbuf *pref_pkt_hdr;
+
+ pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
+ rte_mbuf_prefetch_part1(pref_pkt_hdr);
+ rte_mbuf_prefetch_part2(pref_pkt_hdr);
+ }
+
+ pkts.frags[pkts.num] = nb_segs;
+ pkts.num++;
+
+ seg = mbuf;
+ for (j = 0; j < nb_segs - 1; j++) {
+ /* For the subsequent segments, set shadow queue
+ * buffer to NULL
+ */
+ mrvl_fill_shadowq(sq, NULL);
+ mrvl_fill_desc(&descs[tail], seg);
+
+ tail++;
+ seg = seg->next;
+ }
+ /* Put first mbuf info in last shadow queue entry */
+ mrvl_fill_shadowq(sq, mbuf);
+ /* Update descriptor with last segment */
+ mrvl_fill_desc(&descs[tail++], seg);
+
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+ /* In case unsupported ol_flags were passed
+ * do not update descriptor offload information
+ */
+ ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
+ &l3_type, &l4_type, &gen_l3_cksum,
+ &gen_l4_cksum);
+ if (unlikely(ret))
+ continue;
+
+ pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type,
+ l4_type, mbuf->l2_len,
+ mbuf->l2_len + mbuf->l3_len,
+ gen_l3_cksum, gen_l4_cksum);
+ }
+
+ num = total_descs;
+ pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs,
+ &total_descs, &pkts);
+ /* number of packets that were not sent */
+ if (unlikely(num > total_descs)) {
+ for (i = total_descs; i < num; i++) {
+ sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+
+ addr = sq->ent[sq->head].buff.cookie;
+ if (addr)
+ bytes_sent -=
+ rte_pktmbuf_pkt_len((struct rte_mbuf *)
+ (cookie_addr_high | addr));
+ }
+ sq->size -= num - total_descs;
+ nb_pkts = pkts.num;
+ }
+
+ q->bytes_sent += bytes_sent;
+
+ return nb_pkts;
+}
+
/**
* Initialize packet processor.
*
@@ -2494,8 +2780,9 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
priv = mrvl_priv_create(name);
if (!priv) {
ret = -ENOMEM;
- goto out_free_dev;
+ goto out_free;
}
+ eth_dev->data->dev_private = priv;
eth_dev->data->mac_addrs =
rte_zmalloc("mac_addrs",
@@ -2503,33 +2790,28 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
if (!eth_dev->data->mac_addrs) {
MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
ret = -ENOMEM;
- goto out_free_priv;
+ goto out_free;
}
memset(&req, 0, sizeof(req));
strcpy(req.ifr_name, name);
ret = ioctl(fd, SIOCGIFHWADDR, &req);
if (ret)
- goto out_free_mac;
+ goto out_free;
memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
req.ifr_addr.sa_data, ETHER_ADDR_LEN);
- eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
- eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->dev_private = priv;
eth_dev->device = &vdev->device;
+ eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
+ mrvl_set_tx_function(eth_dev);
eth_dev->dev_ops = &mrvl_ops;
rte_eth_dev_probing_finish(eth_dev);
return 0;
-out_free_mac:
- rte_free(eth_dev->data->mac_addrs);
-out_free_dev:
+out_free:
rte_eth_dev_release_port(eth_dev);
-out_free_priv:
- rte_free(priv);
return ret;
}
@@ -2553,8 +2835,6 @@ mrvl_eth_dev_destroy(const char *name)
priv = eth_dev->data->dev_private;
pp2_bpool_deinit(priv->bpool);
used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
- rte_free(priv);
- rte_free(eth_dev->data->mac_addrs);
rte_eth_dev_release_port(eth_dev);
}
@@ -2654,23 +2934,16 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
goto init_devices;
MRVL_LOG(INFO, "Perform MUSDK initializations");
- /*
- * ret == -EEXIST is correct, it means DMA
- * has been already initialized (by another PMD).
- */
- ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
- if (ret < 0) {
- if (ret != -EEXIST)
- goto out_free_kvlist;
- else
- MRVL_LOG(INFO,
- "DMA memory has been already initialized by a different driver.");
- }
+
+ ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
+ if (ret)
+ goto out_free_kvlist;
ret = mrvl_init_pp2();
if (ret) {
MRVL_LOG(ERR, "Failed to init PP!");
- goto out_deinit_dma;
+ rte_mvep_deinit(MVEP_MOD_T_PP2);
+ goto out_free_kvlist;
}
memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
@@ -2695,11 +2968,10 @@ out_cleanup:
for (; i > 0; i--)
mrvl_eth_dev_destroy(ifnames.names[i]);
- if (mrvl_dev_num == 0)
+ if (mrvl_dev_num == 0) {
mrvl_deinit_pp2();
-out_deinit_dma:
- if (mrvl_dev_num == 0)
- mv_sys_dma_mem_destroy();
+ rte_mvep_deinit(MVEP_MOD_T_PP2);
+ }
out_free_kvlist:
rte_kvargs_free(kvlist);
@@ -2739,7 +3011,7 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
MRVL_LOG(INFO, "Perform MUSDK deinit");
mrvl_deinit_hifs();
mrvl_deinit_pp2();
- mv_sys_dma_mem_destroy();
+ rte_mvep_deinit(MVEP_MOD_T_PP2);
}
return 0;
diff --git a/drivers/net/mvpp2/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h
index 3726f788..0120b9e8 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.h
+++ b/drivers/net/mvpp2/mrvl_ethdev.h
@@ -9,6 +9,18 @@
#include <rte_spinlock.h>
#include <rte_flow_driver.h>
+#include <rte_mtr_driver.h>
+#include <rte_tm_driver.h>
+
+/*
+ * container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
#include <env/mv_autogen_comp_flags.h>
#include <drivers/mv_pp2.h>
@@ -16,6 +28,7 @@
#include <drivers/mv_pp2_cls.h>
#include <drivers/mv_pp2_hif.h>
#include <drivers/mv_pp2_ppio.h>
+#include "env/mv_common.h" /* for BIT() */
/** Maximum number of rx queues per port */
#define MRVL_PP2_RXQ_MAX 32
@@ -59,6 +72,99 @@
/** Minimum number of sent buffers to release from shadow queue to BM */
#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64
+#define MRVL_PP2_VLAN_TAG_LEN 4
+#define MRVL_PP2_ETH_HDRS_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
+ (2 * MRVL_PP2_VLAN_TAG_LEN))
+#define MRVL_PP2_HDRS_LEN (MV_MH_SIZE + MRVL_PP2_ETH_HDRS_LEN)
+#define MRVL_PP2_MTU_TO_MRU(mtu) ((mtu) + MRVL_PP2_HDRS_LEN)
+#define MRVL_PP2_MRU_TO_MTU(mru) ((mru) - MRVL_PP2_HDRS_LEN)
+
+/** Maximum length of a match string */
+#define MRVL_MATCH_LEN 16
+
+/** Parsed fields in processed rte_flow_item. */
+enum mrvl_parsed_fields {
+ /* eth flags */
+ F_DMAC = BIT(0),
+ F_SMAC = BIT(1),
+ F_TYPE = BIT(2),
+ /* vlan flags */
+ F_VLAN_PRI = BIT(3),
+ F_VLAN_ID = BIT(4),
+ F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
+ /* ip4 flags */
+ F_IP4_TOS = BIT(6),
+ F_IP4_SIP = BIT(7),
+ F_IP4_DIP = BIT(8),
+ F_IP4_PROTO = BIT(9),
+ /* ip6 flags */
+ F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
+ F_IP6_SIP = BIT(11),
+ F_IP6_DIP = BIT(12),
+ F_IP6_FLOW = BIT(13),
+ F_IP6_NEXT_HDR = BIT(14),
+ /* tcp flags */
+ F_TCP_SPORT = BIT(15),
+ F_TCP_DPORT = BIT(16),
+ /* udp flags */
+ F_UDP_SPORT = BIT(17),
+ F_UDP_DPORT = BIT(18),
+};
+
+/** PMD-specific definition of a flow rule handle. */
+struct mrvl_mtr;
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next;
+ struct mrvl_mtr *mtr;
+
+ enum mrvl_parsed_fields pattern;
+
+ struct pp2_cls_tbl_rule rule;
+ struct pp2_cls_cos_desc cos;
+ struct pp2_cls_tbl_action action;
+};
+
+struct mrvl_mtr_profile {
+ LIST_ENTRY(mrvl_mtr_profile) next;
+ uint32_t profile_id;
+ int refcnt;
+ struct rte_mtr_meter_profile profile;
+};
+
+struct mrvl_mtr {
+ LIST_ENTRY(mrvl_mtr) next;
+ uint32_t mtr_id;
+ int refcnt;
+ int shared;
+ int enabled;
+ int plcr_bit;
+ struct mrvl_mtr_profile *profile;
+ struct pp2_cls_plcr *plcr;
+};
+
+struct mrvl_tm_shaper_profile {
+ LIST_ENTRY(mrvl_tm_shaper_profile) next;
+ uint32_t id;
+ int refcnt;
+ struct rte_tm_shaper_params params;
+};
+
+enum {
+ MRVL_NODE_PORT,
+ MRVL_NODE_QUEUE,
+};
+
+struct mrvl_tm_node {
+ LIST_ENTRY(mrvl_tm_node) next;
+ uint32_t id;
+ uint32_t type;
+ int refcnt;
+ struct mrvl_tm_node *parent;
+ struct mrvl_tm_shaper_profile *profile;
+ uint8_t weight;
+ uint64_t stats_mask;
+};
+
struct mrvl_priv {
/* Hot fields, used in fast path. */
struct pp2_bpool *bpool; /**< BPool pointer */
@@ -82,6 +188,7 @@ struct mrvl_priv {
uint8_t uc_mc_flushed;
uint8_t vlan_flushed;
uint8_t isolated;
+ uint8_t multiseg;
struct pp2_ppio_params ppio_params;
struct pp2_cls_qos_tbl_params qos_tbl_params;
@@ -93,12 +200,26 @@ struct mrvl_priv {
uint32_t cls_tbl_pattern;
LIST_HEAD(mrvl_flows, rte_flow) flows;
- struct pp2_cls_plcr *policer;
+ struct pp2_cls_plcr *default_policer;
+
+ LIST_HEAD(profiles, mrvl_mtr_profile) profiles;
+ LIST_HEAD(mtrs, mrvl_mtr) mtrs;
+ uint32_t used_plcrs;
+
+ LIST_HEAD(shaper_profiles, mrvl_tm_shaper_profile) shaper_profiles;
+ LIST_HEAD(nodes, mrvl_tm_node) nodes;
+ uint64_t rate_max;
};
/** Flow operations forward declaration. */
extern const struct rte_flow_ops mrvl_flow_ops;
+/** Meter operations forward declaration. */
+extern const struct rte_mtr_ops mrvl_mtr_ops;
+
+/** Traffic manager operations forward declaration. */
+extern const struct rte_tm_ops mrvl_tm_ops;
+
/** Current log type. */
extern int mrvl_logtype;
diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c
index ecc34192..ffd1dab9 100644
--- a/drivers/net/mvpp2/mrvl_flow.c
+++ b/drivers/net/mvpp2/mrvl_flow.c
@@ -11,13 +11,8 @@
#include <arpa/inet.h>
-#ifdef container_of
-#undef container_of
-#endif
-
-#include "mrvl_ethdev.h"
+#include "mrvl_flow.h"
#include "mrvl_qos.h"
-#include "env/mv_common.h" /* for BIT() */
/** Number of rules in the classifier table. */
#define MRVL_CLS_MAX_NUM_RULES 20
@@ -25,46 +20,6 @@
/** Size of the classifier key and mask strings. */
#define MRVL_CLS_STR_SIZE_MAX 40
-/** Parsed fields in processed rte_flow_item. */
-enum mrvl_parsed_fields {
- /* eth flags */
- F_DMAC = BIT(0),
- F_SMAC = BIT(1),
- F_TYPE = BIT(2),
- /* vlan flags */
- F_VLAN_ID = BIT(3),
- F_VLAN_PRI = BIT(4),
- F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
- /* ip4 flags */
- F_IP4_TOS = BIT(6),
- F_IP4_SIP = BIT(7),
- F_IP4_DIP = BIT(8),
- F_IP4_PROTO = BIT(9),
- /* ip6 flags */
- F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
- F_IP6_SIP = BIT(11),
- F_IP6_DIP = BIT(12),
- F_IP6_FLOW = BIT(13),
- F_IP6_NEXT_HDR = BIT(14),
- /* tcp flags */
- F_TCP_SPORT = BIT(15),
- F_TCP_DPORT = BIT(16),
- /* udp flags */
- F_UDP_SPORT = BIT(17),
- F_UDP_DPORT = BIT(18),
-};
-
-/** PMD-specific definition of a flow rule handle. */
-struct rte_flow {
- LIST_ENTRY(rte_flow) next;
-
- enum mrvl_parsed_fields pattern;
-
- struct pp2_cls_tbl_rule rule;
- struct pp2_cls_cos_desc cos;
- struct pp2_cls_tbl_action action;
-};
-
static const enum rte_flow_item_type pattern_eth[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_END
@@ -394,7 +349,8 @@ mrvl_parse_init(const struct rte_flow_item *item,
*
* @param spec Pointer to the specific flow item.
* @param mask Pointer to the specific flow item's mask.
- * @param mask Pointer to the flow.
+ * @param parse_dst Parse either destination or source mac address.
+ * @param flow Pointer to the flow.
* @return 0 in case of success, negative error value otherwise.
*/
static int
@@ -613,6 +569,7 @@ mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
*
* @param spec Pointer to the specific flow item.
* @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse either destination or source ip address.
* @param flow Pointer to the flow.
* @return 0 in case of success, negative error value otherwise.
*/
@@ -726,6 +683,7 @@ mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
*
* @param spec Pointer to the specific flow item.
* @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse either destination or source ipv6 address.
* @param flow Pointer to the flow.
* @return 0 in case of success, negative error value otherwise.
*/
@@ -874,6 +832,7 @@ mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
*
* @param spec Pointer to the specific flow item.
* @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse either destination or source port.
* @param flow Pointer to the flow.
* @return 0 in case of success, negative error value otherwise.
*/
@@ -949,6 +908,7 @@ mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
*
* @param spec Pointer to the specific flow item.
* @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse either destination or source port.
* @param flow Pointer to the flow.
* @return 0 in case of success, negative error value otherwise.
*/
@@ -1022,7 +982,6 @@ mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
* @param item Pointer to the flow item.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
- * @param fields Pointer to the parsed parsed fields enum.
* @returns 0 on success, negative value otherwise.
*/
static int
@@ -1073,7 +1032,6 @@ out:
* @param item Pointer to the flow item.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
- * @param fields Pointer to the parsed parsed fields enum.
* @returns 0 on success, negative value otherwise.
*/
static int
@@ -1139,7 +1097,6 @@ out:
* @param item Pointer to the flow item.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
- * @param fields Pointer to the parsed parsed fields enum.
* @returns 0 on success, negative value otherwise.
*/
static int
@@ -1205,7 +1162,6 @@ out:
* @param item Pointer to the flow item.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
- * @param fields Pointer to the parsed parsed fields enum.
* @returns 0 on success, negative value otherwise.
*/
static int
@@ -1276,7 +1232,6 @@ out:
* @param item Pointer to the flow item.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
- * @param fields Pointer to the parsed parsed fields enum.
* @returns 0 on success, negative value otherwise.
*/
static int
@@ -1332,7 +1287,6 @@ out:
* @param item Pointer to the flow item.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
- * @param fields Pointer to the parsed parsed fields enum.
* @returns 0 on success, negative value otherwise.
*/
static int
@@ -1981,6 +1935,7 @@ mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
* @param pattern Pointer to the flow pattern table.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
* @returns 0 in case of success, negative value otherwise.
*/
static int
@@ -2300,19 +2255,59 @@ mrvl_flow_parse_actions(struct mrvl_priv *priv,
flow->action.type = PP2_CLS_TBL_ACT_DONE;
flow->action.cos = &flow->cos;
specified++;
+ } else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
+ const struct rte_flow_action_meter *meter;
+ struct mrvl_mtr *mtr;
+
+ meter = action->conf;
+ if (!meter)
+ return -rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid meter\n");
+
+ LIST_FOREACH(mtr, &priv->mtrs, next)
+ if (mtr->mtr_id == meter->mtr_id)
+ break;
+
+ if (!mtr)
+ return -rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Meter id does not exist\n");
+
+ if (!mtr->shared && mtr->refcnt)
+ return -rte_flow_error_set(error, EPERM,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Meter cannot be shared\n");
+
+ /*
+ * In case cos has already been set
+ * do not modify it.
+ */
+ if (!flow->cos.ppio) {
+ flow->cos.ppio = priv->ppio;
+ flow->cos.tc = 0;
+ }
+
+ flow->action.type = PP2_CLS_TBL_ACT_DONE;
+ flow->action.cos = &flow->cos;
+ flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
+ flow->mtr = mtr;
+ mtr->refcnt++;
+ specified++;
} else {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"Action not supported");
return -rte_errno;
}
-
}
if (!specified) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "Action not specified");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Action not specified");
return -rte_errno;
}
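The METER action handled above hooks a previously created rte_mtr object into a
classifier rule. A minimal application-side sketch, assuming the port is
configured, the meter was created with rte_mtr_create(), and `attr`/`pattern`
are built by the caller (names and ids are illustrative, not taken from this
patch):

    #include <rte_flow.h>

    /* Sketch: attach meter 'mtr_id' to a flow on 'port_id'. */
    static struct rte_flow *
    attach_meter(uint16_t port_id, uint32_t mtr_id,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[])
    {
            struct rte_flow_action_meter meter_conf = { .mtr_id = mtr_id };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter_conf },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;

            return rte_flow_create(port_id, attr, pattern, actions, &err);
    }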
@@ -2350,6 +2345,12 @@ mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
return mrvl_flow_parse_actions(priv, actions, flow, error);
}
+/**
+ * Get engine type for the given flow.
+ *
+ * @param flow Pointer to the flow.
+ * @returns The type of the engine.
+ */
static inline enum pp2_cls_tbl_type
mrvl_engine_type(const struct rte_flow *flow)
{
@@ -2369,6 +2370,13 @@ mrvl_engine_type(const struct rte_flow *flow)
return PP2_CLS_TBL_MASKABLE;
}
+/**
+ * Create classifier table.
+ *
+ * @param dev Pointer to the device.
+ * @param flow Pointer to the very first flow.
+ * @returns 0 in case of success, negative value otherwise.
+ */
static int
mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
{
@@ -2429,7 +2437,8 @@ mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
if (first_flow->pattern & F_IP4_TOS) {
key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
- key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
+ key->proto_field[key->num_fields].field.ipv4 =
+ MV_NET_IP4_F_DSCP;
key->key_size += 1;
key->num_fields += 1;
}
@@ -2649,13 +2658,18 @@ mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
mrvl_free_all_key_mask(&flow->rule);
+ if (flow->mtr) {
+ flow->mtr->refcnt--;
+ flow->mtr = NULL;
+ }
+
return 0;
}
/**
* DPDK flow destroy callback called when flow is to be removed.
*
- * @param priv Pointer to the port's private data.
+ * @param dev Pointer to the device.
* @param flow Pointer to the flow.
* @param error Pointer to the flow error.
* @returns 0 in case of success, negative value otherwise.
@@ -2777,3 +2791,34 @@ const struct rte_flow_ops mrvl_flow_ops = {
.flush = mrvl_flow_flush,
.isolate = mrvl_flow_isolate
};
+
+/**
+ * Initialize flow resources.
+ *
+ * @param dev Pointer to the device.
+ */
+void
+mrvl_flow_init(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ LIST_INIT(&priv->flows);
+}
+
+/**
+ * Cleanup flow resources.
+ *
+ * @param dev Pointer to the device.
+ */
+void
+mrvl_flow_deinit(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ mrvl_flow_flush(dev, NULL);
+
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
+}
diff --git a/drivers/net/mvpp2/mrvl_flow.h b/drivers/net/mvpp2/mrvl_flow.h
new file mode 100644
index 00000000..f63747c1
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_flow.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MRVL_FLOW_H_
+#define _MRVL_FLOW_H_
+
+#include "mrvl_ethdev.h"
+
+void mrvl_flow_init(struct rte_eth_dev *dev);
+void mrvl_flow_deinit(struct rte_eth_dev *dev);
+
+#endif /* _MRVL_FLOW_H_ */
diff --git a/drivers/net/mvpp2/mrvl_mtr.c b/drivers/net/mvpp2/mrvl_mtr.c
new file mode 100644
index 00000000..9cd53bed
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_mtr.c
@@ -0,0 +1,512 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "mrvl_mtr.h"
+
+/** Maximum meter rate */
+#define MRVL_SRTCM_RFC2697_CIR_MAX 1023000
+
+/** Invalid plcr bit */
+#define MRVL_PLCR_BIT_INVALID -1
+
+/**
+ * Return meter object capabilities.
+ *
+ * @param dev Pointer to the device (unused).
+ * @param cap Pointer to the meter object capabilities.
+ * @param error Pointer to the error (unused).
+ * @returns 0 always.
+ */
+static int
+mrvl_capabilities_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_mtr_capabilities *cap,
+ struct rte_mtr_error *error __rte_unused)
+{
+ struct rte_mtr_capabilities capa = {
+ .n_max = PP2_CLS_PLCR_NUM,
+ .n_shared_max = PP2_CLS_PLCR_NUM,
+ .shared_n_flows_per_mtr_max = -1,
+ .meter_srtcm_rfc2697_n_max = PP2_CLS_PLCR_NUM,
+ .meter_rate_max = MRVL_SRTCM_RFC2697_CIR_MAX,
+ };
+
+ memcpy(cap, &capa, sizeof(capa));
+
+ return 0;
+}
+
+/**
+ * Get profile using its id.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param meter_profile_id Profile id used by the meter.
+ * @returns Pointer to the profile if exists, NULL otherwise.
+ */
+static struct mrvl_mtr_profile *
+mrvl_mtr_profile_from_id(struct mrvl_priv *priv, uint32_t meter_profile_id)
+{
+ struct mrvl_mtr_profile *profile = NULL;
+
+ LIST_FOREACH(profile, &priv->profiles, next)
+ if (profile->profile_id == meter_profile_id)
+ break;
+
+ return profile;
+}
+
+/**
+ * Add profile to the list of profiles.
+ *
+ * @param dev Pointer to the device.
+ * @param meter_profile_id Id of the new profile.
+ * @param profile Pointer to the profile configuration.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_meter_profile_add(struct rte_eth_dev *dev, uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr_profile *prof;
+
+ if (!profile)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ if (profile->alg != RTE_MTR_SRTCM_RFC2697)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Only srTCM RFC 2697 is supported\n");
+
+ prof = mrvl_mtr_profile_from_id(priv, meter_profile_id);
+ if (prof)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Profile id already exists\n");
+
+ prof = rte_zmalloc_socket(NULL, sizeof(*prof), 0, rte_socket_id());
+ if (!prof)
+ return -rte_mtr_error_set(error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ prof->profile_id = meter_profile_id;
+ memcpy(&prof->profile, profile, sizeof(*profile));
+
+ LIST_INSERT_HEAD(&priv->profiles, prof, next);
+
+ return 0;
+}
+
+/**
+ * Remove profile from the list of profiles.
+ *
+ * @param dev Pointer to the device.
+ * @param meter_profile_id Id of the profile to remove.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_meter_profile_delete(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr_profile *profile;
+
+ profile = mrvl_mtr_profile_from_id(priv, meter_profile_id);
+ if (!profile)
+ return -rte_mtr_error_set(error, ENODEV,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Profile id does not exist\n");
+
+ if (profile->refcnt)
+ return -rte_mtr_error_set(error, EPERM,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Profile is used\n");
+
+ LIST_REMOVE(profile, next);
+ rte_free(profile);
+
+ return 0;
+}
+
+/**
+ * Get meter using its id.
+ *
+ * @param priv Pointer to port's private data.
+ * @param mtr_id Id of the meter.
+ * @returns Pointer to the meter if exists, NULL otherwise.
+ */
+static struct mrvl_mtr *
+mrvl_mtr_from_id(struct mrvl_priv *priv, uint32_t mtr_id)
+{
+ struct mrvl_mtr *mtr = NULL;
+
+ LIST_FOREACH(mtr, &priv->mtrs, next)
+ if (mtr->mtr_id == mtr_id)
+ break;
+
+ return mtr;
+}
+
+/**
+ * Reserve a policer bit in a bitmap.
+ *
+ * @param plcrs Pointer to the policers bitmap.
+ * @returns Reserved bit number on success, negative value otherwise.
+ */
+static int
+mrvl_reserve_plcr(uint32_t *plcrs)
+{
+ uint32_t i, num;
+
+ num = PP2_CLS_PLCR_NUM;
+ if (num > sizeof(uint32_t) * 8) {
+ num = sizeof(uint32_t) * 8;
+ MRVL_LOG(WARNING, "Number of policers limited to 32.");
+ }
+
+ for (i = 0; i < num; i++) {
+ uint32_t bit = BIT(i);
+
+ if (!(*plcrs & bit)) {
+ *plcrs |= bit;
+
+ return i;
+ }
+ }
+
+ return -1;
+}
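+
+/*
+ * Example for mrvl_reserve_plcr(): with *plcrs == 0x5 (bits 0 and 2 taken)
+ * the first free bit is 1, so the call sets *plcrs to 0x7 and returns 1.
+ */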
+
+/**
+ * Enable meter object.
+ *
+ * @param dev Pointer to the device.
+ * @param mtr_id Id of the meter.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,
+ struct rte_mtr_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr *mtr = mrvl_mtr_from_id(priv, mtr_id);
+ struct pp2_cls_plcr_params params;
+ char match[MRVL_MATCH_LEN];
+ struct rte_flow *flow;
+ int ret;
+
+ if (!priv->ppio)
+ return -rte_mtr_error_set(error, EPERM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is uninitialized\n");
+
+ if (!mtr)
+ return -rte_mtr_error_set(error, ENODEV,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter id does not exist\n");
+
+ if (mtr->plcr)
+ goto skip;
+
+ mtr->plcr_bit = mrvl_reserve_plcr(&priv->used_plcrs);
+ if (mtr->plcr_bit < 0)
+ return -rte_mtr_error_set(error, ENOSPC,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to reserve plcr entry\n");
+
+ memset(&params, 0, sizeof(params));
+ snprintf(match, sizeof(match), "policer-%d:%d", priv->pp_id,
+ mtr->plcr_bit);
+ params.match = match;
+ params.token_unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT;
+ params.color_mode = PP2_CLS_PLCR_COLOR_BLIND_MODE;
+ params.cir = mtr->profile->profile.srtcm_rfc2697.cir;
+ params.cbs = mtr->profile->profile.srtcm_rfc2697.cbs;
+ params.ebs = mtr->profile->profile.srtcm_rfc2697.ebs;
+
+ ret = pp2_cls_plcr_init(&params, &mtr->plcr);
+ if (ret) {
+ priv->used_plcrs &= ~BIT(mtr->plcr_bit);
+ mtr->plcr_bit = MRVL_PLCR_BIT_INVALID;
+
+ return -rte_mtr_error_set(error, -ret,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to setup policer\n");
+ }
+
+ mtr->enabled = 1;
+skip:
+ /* iterate over flows that have this mtr attached */
+ LIST_FOREACH(flow, &priv->flows, next) {
+ if (flow->mtr != mtr)
+ continue;
+
+ flow->action.plcr = mtr->plcr;
+
+ ret = pp2_cls_tbl_modify_rule(priv->cls_tbl, &flow->rule,
+ &flow->action);
+ if (ret)
+ return -rte_mtr_error_set(error, -ret,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to update cls rule\n");
+ }
+
+ return 0;
+}
+
+/**
+ * Disable meter object.
+ *
+ * @param dev Pointer to the device.
+ * @param mtr_id Id of the meter.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,
+ struct rte_mtr_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr *mtr = mrvl_mtr_from_id(priv, mtr_id);
+ struct rte_flow *flow;
+ int ret;
+
+ if (!mtr)
+ return -rte_mtr_error_set(error, ENODEV,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter id does not exist\n");
+
+ LIST_FOREACH(flow, &priv->flows, next) {
+ if (flow->mtr != mtr)
+ continue;
+
+ flow->action.plcr = NULL;
+
+ ret = pp2_cls_tbl_modify_rule(priv->cls_tbl, &flow->rule,
+ &flow->action);
+ if (ret)
+ return -rte_mtr_error_set(error, -ret,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to disable meter\n");
+ }
+
+ mtr->enabled = 0;
+
+ return 0;
+}
+
+/**
+ * Create new meter.
+ *
+ * @param dev Pointer to the device.
+ * @param mtr_id Id of the meter.
+ * @param params Pointer to the meter parameters.
+ * @param shared Flags indicating whether meter is shared.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_create(struct rte_eth_dev *dev, uint32_t mtr_id,
+ struct rte_mtr_params *params, int shared,
+ struct rte_mtr_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr_profile *profile;
+ struct mrvl_mtr *mtr;
+
+ mtr = mrvl_mtr_from_id(priv, mtr_id);
+ if (mtr)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter id already exists\n");
+
+ mtr = rte_zmalloc_socket(NULL, sizeof(*mtr), 0, rte_socket_id());
+ if (!mtr)
+ return -rte_mtr_error_set(error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ profile = mrvl_mtr_profile_from_id(priv, params->meter_profile_id);
+ if (!profile) {
+ rte_free(mtr);
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Profile id does not exist\n");
+ }
+
+ mtr->shared = shared;
+ mtr->mtr_id = mtr_id;
+ mtr->plcr_bit = MRVL_PLCR_BIT_INVALID;
+ mtr->profile = profile;
+ profile->refcnt++;
+ LIST_INSERT_HEAD(&priv->mtrs, mtr, next);
+
+ if (params->meter_enable)
+ return mrvl_meter_enable(dev, mtr_id, error);
+
+ return 0;
+}
+
+/**
+ * Destroy meter object.
+ *
+ * @param dev Pointer to the device.
+ * @param mtr_id Id of the meter object.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,
+ struct rte_mtr_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr *mtr;
+
+ if (!priv->ppio)
+ return -rte_mtr_error_set(error, EPERM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is uninitialized\n");
+
+ mtr = mrvl_mtr_from_id(priv, mtr_id);
+ if (!mtr)
+ return -rte_mtr_error_set(error, ENODEV,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter id does not exist\n");
+
+ if (mtr->refcnt)
+ return -rte_mtr_error_set(error, EPERM,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter is used\n");
+
+ LIST_REMOVE(mtr, next);
+ mtr->profile->refcnt--;
+
+ if (mtr->plcr_bit != MRVL_PLCR_BIT_INVALID)
+ priv->used_plcrs &= ~BIT(mtr->plcr_bit);
+
+ if (mtr->plcr)
+ pp2_cls_plcr_deinit(mtr->plcr);
+
+ rte_free(mtr);
+
+ return 0;
+}
+
+/**
+ * Update profile used by the meter.
+ *
+ * @param dev Pointer to the device.
+ * @param mtr_id Id of the meter object.
+ * @param meter_profile_id Id of the new profile.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_meter_profile_update(struct rte_eth_dev *dev, uint32_t mtr_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr_profile *profile;
+ struct mrvl_mtr *mtr;
+ int ret, enabled = 0;
+
+ if (!priv->ppio)
+ return -rte_mtr_error_set(error, EPERM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is uninitialized\n");
+
+ mtr = mrvl_mtr_from_id(priv, mtr_id);
+ if (!mtr)
+ return -rte_mtr_error_set(error, ENODEV,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter id does not exist\n");
+
+ profile = mrvl_mtr_profile_from_id(priv, meter_profile_id);
+ if (!profile)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Profile id does not exist\n");
+
+ ret = mrvl_meter_disable(dev, mtr_id, error);
+ if (ret)
+ return -rte_mtr_error_set(error, EPERM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+ NULL);
+
+ if (mtr->plcr) {
+ enabled = 1;
+ pp2_cls_plcr_deinit(mtr->plcr);
+ mtr->plcr = NULL;
+ }
+
+ mtr->profile->refcnt--;
+ mtr->profile = profile;
+ profile->refcnt++;
+
+ if (enabled)
+ return mrvl_meter_enable(dev, mtr_id, error);
+
+ return 0;
+}
+
+const struct rte_mtr_ops mrvl_mtr_ops = {
+ .capabilities_get = mrvl_capabilities_get,
+ .meter_profile_add = mrvl_meter_profile_add,
+ .meter_profile_delete = mrvl_meter_profile_delete,
+ .create = mrvl_create,
+ .destroy = mrvl_destroy,
+ .meter_enable = mrvl_meter_enable,
+ .meter_disable = mrvl_meter_disable,
+ .meter_profile_update = mrvl_meter_profile_update,
+};
+
+/**
+ * Initialize metering resources.
+ *
+ * @param dev Pointer to the device.
+ */
+void
+mrvl_mtr_init(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ LIST_INIT(&priv->profiles);
+ LIST_INIT(&priv->mtrs);
+}
+
+/**
+ * Cleanup metering resources.
+ *
+ * @param dev Pointer to the device.
+ */
+void
+mrvl_mtr_deinit(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_mtr_profile *profile, *tmp_profile;
+ struct mrvl_mtr *mtr, *tmp_mtr;
+
+ for (mtr = LIST_FIRST(&priv->mtrs);
+ mtr && (tmp_mtr = LIST_NEXT(mtr, next), 1);
+ mtr = tmp_mtr)
+ mrvl_destroy(dev, mtr->mtr_id, NULL);
+
+ for (profile = LIST_FIRST(&priv->profiles);
+ profile && (tmp_profile = LIST_NEXT(profile, next), 1);
+ profile = tmp_profile)
+ mrvl_meter_profile_delete(dev, profile->profile_id, NULL);
+}
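These callbacks are reached through the generic rte_mtr API. A minimal sketch
of the application-side sequence they serve, assuming the port has already been
started (this PMD needs an initialized ppio before a meter can be enabled); all
ids and rate/burst values are illustrative only:

    #include <rte_mtr.h>

    /* Sketch: register an srTCM RFC 2697 profile and create one meter. */
    static int
    setup_example_meter(uint16_t port_id)
    {
            struct rte_mtr_error err;
            struct rte_mtr_meter_profile prof = {
                    .alg = RTE_MTR_SRTCM_RFC2697,
                    .srtcm_rfc2697 = {
                            .cir = 1000000, /* committed information rate */
                            .cbs = 2048,    /* committed burst size */
                            .ebs = 2048,    /* excess burst size */
                    },
            };
            struct rte_mtr_params params = {
                    .meter_profile_id = 0,
                    .meter_enable = 1,
            };
            int ret;

            ret = rte_mtr_meter_profile_add(port_id, 0, &prof, &err);
            if (ret)
                    return ret;

            /* meter id 1, not shared between flows */
            return rte_mtr_create(port_id, 1, &params, 0, &err);
    }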
diff --git a/drivers/net/mvpp2/mrvl_mtr.h b/drivers/net/mvpp2/mrvl_mtr.h
new file mode 100644
index 00000000..302a20fb
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_mtr.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MRVL_MTR_H_
+#define _MRVL_MTR_H_
+
+#include "mrvl_ethdev.h"
+
+void mrvl_mtr_init(struct rte_eth_dev *dev);
+void mrvl_mtr_deinit(struct rte_eth_dev *dev);
+
+#endif /* _MRVL_MTR_H_ */
diff --git a/drivers/net/mvpp2/mrvl_qos.c b/drivers/net/mvpp2/mrvl_qos.c
index 71856c1a..7fd97030 100644
--- a/drivers/net/mvpp2/mrvl_qos.c
+++ b/drivers/net/mvpp2/mrvl_qos.c
@@ -15,15 +15,6 @@
#include <rte_malloc.h>
#include <rte_string_fns.h>
-/* Unluckily, container_of is defined by both DPDK and MUSDK,
- * we'll declare only one version.
- *
- * Note that it is not used in this PMD anyway.
- */
-#ifdef container_of
-#undef container_of
-#endif
-
#include "mrvl_qos.h"
/* Parsing tokens. Defined conveniently, so that any correction is easy. */
@@ -51,7 +42,8 @@
#define MRVL_TOK_WRR_WEIGHT "wrr_weight"
/* policer specific configuration tokens */
-#define MRVL_TOK_PLCR_ENABLE "policer_enable"
+#define MRVL_TOK_PLCR "policer"
+#define MRVL_TOK_PLCR_DEFAULT "default_policer"
#define MRVL_TOK_PLCR_UNIT "token_unit"
#define MRVL_TOK_PLCR_UNIT_BYTES "bytes"
#define MRVL_TOK_PLCR_UNIT_PACKETS "packets"
@@ -332,6 +324,7 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
return 0;
+ cfg->port[port].use_global_defaults = 0;
entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ);
if (entry) {
n = get_entry_values(entry,
@@ -377,6 +370,9 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
cfg->port[port].tc[tc].dscps = n;
}
+ if (!cfg->port[port].setup_policer)
+ return 0;
+
entry = rte_cfgfile_get_entry(file, sec_name,
MRVL_TOK_PLCR_DEFAULT_COLOR);
if (entry) {
@@ -399,6 +395,85 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
}
/**
+ * Parse port's default policer configuration.
+ *
+ * @param file Config file handle.
+ * @param port Port number.
+ * @param sec_name Name of the section with the policer configuration.
+ * @param[out] cfg Parsing results.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+parse_policer(struct rte_cfgfile *file, int port, const char *sec_name,
+ struct mrvl_qos_cfg *cfg)
+{
+ const char *entry;
+ uint32_t val;
+
+ /* Read policer token unit */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_UNIT);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES,
+ sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) {
+ cfg->port[port].policer_params.token_unit =
+ PP2_CLS_PLCR_BYTES_TOKEN_UNIT;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_PACKETS,
+ sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
+ cfg->port[port].policer_params.token_unit =
+ PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
+ } else {
+ MRVL_LOG(ERR, "Unknown token: %s", entry);
+ return -1;
+ }
+ }
+
+ /* Read policer color mode */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_COLOR);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND,
+ sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) {
+ cfg->port[port].policer_params.color_mode =
+ PP2_CLS_PLCR_COLOR_BLIND_MODE;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_AWARE,
+ sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
+ cfg->port[port].policer_params.color_mode =
+ PP2_CLS_PLCR_COLOR_AWARE_MODE;
+ } else {
+ MRVL_LOG(ERR, "Error in parsing: %s", entry);
+ return -1;
+ }
+ }
+
+ /* Read policer cir */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_CIR);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].policer_params.cir = val;
+ }
+
+ /* Read policer cbs */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_CBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].policer_params.cbs = val;
+ }
+
+ /* Read policer ebs */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PLCR_EBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].policer_params.ebs = val;
+ }
+
+ cfg->port[port].setup_policer = 1;
+
+ return 0;
+}
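+
+/*
+ * Illustrative QoS configuration fragment for the syntax parsed above and in
+ * mrvl_get_qoscfg(); section and token names follow the MRVL_TOK_* strings
+ * defined in this file, values are examples only:
+ *
+ *   [port 0 default]
+ *   default_tc = 0
+ *   default_policer = 0
+ *
+ *   [policer 0]
+ *   token_unit = bytes
+ *   color_mode = blind
+ *   cir = 100000
+ *   cbs = 64
+ *   ebs = 64
+ */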
+
+/**
* Parse QoS configuration - rte_kvargs_process handler.
*
* Opens configuration file and parses its content.
@@ -444,110 +519,15 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
snprintf(sec_name, sizeof(sec_name), "%s %d %s",
MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT);
+ /* Use global defaults, unless an override occurs */
+ (*cfg)->port[n].use_global_defaults = 1;
+
/* Skip ports non-existing in configuration. */
if (rte_cfgfile_num_sections(file, sec_name,
strlen(sec_name)) <= 0) {
- (*cfg)->port[n].use_global_defaults = 1;
- (*cfg)->port[n].mapping_priority =
- PP2_CLS_QOS_TBL_VLAN_IP_PRI;
continue;
}
- entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_DEFAULT_TC);
- if (entry) {
- if (get_val_securely(entry, &val) < 0 ||
- val > USHRT_MAX)
- return -1;
- (*cfg)->port[n].default_tc = (uint8_t)val;
- } else {
- MRVL_LOG(ERR,
- "Default Traffic Class required in custom configuration!");
- return -1;
- }
-
- entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_PLCR_ENABLE);
- if (entry) {
- if (get_val_securely(entry, &val) < 0)
- return -1;
- (*cfg)->port[n].policer_enable = val;
- }
-
- if ((*cfg)->port[n].policer_enable) {
- enum pp2_cls_plcr_token_unit unit;
-
- /* Read policer token unit */
- entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_PLCR_UNIT);
- if (entry) {
- if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES,
- sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) {
- unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT;
- } else if (!strncmp(entry,
- MRVL_TOK_PLCR_UNIT_PACKETS,
- sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
- unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
- } else {
- MRVL_LOG(ERR, "Unknown token: %s",
- entry);
- return -1;
- }
- (*cfg)->port[n].policer_params.token_unit =
- unit;
- }
-
- /* Read policer color mode */
- entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_PLCR_COLOR);
- if (entry) {
- enum pp2_cls_plcr_color_mode mode;
-
- if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND,
- sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) {
- mode = PP2_CLS_PLCR_COLOR_BLIND_MODE;
- } else if (!strncmp(entry,
- MRVL_TOK_PLCR_COLOR_AWARE,
- sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
- mode = PP2_CLS_PLCR_COLOR_AWARE_MODE;
- } else {
- MRVL_LOG(ERR,
- "Error in parsing: %s",
- entry);
- return -1;
- }
- (*cfg)->port[n].policer_params.color_mode =
- mode;
- }
-
- /* Read policer cir */
- entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_PLCR_CIR);
- if (entry) {
- if (get_val_securely(entry, &val) < 0)
- return -1;
- (*cfg)->port[n].policer_params.cir = val;
- }
-
- /* Read policer cbs */
- entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_PLCR_CBS);
- if (entry) {
- if (get_val_securely(entry, &val) < 0)
- return -1;
- (*cfg)->port[n].policer_params.cbs = val;
- }
-
- /* Read policer ebs */
- entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_PLCR_EBS);
- if (entry) {
- if (get_val_securely(entry, &val) < 0)
- return -1;
- (*cfg)->port[n].policer_params.ebs = val;
- }
- }
-
/*
* Read per-port rate limiting. Setting that will
* disable per-queue rate limiting.
@@ -581,6 +561,7 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
entry = rte_cfgfile_get_entry(file, sec_name,
MRVL_TOK_MAPPING_PRIORITY);
if (entry) {
+ (*cfg)->port[n].use_global_defaults = 0;
if (!strncmp(entry, MRVL_TOK_VLAN_IP,
sizeof(MRVL_TOK_VLAN_IP)))
(*cfg)->port[n].mapping_priority =
@@ -606,6 +587,21 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
PP2_CLS_QOS_TBL_VLAN_IP_PRI;
}
+ /* Parse policer configuration (if any) */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_DEFAULT);
+ if (entry) {
+ (*cfg)->port[n].use_global_defaults = 0;
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+
+ snprintf(sec_name, sizeof(sec_name), "%s %d",
+ MRVL_TOK_PLCR, val);
+ ret = parse_policer(file, n, sec_name, *cfg);
+ if (ret)
+ return -1;
+ }
+
for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) {
ret = get_outq_cfg(file, n, i, *cfg);
if (ret < 0)
@@ -621,6 +617,21 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
"Error %d parsing port %d tc %d!\n",
ret, n, i);
}
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_DEFAULT_TC);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0 ||
+ val > USHRT_MAX)
+ return -1;
+ (*cfg)->port[n].default_tc = (uint8_t)val;
+ } else {
+ if ((*cfg)->port[n].use_global_defaults == 0) {
+ MRVL_LOG(ERR,
+ "Default Traffic Class required in custom configuration!");
+ return -1;
+ }
+ }
}
return 0;
@@ -643,7 +654,7 @@ setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
struct pp2_ppio_inq_params *inq_params;
param->pkt_offset = MRVL_PKT_OFFS;
- param->pools[0] = bpool;
+ param->pools[0][0] = bpool;
param->default_color = color;
inq_params = rte_zmalloc_socket("inq_params",
@@ -668,6 +679,7 @@ setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
*
* @param priv Port's private data.
* @param params Pointer to the policer's configuration.
* @returns 0 in case of success, negative values otherwise.
*/
static int
@@ -676,17 +688,23 @@ setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params)
char match[16];
int ret;
- snprintf(match, sizeof(match), "policer-%d:%d\n",
- priv->pp_id, priv->ppio_id);
+ /*
+ * At this point no other policers are used which means
+ * any policer can be picked up and used as a default one.
+ *
+ * Lets use 0th then.
+ */
+ sprintf(match, "policer-%d:%d\n", priv->pp_id, 0);
params->match = match;
- ret = pp2_cls_plcr_init(params, &priv->policer);
+ ret = pp2_cls_plcr_init(params, &priv->default_policer);
if (ret) {
MRVL_LOG(ERR, "Failed to setup %s", match);
return -1;
}
- priv->ppio_params.inqs_params.plcr = priv->policer;
+ priv->ppio_params.inqs_params.plcr = priv->default_policer;
+ priv->used_plcrs = BIT(0);
return 0;
}
@@ -818,7 +836,7 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
priv->ppio_params.inqs_params.num_tcs = i;
- if (port_cfg->policer_enable)
+ if (port_cfg->setup_policer)
return setup_policer(priv, &port_cfg->policer_params);
return 0;
diff --git a/drivers/net/mvpp2/mrvl_qos.h b/drivers/net/mvpp2/mrvl_qos.h
index fa9ddecb..f03e7731 100644
--- a/drivers/net/mvpp2/mrvl_qos.h
+++ b/drivers/net/mvpp2/mrvl_qos.h
@@ -43,7 +43,7 @@ struct mrvl_qos_cfg {
uint8_t default_tc;
uint8_t use_global_defaults;
struct pp2_cls_plcr_params policer_params;
- uint8_t policer_enable;
+ uint8_t setup_policer;
} port[RTE_MAX_ETHPORTS];
};
diff --git a/drivers/net/mvpp2/mrvl_tm.c b/drivers/net/mvpp2/mrvl_tm.c
new file mode 100644
index 00000000..3de89970
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_tm.c
@@ -0,0 +1,1009 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_malloc.h>
+
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+
+#include "mrvl_tm.h"
+
+/** Minimum rate value in Bytes/s */
+#define MRVL_RATE_MIN (PP2_PPIO_MIN_CIR * 1000 / 8)
+
+/** Minimum burst size in Bytes */
+#define MRVL_BURST_MIN (PP2_PPIO_MIN_CBS * 1000)
+
+/** Maximum burst size in Bytes */
+#define MRVL_BURST_MAX 256000000
+
+/** Maximum WRR weight */
+#define MRVL_WEIGHT_MAX 255
+
+/**
+ * Get maximum port rate in Bytes/s.
+ *
+ * @param dev Pointer to the device.
+ * @param rate Pointer to the rate.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_get_max_rate(struct rte_eth_dev *dev, uint64_t *rate)
+{
+ struct ethtool_cmd edata;
+ struct ifreq req;
+ int ret, fd;
+
+ memset(&edata, 0, sizeof(edata));
+ memset(&req, 0, sizeof(req));
+ edata.cmd = ETHTOOL_GSET;
+ snprintf(req.ifr_name, sizeof(req.ifr_name), "%s", dev->data->name);
+ req.ifr_data = (void *)&edata;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd == -1)
+ return -1;
+
+ ret = ioctl(fd, SIOCETHTOOL, &req);
+ if (ret == -1) {
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ *rate = (uint64_t)ethtool_cmd_speed(&edata) * 1000 * 1000 / 8;
+
+ return 0;
+}
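+
+/*
+ * Example: a 10 Gbps link is reported by ethtool as 10000 (Mbps), which
+ * mrvl_get_max_rate() converts to 10000 * 1000 * 1000 / 8 = 1250000000
+ * Bytes/s; this becomes the upper bound for shaper rates on this port.
+ */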
+
+/**
+ * Initialize traffic manager related data.
+ *
+ * @param dev Pointer to the device.
+ * @returns 0 on success, failure otherwise.
+ */
+int
+mrvl_tm_init(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ LIST_INIT(&priv->shaper_profiles);
+ LIST_INIT(&priv->nodes);
+
+ if (priv->rate_max)
+ return 0;
+
+ return mrvl_get_max_rate(dev, &priv->rate_max);
+}
+
+/**
+ * Cleanup traffic manager related data.
+ *
+ * @param dev Pointer to the device.
+ */
+void
+mrvl_tm_deinit(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_shaper_profile *profile =
+ LIST_FIRST(&priv->shaper_profiles);
+ struct mrvl_tm_node *node = LIST_FIRST(&priv->nodes);
+
+ while (profile) {
+ struct mrvl_tm_shaper_profile *next = LIST_NEXT(profile, next);
+
+ LIST_REMOVE(profile, next);
+ rte_free(profile);
+ profile = next;
+ }
+
+ while (node) {
+ struct mrvl_tm_node *next = LIST_NEXT(node, next);
+
+ LIST_REMOVE(node, next);
+ rte_free(node);
+ node = next;
+ }
+}
+
+/**
+ * Get node using its id.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param node_id Id used by this node.
+ * @returns Pointer to the node if exists, NULL otherwise.
+ */
+static struct mrvl_tm_node *
+mrvl_node_from_id(struct mrvl_priv *priv, uint32_t node_id)
+{
+ struct mrvl_tm_node *node;
+
+ LIST_FOREACH(node, &priv->nodes, next)
+ if (node->id == node_id)
+ return node;
+
+ return NULL;
+}
+
+/**
+ * Check whether node is leaf or root.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id used by this node.
+ * @param is_leaf Pointer to flag indicating whether node is a leaf.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node;
+
+ if (!is_leaf)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ *is_leaf = node->type == MRVL_NODE_QUEUE ? 1 : 0;
+
+ return 0;
+}
+
+/**
+ * Get traffic manager capabilities.
+ *
+ * @param dev Pointer to the device (unused).
+ * @param cap Pointer to the capabilities.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (!cap)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Capabilities are missing\n");
+
+ memset(cap, 0, sizeof(*cap));
+
+ cap->n_nodes_max = 1 + dev->data->nb_tx_queues; /* port + txqs number */
+ cap->n_levels_max = 2; /* port level + txqs level */
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->shaper_n_max;
+ cap->shaper_private_rate_min = MRVL_RATE_MIN;
+ cap->shaper_private_rate_max = priv->rate_max;
+
+ cap->sched_n_children_max = dev->data->nb_tx_queues;
+ cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
+ cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = MRVL_WEIGHT_MAX;
+
+ cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_SUSPEND_RESUME |
+ RTE_TM_UPDATE_NODE_STATS;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+
+ return 0;
+}
+
+/**
+ * Get traffic manager hierarchy level capabilities.
+ *
+ * @param dev Pointer to the device.
+ * @param level_id Id of the level.
+ * @param cap Pointer to the level capabilities.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (!cap)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ memset(cap, 0, sizeof(*cap));
+
+ if (level_id != MRVL_NODE_PORT && level_id != MRVL_NODE_QUEUE)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL, "Wrong level id\n");
+
+ if (level_id == MRVL_NODE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = 1;
+ cap->nonleaf.shaper_private_rate_min = MRVL_RATE_MIN;
+ cap->nonleaf.shaper_private_rate_max = priv->rate_max;
+
+ cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ dev->data->nb_tx_queues;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX;
+ cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ } else { /* level_id == MRVL_NODE_QUEUE */
+ cap->n_nodes_max = dev->data->nb_tx_queues;
+ cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
+ cap->leaf_nodes_identical = 1;
+
+ cap->leaf.shaper_private_supported = 1;
+ cap->leaf.shaper_private_rate_min = MRVL_RATE_MIN;
+ cap->leaf.shaper_private_rate_max = priv->rate_max;
+ cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
+ }
+
+ return 0;
+}
+
+/**
+ * Get node capabilities.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id of the node.
+ * @param cap Pointer to the capabilities.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node;
+
+ if (!cap)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ memset(cap, 0, sizeof(*cap));
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_rate_min = MRVL_RATE_MIN;
+ cap->shaper_private_rate_max = priv->rate_max;
+
+ if (node->type == MRVL_NODE_PORT) {
+ cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ dev->data->nb_tx_queues;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ } else {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS;
+ }
+
+ return 0;
+}
+
+/**
+ * Get shaper profile using its id.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param shaper_profile_id Id used by the shaper.
+ * @returns Pointer to the shaper profile if exists, NULL otherwise.
+ */
+static struct mrvl_tm_shaper_profile *
+mrvl_shaper_profile_from_id(struct mrvl_priv *priv, uint32_t shaper_profile_id)
+{
+ struct mrvl_tm_shaper_profile *profile;
+
+ LIST_FOREACH(profile, &priv->shaper_profiles, next)
+ if (profile->id == shaper_profile_id)
+ return profile;
+
+ return NULL;
+}
+
+/**
+ * Add a new shaper profile.
+ *
+ * @param dev Pointer to the device.
+ * @param shaper_profile_id Id of the new profile.
+ * @param params Pointer to the shaper profile parameters.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *params,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_shaper_profile *profile;
+
+ if (!params)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ if (params->committed.rate)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ NULL, "Committed rate not supported\n");
+
+ if (params->committed.size)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
+ NULL, "Committed bucket size not supported\n");
+
+ if (params->peak.rate < MRVL_RATE_MIN ||
+ params->peak.rate > priv->rate_max)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL, "Peak rate is out of range\n");
+
+ if (params->peak.size < MRVL_BURST_MIN ||
+ params->peak.size > MRVL_BURST_MAX)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ NULL, "Peak size is out of range\n");
+
+ if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Wrong shaper profile id\n");
+
+ profile = mrvl_shaper_profile_from_id(priv, shaper_profile_id);
+ if (profile)
+ return -rte_tm_error_set(error, EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Profile id already exists\n");
+
+ profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0,
+ rte_socket_id());
+ if (!profile)
+ return -rte_tm_error_set(error, ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ profile->id = shaper_profile_id;
+ rte_memcpy(&profile->params, params, sizeof(profile->params));
+
+ LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next);
+
+ return 0;
+}
+
+/**
+ * Remove a shaper profile.
+ *
+ * @param dev Pointer to the device.
+ * @param shaper_profile_id Id of the shaper profile.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_shaper_profile *profile;
+
+ profile = mrvl_shaper_profile_from_id(priv, shaper_profile_id);
+ if (!profile)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Profile id does not exist\n");
+
+ if (profile->refcnt)
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Profile is used\n");
+
+ LIST_REMOVE(profile, next);
+ rte_free(profile);
+
+ return 0;
+}
+
+/**
+ * Check node parameters.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id used by the node.
+ * @param priority Priority value.
+ * @param weight Weight value.
+ * @param level_id Id of the level.
+ * @param params Pointer to the node parameters.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t priority, uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error, EINVAL, RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id is invalid\n");
+
+ if (priority)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL, "Priority should be 0\n");
+
+ if (weight > MRVL_WEIGHT_MAX)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL, "Weight is out of range\n");
+
+ if (level_id != MRVL_NODE_PORT && level_id != MRVL_NODE_QUEUE)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL, "Wrong level id\n");
+
+ if (!params)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ if (params->shared_shaper_id)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
+ NULL, "Shared shaper is not supported\n");
+
+ if (params->n_shared_shapers)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL, "Shared shaper is not supported\n");
+
+ /* verify port (root node) settings */
+ if (node_id >= dev->data->nb_tx_queues) {
+ if (params->nonleaf.wfq_weight_mode)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ NULL, "WFQ is not supported\n");
+
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL, "SP is not supported\n");
+
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
+
+ return 0;
+ }
+
+ /* verify txq (leaf node) settings */
+ if (params->leaf.cman)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
+ NULL,
+ "Congestion mngmt is not supported\n");
+
+ if (params->leaf.wred.wred_profile_id)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
+ NULL, "WRED is not supported\n");
+
+ if (params->leaf.wred.shared_wred_context_id)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID,
+ NULL, "WRED is not supported\n");
+
+ if (params->leaf.wred.n_shared_wred_contexts)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
+ NULL, "WRED is not supported\n");
+
+ if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested txq stats are not supported\n");
+
+ return 0;
+}
+
+/**
+ * Add a new node.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id of the node.
+ * @param parent_node_id Id of the parent node.
+ * @param priority Priority value.
+ * @param weight Weight value.
+ * @param level_id Id of the level.
+ * @param params Pointer to the node parameters.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority, uint32_t weight,
+ uint32_t level_id, struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_shaper_profile *profile = NULL;
+ struct mrvl_tm_node *node, *parent = NULL;
+ int ret;
+
+ if (priv->ppio)
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is already started\n");
+
+ ret = mrvl_node_check_params(dev, node_id, priority, weight, level_id,
+ params, error);
+ if (ret)
+ return ret;
+
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ profile = mrvl_shaper_profile_from_id(priv,
+ params->shaper_profile_id);
+ if (!profile)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL, "Shaper id does not exist\n");
+ }
+
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ LIST_FOREACH(node, &priv->nodes, next) {
+ if (node->type != MRVL_NODE_PORT)
+ continue;
+
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Root node exists\n");
+ }
+ } else {
+ parent = mrvl_node_from_id(priv, parent_node_id);
+ if (!parent)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL, "Node id does not exist\n");
+ }
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id already exists\n");
+
+ node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id());
+ if (!node)
+ return -rte_tm_error_set(error, ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+
+ node->id = node_id;
+ node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? MRVL_NODE_PORT :
+ MRVL_NODE_QUEUE;
+
+ if (parent) {
+ node->parent = parent;
+ parent->refcnt++;
+ }
+
+ if (profile) {
+ node->profile = profile;
+ profile->refcnt++;
+ }
+
+ node->weight = weight;
+ node->stats_mask = params->stats_mask;
+
+ LIST_INSERT_HEAD(&priv->nodes, node, next);
+
+ return 0;
+}
+
+/**
+ * Delete a node.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id of the node.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node;
+
+ if (priv->ppio) {
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is already started\n");
+ }
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (node->refcnt)
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id is used\n");
+
+ if (node->parent)
+ node->parent->refcnt--;
+
+ if (node->profile)
+ node->profile->refcnt--;
+
+ LIST_REMOVE(node, next);
+ rte_free(node);
+
+ return 0;
+}
+
+/**
+ * Helper for suspending specific tx queue.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id used by this node.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_suspend_one(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ int ret = dev->dev_ops->tx_queue_stop(dev, node_id);
+
+ if (ret)
+ return -rte_tm_error_set(error, ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to suspend a txq\n");
+
+ return 0;
+}
+
+/**
+ * Suspend a node.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id of the node.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_suspend(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node, *tmp;
+ int ret;
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (!node->parent) {
+ LIST_FOREACH(tmp, &priv->nodes, next) {
+ if (!tmp->parent)
+ continue;
+
+ if (node != tmp->parent)
+ continue;
+
+ ret = mrvl_node_suspend_one(dev, tmp->id, error);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ return mrvl_node_suspend_one(dev, node_id, error);
+}
+
+/**
+ * Resume a node.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id of the node.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_resume(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node;
+ int ret;
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (!node->parent)
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Cannot resume a port\n");
+
+ ret = dev->dev_ops->tx_queue_start(dev, node_id);
+ if (ret)
+ return -rte_tm_error_set(error, ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to resume a txq\n");
+ return 0;
+}
+
+/**
+ * Apply traffic manager hierarchy.
+ *
+ * @param dev Pointer to the device.
+ * @param clear_on_fail Flag indicating whether to do cleanup on the failure.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node;
+ int ret;
+
+ if (priv->ppio) {
+ ret = -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is already started\n");
+ goto out;
+ }
+
+ LIST_FOREACH(node, &priv->nodes, next) {
+ struct pp2_ppio_outq_params *p;
+
+ if (node->type == MRVL_NODE_PORT) {
+ if (!node->profile)
+ continue;
+
+ priv->ppio_params.rate_limit_enable = 1;
+ priv->ppio_params.rate_limit_params.cir =
+ node->profile->params.peak.rate * 8 / 1000;
+ priv->ppio_params.rate_limit_params.cbs =
+ node->profile->params.peak.size / 1000;
+
+ MRVL_LOG(INFO,
+ "Port rate limit overrides txqs rate limit");
+
+ continue;
+ }
+
+ if (node->id >= dev->data->nb_tx_queues) {
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID, NULL,
+ "Not enough txqs are configured\n");
+ goto out;
+ }
+
+ p = &priv->ppio_params.outqs_params.outqs_params[node->id];
+
+ if (node->weight) {
+ p->sched_mode = PP2_PPIO_SCHED_M_WRR;
+ p->weight = node->weight;
+ } else {
+ p->sched_mode = PP2_PPIO_SCHED_M_SP;
+ p->weight = 0;
+ }
+
+ if (node->profile) {
+ p->rate_limit_enable = 1;
+ /* convert Bytes/s to kilo bits/s */
+ p->rate_limit_params.cir =
+ node->profile->params.peak.rate * 8 / 1000;
+ /* convert Bytes to kilo Bytes */
+ p->rate_limit_params.cbs =
+ node->profile->params.peak.size / 1000;
+ } else {
+ p->rate_limit_enable = 0;
+ p->rate_limit_params.cir = 0;
+ p->rate_limit_params.cbs = 0;
+ }
+ }
+
+ /* reset to defaults in case applied tm hierarchy is empty */
+ if (LIST_EMPTY(&priv->nodes)) {
+ int i;
+
+ for (i = 0; i < priv->ppio_params.outqs_params.num_outqs; i++) {
+ struct pp2_ppio_outq_params *p =
+ &priv->ppio_params.outqs_params.outqs_params[i];
+
+ p->sched_mode = PP2_PPIO_SCHED_M_WRR;
+ p->weight = 0;
+ p->rate_limit_enable = 0;
+ p->rate_limit_params.cir = 0;
+ p->rate_limit_params.cbs = 0;
+ }
+ }
+
+ return 0;
+out:
+ if (clear_on_fail) {
+ mrvl_tm_deinit(dev);
+ mrvl_tm_init(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * Read statistics counters for current node.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id of the node.
+ * @param stats Pointer to the statistics counters.
+ * @param stats_mask Pointer to mask of enabled statistics counters
+ * that are retrieved.
+ * @param clear Flag indicating whether to clear statistics.
+ * Non-zero value clears statistics.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats, uint64_t *stats_mask,
+ int clear, struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node;
+ int ret;
+
+ if (!priv->ppio) {
+ return -rte_tm_error_set(error, EPERM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port is not started\n");
+ }
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (stats_mask)
+ *stats_mask = node->stats_mask;
+
+ if (!stats)
+ return 0;
+
+ memset(stats, 0, sizeof(*stats));
+
+ if (!node->parent) {
+ struct pp2_ppio_statistics s;
+
+ memset(&s, 0, sizeof(s));
+ ret = pp2_ppio_get_statistics(priv->ppio, &s, clear);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read port statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = s.tx_packets;
+
+ if (node->stats_mask & RTE_TM_STATS_N_BYTES)
+ stats->n_bytes = s.tx_bytes;
+ } else {
+ struct pp2_ppio_outq_statistics s;
+
+ memset(&s, 0, sizeof(s));
+ ret = pp2_ppio_outq_get_statistics(priv->ppio, node_id, &s,
+ clear);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read txq statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = s.deq_desc;
+ }
+
+ return 0;
+}
+
+/**
+ * Update node statistics.
+ *
+ * @param dev Pointer to the device.
+ * @param node_id Id of the node.
+ * @param stats_mask Bitmask of statistics counters to be enabled.
+ * @param error Pointer to the error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_node_stats_update(struct rte_eth_dev *dev, uint32_t node_id,
+ uint64_t stats_mask, struct rte_tm_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_tm_node *node;
+
+ node = mrvl_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (!node->parent) {
+ if (stats_mask & ~(RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
+ } else {
+ if (stats_mask & ~RTE_TM_STATS_N_PKTS)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested txq stats are not supported\n");
+ }
+
+ node->stats_mask = stats_mask;
+
+ return 0;
+}
+
+const struct rte_tm_ops mrvl_tm_ops = {
+ .node_type_get = mrvl_node_type_get,
+ .capabilities_get = mrvl_capabilities_get,
+ .level_capabilities_get = mrvl_level_capabilities_get,
+ .node_capabilities_get = mrvl_node_capabilities_get,
+ .shaper_profile_add = mrvl_shaper_profile_add,
+ .shaper_profile_delete = mrvl_shaper_profile_delete,
+ .node_add = mrvl_node_add,
+ .node_delete = mrvl_node_delete,
+ .node_suspend = mrvl_node_suspend,
+ .node_resume = mrvl_node_resume,
+ .hierarchy_commit = mrvl_hierarchy_commit,
+ .node_stats_update = mrvl_node_stats_update,
+ .node_stats_read = mrvl_node_stats_read,
+};
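A note on the unit conversions in mrvl_hierarchy_commit() above: the rte_tm shaper profile carries peak.rate in bytes per second and peak.size in bytes, while the MUSDK rate limiter is programmed in kilobits per second and kilo-units of burst, hence the "* 8 / 1000" and "/ 1000" factors. A minimal sketch of the same arithmetic with illustrative numbers (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* bytes/s -> kilobits/s, the conversion applied to rate_limit_params.cir */
static uint32_t tm_rate_to_kbps(uint64_t bytes_per_sec)
{
	return bytes_per_sec * 8 / 1000;
}

int main(void)
{
	/* a 1 Gbit/s shaper is passed to rte_tm as 125000000 bytes/s */
	printf("cir = %u kbps\n", tm_rate_to_kbps(125000000));
	return 0;
}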
diff --git a/drivers/net/mvpp2/mrvl_tm.h b/drivers/net/mvpp2/mrvl_tm.h
new file mode 100644
index 00000000..9d81ede2
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_tm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MRVL_TM_H_
+#define _MRVL_TM_H_
+
+#include "mrvl_ethdev.h"
+
+int mrvl_tm_init(struct rte_eth_dev *dev);
+void mrvl_tm_deinit(struct rte_eth_dev *dev);
+
+#endif /* _MRVL_TM_H_ */
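A hedged usage sketch (not part of the patch) of how an application could drive this implementation through the generic rte_tm API. The port id, node ids, rates and WRR weight are illustrative assumptions; per mrvl_hierarchy_commit() above, leaf node ids must correspond to Tx queue ids and the hierarchy must be committed before the port is started.

#include <rte_tm.h>

static int setup_tm(uint16_t port_id, uint16_t nb_txqs)
{
	struct rte_tm_error err;
	struct rte_tm_shaper_profile profile = {
		.peak = { .rate = 125000000, .size = 12500 }, /* bytes/s, bytes */
	};
	struct rte_tm_node_params np = { .shaper_profile_id = 0 };
	uint32_t root_id = nb_txqs;	/* any id outside the txq range */
	uint16_t q;
	int ret;

	ret = rte_tm_shaper_profile_add(port_id, 0, &profile, &err);
	if (ret)
		return ret;

	/* root (port) node, rate limited by shaper profile 0 */
	ret = rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* one leaf per Tx queue, WRR weight 1, no per-queue shaper */
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	for (q = 0; q < nb_txqs; q++) {
		ret = rte_tm_node_add(port_id, q, root_id, 0, 1,
				      RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
		if (ret)
			return ret;
	}

	/* must happen before rte_eth_dev_start() for this PMD */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}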
diff --git a/drivers/net/netvsc/Makefile b/drivers/net/netvsc/Makefile
index 3c713af3..71482591 100644
--- a/drivers/net/netvsc/Makefile
+++ b/drivers/net/netvsc/Makefile
@@ -15,6 +15,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rndis.c
SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_nvs.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_vf.c
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 78b842ba..aa38ee7a 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -14,7 +14,9 @@
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
+#include <rte_devargs.h>
#include <rte_malloc.h>
+#include <rte_kvargs.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
@@ -40,8 +42,7 @@
DEV_TX_OFFLOAD_VLAN_INSERT)
#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_CRC_STRIP)
+ DEV_RX_OFFLOAD_VLAN_STRIP)
int hn_logtype_init;
int hn_logtype_driver;
@@ -55,7 +56,7 @@ static const struct hn_xstats_name_off hn_stat_strings[] = {
{ "good_packets", offsetof(struct hn_stats, packets) },
{ "good_bytes", offsetof(struct hn_stats, bytes) },
{ "errors", offsetof(struct hn_stats, errors) },
- { "allocation_failed", offsetof(struct hn_stats, nomemory) },
+ { "ring full", offsetof(struct hn_stats, ring_full) },
{ "multicast_packets", offsetof(struct hn_stats, multicast) },
{ "broadcast_packets", offsetof(struct hn_stats, broadcast) },
{ "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
@@ -105,6 +106,10 @@ eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
}
eth_dev->device = &dev->device;
+
+ /* interrupt is simulated */
+ dev->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
eth_dev->intr_handle = &dev->intr_handle;
return eth_dev;
@@ -113,22 +118,66 @@ eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
/* free ether device */
rte_eth_dev_release_port(eth_dev);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
+ eth_dev->device = NULL;
+ eth_dev->intr_handle = NULL;
+}
- eth_dev->data->dev_private = NULL;
+/* handle "latency=X" from devargs */
+static int hn_set_latency(const char *key, const char *value, void *opaque)
+{
+ struct hn_data *hv = opaque;
+ char *endp = NULL;
+ unsigned long lat;
- /*
- * Secondary process will check the name to attach.
- * Clear this field to avoid attaching a released ports.
- */
- eth_dev->data->name[0] = '\0';
+ errno = 0;
+ lat = strtoul(value, &endp, 0);
- eth_dev->device = NULL;
- eth_dev->intr_handle = NULL;
+ if (*value == '\0' || *endp != '\0') {
+ PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "set latency %lu usec", lat);
+
+ hv->latency = lat * 1000; /* usec to nsec */
+ return 0;
+}
+
+/* Parse device arguments */
+static int hn_parse_args(const struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_devargs *devargs = dev->device->devargs;
+ static const char * const valid_keys[] = {
+ "latency",
+ NULL
+ };
+ struct rte_kvargs *kvlist;
+ int ret;
+
+ if (!devargs)
+ return 0;
+
+ PMD_INIT_LOG(DEBUG, "device args %s %s",
+ devargs->name, devargs->args);
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
+ if (!kvlist) {
+ PMD_DRV_LOG(NOTICE, "invalid parameters");
+ return -EINVAL;
+ }
+
+ ret = rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Unable to process latency arg\n");
+
+ rte_kvargs_free(kvlist);
+ return ret;
}
/* Update link status.
@@ -136,9 +185,9 @@ eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
* means block this call until link is up.
* which is not worth supporting.
*/
-static int
+int
hn_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete)
+ int wait_to_complete)
{
struct hn_data *hv = dev->data->dev_private;
struct rte_eth_link link, old;
@@ -152,6 +201,8 @@ hn_dev_link_update(struct rte_eth_dev *dev,
hn_rndis_get_linkspeed(hv);
+ hn_vf_link_update(dev, wait_to_complete);
+
link = (struct rte_eth_link) {
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_autoneg = ETH_LINK_SPEED_FIXED,
@@ -190,6 +241,7 @@ static void hn_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_tx_queues = hv->max_queues;
hn_rndis_get_offload(hv, dev_info);
+ hn_vf_info_get(hv, dev_info);
}
static void
@@ -198,6 +250,7 @@ hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
struct hn_data *hv = dev->data->dev_private;
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
+ hn_vf_promiscuous_enable(dev);
}
static void
@@ -210,6 +263,7 @@ hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
if (dev->data->all_multicast)
filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
hn_rndis_set_rxfilter(hv, filter);
+ hn_vf_promiscuous_disable(dev);
}
static void
@@ -220,6 +274,7 @@ hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_BROADCAST);
+ hn_vf_allmulticast_enable(dev);
}
static void
@@ -229,6 +284,16 @@ hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
NDIS_PACKET_TYPE_BROADCAST);
+ hn_vf_allmulticast_disable(dev);
+}
+
+static int
+hn_dev_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ /* No filtering on the synthetic path, but can do it on VF */
+ return hn_vf_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
/* Setup shared rx/tx queue data */
@@ -264,6 +329,8 @@ static int hn_subchan_configure(struct hn_data *hv,
return err;
}
+ rte_vmbus_set_latency(hv->vmbus, new_sc, hv->latency);
+
retry = 0;
chn_index = rte_vmbus_sub_channel_index(new_sc);
if (chn_index == 0 || chn_index > hv->max_queues) {
@@ -338,7 +405,7 @@ static int hn_dev_configure(struct rte_eth_dev *dev)
}
}
- return 0;
+ return hn_vf_configure(dev, dev_conf);
}
static int hn_dev_stats_get(struct rte_eth_dev *dev,
@@ -346,6 +413,8 @@ static int hn_dev_stats_get(struct rte_eth_dev *dev,
{
unsigned int i;
+ hn_vf_stats_get(dev, stats);
+
for (i = 0; i < dev->data->nb_tx_queues; i++) {
const struct hn_tx_queue *txq = dev->data->tx_queues[i];
@@ -354,7 +423,7 @@ static int hn_dev_stats_get(struct rte_eth_dev *dev,
stats->opackets += txq->stats.packets;
stats->obytes += txq->stats.bytes;
- stats->oerrors += txq->stats.errors + txq->stats.nomemory;
+ stats->oerrors += txq->stats.errors;
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
stats->q_opackets[i] = txq->stats.packets;
@@ -371,7 +440,7 @@ static int hn_dev_stats_get(struct rte_eth_dev *dev,
stats->ipackets += rxq->stats.packets;
stats->ibytes += rxq->stats.bytes;
stats->ierrors += rxq->stats.errors;
- stats->imissed += rxq->ring_full;
+ stats->imissed += rxq->stats.ring_full;
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
stats->q_ipackets[i] = rxq->stats.packets;
@@ -405,22 +474,41 @@ hn_dev_stats_reset(struct rte_eth_dev *dev)
continue;
memset(&rxq->stats, 0, sizeof(struct hn_stats));
- rxq->ring_full = 0;
}
}
+static void
+hn_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ hn_dev_stats_reset(dev);
+ hn_vf_xstats_reset(dev);
+}
+
+static int
+hn_dev_xstats_count(struct rte_eth_dev *dev)
+{
+ int ret, count;
+
+ count = dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings);
+ count += dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+
+ ret = hn_vf_xstats_get_names(dev, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ return count + ret;
+}
+
static int
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int limit)
+ unsigned int limit)
{
unsigned int i, t, count = 0;
-
- PMD_INIT_FUNC_TRACE();
+ int ret;
if (!xstats_names)
- return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
- + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+ return hn_dev_xstats_count(dev);
/* Note: limit checked in rte_eth_xstats_names() */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -429,6 +517,9 @@ hn_dev_xstats_get_names(struct rte_eth_dev *dev,
if (!txq)
continue;
+ if (count >= limit)
+ break;
+
for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
snprintf(xstats_names[count++].name,
RTE_ETH_XSTATS_NAME_SIZE,
@@ -441,6 +532,9 @@ hn_dev_xstats_get_names(struct rte_eth_dev *dev,
if (!rxq)
continue;
+ if (count >= limit)
+ break;
+
for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
snprintf(xstats_names[count++].name,
RTE_ETH_XSTATS_NAME_SIZE,
@@ -448,7 +542,12 @@ hn_dev_xstats_get_names(struct rte_eth_dev *dev,
hn_stat_strings[t].name);
}
- return count;
+ ret = hn_vf_xstats_get_names(dev, xstats_names + count,
+ limit - count);
+ if (ret < 0)
+ return ret;
+
+ return count + ret;
}
static int
@@ -457,11 +556,9 @@ hn_dev_xstats_get(struct rte_eth_dev *dev,
unsigned int n)
{
unsigned int i, t, count = 0;
-
- const unsigned int nstats =
- dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
- + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+ const unsigned int nstats = hn_dev_xstats_count(dev);
const char *stats;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -492,26 +589,33 @@ hn_dev_xstats_get(struct rte_eth_dev *dev,
(stats + hn_stat_strings[t].offset);
}
- return count;
+ ret = hn_vf_xstats_get(dev, xstats + count, n - count);
+ if (ret < 0)
+ return ret;
+
+ return count + ret;
}
static int
hn_dev_start(struct rte_eth_dev *dev)
{
struct hn_data *hv = dev->data->dev_private;
+ int error;
PMD_INIT_FUNC_TRACE();
- /* check if lsc interrupt feature is enabled */
- if (dev->data->dev_conf.intr_conf.lsc) {
- PMD_DRV_LOG(ERR, "link status not supported yet");
- return -ENOTSUP;
- }
+ error = hn_rndis_set_rxfilter(hv,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+ if (error)
+ return error;
+
+ error = hn_vf_start(dev);
+ if (error)
+ hn_rndis_set_rxfilter(hv, 0);
- return hn_rndis_set_rxfilter(hv,
- NDIS_PACKET_TYPE_BROADCAST |
- NDIS_PACKET_TYPE_ALL_MULTICAST |
- NDIS_PACKET_TYPE_DIRECTED);
+ return error;
}
static void
@@ -522,12 +626,15 @@ hn_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
hn_rndis_set_rxfilter(hv, 0);
+ hn_vf_stop(dev);
}
static void
hn_dev_close(struct rte_eth_dev *dev __rte_unused)
{
PMD_INIT_LOG(DEBUG, "close");
+
+ hn_vf_close(dev);
}
static const struct eth_dev_ops hn_eth_dev_ops = {
@@ -536,22 +643,23 @@ static const struct eth_dev_ops hn_eth_dev_ops = {
.dev_stop = hn_dev_stop,
.dev_close = hn_dev_close,
.dev_infos_get = hn_dev_info_get,
- .txq_info_get = hn_dev_tx_queue_info,
- .rxq_info_get = hn_dev_rx_queue_info,
+ .dev_supported_ptypes_get = hn_vf_supported_ptypes,
.promiscuous_enable = hn_dev_promiscuous_enable,
.promiscuous_disable = hn_dev_promiscuous_disable,
.allmulticast_enable = hn_dev_allmulticast_enable,
.allmulticast_disable = hn_dev_allmulticast_disable,
+ .set_mc_addr_list = hn_dev_mc_addr_list,
.tx_queue_setup = hn_dev_tx_queue_setup,
.tx_queue_release = hn_dev_tx_queue_release,
+ .tx_done_cleanup = hn_dev_tx_done_cleanup,
.rx_queue_setup = hn_dev_rx_queue_setup,
.rx_queue_release = hn_dev_rx_queue_release,
.link_update = hn_dev_link_update,
.stats_get = hn_dev_stats_get,
+ .stats_reset = hn_dev_stats_reset,
.xstats_get = hn_dev_xstats_get,
.xstats_get_names = hn_dev_xstats_get_names,
- .stats_reset = hn_dev_stats_reset,
- .xstats_reset = hn_dev_stats_reset,
+ .xstats_reset = hn_dev_xstats_reset,
};
/*
@@ -623,12 +731,27 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
hv->port_id = eth_dev->data->port_id;
+ hv->latency = HN_CHAN_LATENCY_NS;
+
+ err = hn_parse_args(eth_dev);
+ if (err)
+ return err;
+
+ strlcpy(hv->owner.name, eth_dev->device->name,
+ RTE_ETH_MAX_OWNER_NAME_LEN);
+ err = rte_eth_dev_owner_new(&hv->owner.id);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Can not get owner id");
+ return err;
+ }
/* Initialize primary channel input for control operations */
err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
if (err)
return err;
+ rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency);
+
hv->primary = hn_rx_queue_alloc(hv, 0,
eth_dev->device->numa_node);
@@ -657,6 +780,15 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
+ /* If VF was reported but not added, do it now */
+ if (hv->vf_present && !hv->vf_dev) {
+ PMD_INIT_LOG(DEBUG, "Adding VF device");
+
+ err = hn_vf_add(eth_dev, hv);
+ if (err)
+ goto failed;
+ }
+
return 0;
failed:
@@ -686,8 +818,7 @@ eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
hn_detach(hv);
rte_vmbus_chan_close(hv->primary->chan);
rte_free(hv->primary);
-
- eth_dev->data->mac_addrs = NULL;
+ rte_eth_dev_owner_delete(hv->owner.id);
return 0;
}
diff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c
index 77d3b839..9690c5f8 100644
--- a/drivers/net/netvsc/hn_nvs.c
+++ b/drivers/net/netvsc/hn_nvs.c
@@ -279,14 +279,13 @@ hn_nvs_conn_chim(struct hn_data *hv)
NVS_TYPE_CHIM_CONNRESP);
if (error) {
PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
- goto cleanup;
+ return error;
}
if (resp.status != NVS_STATUS_OK) {
PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
resp.status);
- error = -EIO;
- goto cleanup;
+ return -EIO;
}
sectsz = resp.sectsz;
@@ -295,7 +294,8 @@ hn_nvs_conn_chim(struct hn_data *hv)
PMD_DRV_LOG(NOTICE,
"invalid chimney sending buffer section size: %u",
sectsz);
- return 0;
+ error = -EINVAL;
+ goto cleanup;
}
hv->chim_szmax = sectsz;
@@ -304,11 +304,6 @@ hn_nvs_conn_chim(struct hn_data *hv)
PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
len, hv->chim_szmax, hv->chim_cnt);
- if (len % hv->chim_szmax != 0) {
- PMD_DRV_LOG(NOTICE,
- "chimney sending sections are not properly aligned");
- }
-
/* Done! */
return 0;
@@ -537,10 +532,19 @@ void
hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
{
struct hn_nvs_datapath dp;
+ int error;
+
+ PMD_DRV_LOG(DEBUG, "set datapath %s",
+ path ? "VF" : "Synthetic");
memset(&dp, 0, sizeof(dp));
dp.type = NVS_TYPE_SET_DATAPATH;
dp.active_path = path;
- hn_nvs_req_send(hv, &dp, sizeof(dp));
+ error = hn_nvs_req_send(hv, &dp, sizeof(dp));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send set datapath failed: %d",
+ error);
+ }
}
diff --git a/drivers/net/netvsc/hn_nvs.h b/drivers/net/netvsc/hn_nvs.h
index 984a9c11..2563fd8d 100644
--- a/drivers/net/netvsc/hn_nvs.h
+++ b/drivers/net/netvsc/hn_nvs.h
@@ -105,6 +105,12 @@ struct hn_nvs_ndis_init {
uint8_t rsvd[28];
} __rte_packed;
+struct hn_nvs_vf_association {
+ uint32_t type; /* NVS_TYPE_VFASSOC_NOTE */
+ uint32_t allocated;
+ uint32_t serial;
+} __rte_packed;
+
#define NVS_DATAPATH_SYNTHETIC 0
#define NVS_DATAPATH_VF 1
@@ -207,6 +213,9 @@ void hn_nvs_detach(struct hn_data *hv);
void hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid);
int hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch);
void hn_nvs_set_datapath(struct hn_data *hv, uint32_t path);
+void hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
+ const struct vmbus_chanpkt_hdr *hdr,
+ const void *data);
static inline int
hn_nvs_send(struct vmbus_channel *chan, uint16_t flags,
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
index bde33969..0134ecb6 100644
--- a/drivers/net/netvsc/hn_rndis.c
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -11,6 +11,7 @@
#include <errno.h>
#include <unistd.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
@@ -281,7 +282,7 @@ static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan,
&nvs_rndis, sizeof(nvs_rndis), 0U, NULL);
}
-void hn_rndis_link_status(struct hn_data *hv __rte_unused, const void *msg)
+void hn_rndis_link_status(struct rte_eth_dev *dev, const void *msg)
{
const struct rndis_status_msg *indicate = msg;
@@ -290,15 +291,19 @@ void hn_rndis_link_status(struct hn_data *hv __rte_unused, const void *msg)
PMD_DRV_LOG(DEBUG, "link status %#x", indicate->status);
switch (indicate->status) {
- case RNDIS_STATUS_LINK_SPEED_CHANGE:
case RNDIS_STATUS_NETWORK_CHANGE:
case RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG:
/* ignore not in DPDK API */
break;
+ case RNDIS_STATUS_LINK_SPEED_CHANGE:
case RNDIS_STATUS_MEDIA_CONNECT:
case RNDIS_STATUS_MEDIA_DISCONNECT:
- /* TODO handle as LSC interrupt */
+ if (dev->data->dev_conf.intr_conf.lsc &&
+ hn_dev_link_update(dev, 0) == 0)
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
break;
default:
PMD_DRV_LOG(NOTICE, "unknown RNDIS indication: %#x",
@@ -382,7 +387,7 @@ static int hn_rndis_exec1(struct hn_data *hv,
if (comp) {
/* Poll primary channel until response received */
while (hv->rndis_pending == rid)
- hn_process_events(hv, 0);
+ hn_process_events(hv, 0, 1);
memcpy(comp, hv->rndis_resp, comp_len);
}
@@ -892,8 +897,7 @@ int hn_rndis_get_offload(struct hn_data *hv,
== HN_NDIS_LSOV2_CAP_IP6)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
@@ -909,6 +913,37 @@ int hn_rndis_get_offload(struct hn_data *hv,
return 0;
}
+uint32_t
+hn_rndis_get_ptypes(struct hn_data *hv)
+{
+ struct ndis_offload hwcaps;
+ uint32_t ptypes;
+ int error;
+
+ memset(&hwcaps, 0, sizeof(hwcaps));
+
+ error = hn_rndis_query_hwcaps(hv, &hwcaps);
+ if (error) {
+ PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+ return RTE_PTYPE_L2_ETHER;
+ }
+
+ ptypes = RTE_PTYPE_L2_ETHER;
+
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+ ptypes |= RTE_PTYPE_L3_IPV4;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) ||
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
+ ptypes |= RTE_PTYPE_L4_TCP;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) ||
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
+ ptypes |= RTE_PTYPE_L4_UDP;
+
+ return ptypes;
+}
+
int
hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter)
{
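hn_rndis_get_ptypes() above condenses the host's receive checksum capabilities into a packet-type mask. A hedged sketch (not part of the patch) of the application-side query through the standard ethdev call; the port id is an assumption.

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void show_l4_ptypes(uint16_t port_id)
{
	uint32_t ptypes[8];
	int i, n;

	/* ask only for the L4 packet types the port can classify */
	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
					     ptypes, RTE_DIM(ptypes));
	for (i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
		printf("port %u: L4 ptype %#x supported\n", port_id, ptypes[i]);
}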
diff --git a/drivers/net/netvsc/hn_rndis.h b/drivers/net/netvsc/hn_rndis.h
index 89e2e6ba..319b497a 100644
--- a/drivers/net/netvsc/hn_rndis.h
+++ b/drivers/net/netvsc/hn_rndis.h
@@ -6,7 +6,7 @@ struct hn_data;
void hn_rndis_receive_response(struct hn_data *hv,
const void *data, uint32_t len);
-void hn_rndis_link_status(struct hn_data *hv, const void *data);
+void hn_rndis_link_status(struct rte_eth_dev *dev, const void *msg);
int hn_rndis_attach(struct hn_data *hv);
void hn_rndis_detach(struct hn_data *hv);
int hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr);
@@ -24,6 +24,7 @@ int hn_rndis_query_rsscaps(struct hn_data *hv,
unsigned int *rxr_cnt0);
int hn_rndis_conf_rss(struct hn_data *hv,
const struct rte_eth_rss_conf *rss_conf);
+uint32_t hn_rndis_get_ptypes(struct hn_data *hv);
#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
void hn_rndis_dump(const void *buf);
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 02ef27e3..f4a36641 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -10,6 +10,7 @@
#include <errno.h>
#include <unistd.h>
#include <strings.h>
+#include <malloc.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
@@ -216,6 +217,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct hn_data *hv = dev->data->dev_private;
struct hn_tx_queue *txq;
uint32_t tx_free_thresh;
+ int err;
PMD_INIT_FUNC_TRACE();
@@ -245,8 +247,14 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
hn_reset_txagg(txq);
- dev->data->tx_queues[queue_idx] = txq;
+ err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, tx_conf);
+ if (err) {
+ rte_free(txq);
+ return err;
+ }
+ dev->data->tx_queues[queue_idx] = txq;
return 0;
}
@@ -269,17 +277,6 @@ hn_dev_tx_queue_release(void *arg)
rte_free(txq);
}
-void
-hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
- struct rte_eth_txq_info *qinfo)
-{
- struct hn_data *hv = dev->data->dev_private;
- struct hn_tx_queue *txq = dev->data->rx_queues[queue_idx];
-
- qinfo->conf.tx_free_thresh = txq->free_thresh;
- qinfo->nb_desc = hv->tx_pool->size;
-}
-
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
@@ -533,7 +530,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
hn_update_packet_stats(&rxq->stats, m);
if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
- ++rxq->ring_full;
+ ++rxq->stats.ring_full;
rte_pktmbuf_free(m);
}
}
@@ -600,7 +597,7 @@ error:
}
static void
-hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
+hn_rndis_receive(struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
{
const struct rndis_msghdr *hdr = buf;
@@ -612,7 +609,7 @@ hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
break;
case RNDIS_INDICATE_STATUS_MSG:
- hn_rndis_link_status(rxq->hv, buf);
+ hn_rndis_link_status(dev, buf);
break;
case RNDIS_INITIALIZE_CMPLT:
@@ -712,22 +709,59 @@ hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
hn_rx_buf_release(rxb);
}
+/*
+ * Called when NVS inband events are received.
+ * Dispatch the message to the right handler based on its NVS type.
+ */
+static void hn_nvs_handle_notify(struct rte_eth_dev *dev,
+ const struct vmbus_chanpkt_hdr *pkt,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ switch (hdr->type) {
+ case NVS_TYPE_TXTBL_NOTE:
+ /* Transmit indirection table has locking problems
+ * in DPDK and is therefore not implemented
+ */
+ PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table");
+ break;
+
+ case NVS_TYPE_VFASSOC_NOTE:
+ hn_nvs_handle_vfassoc(dev, pkt, data);
+ break;
+
+ default:
+ PMD_DRV_LOG(INFO,
+ "got notify, nvs type %u", hdr->type);
+ }
+}
+
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
uint16_t queue_id,
unsigned int socket_id)
{
struct hn_rx_queue *rxq;
- rxq = rte_zmalloc_socket("HN_RXQ",
- sizeof(*rxq) + HN_RXQ_EVENT_DEFAULT,
+ rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
RTE_CACHE_LINE_SIZE, socket_id);
- if (rxq) {
- rxq->hv = hv;
- rxq->chan = hv->channels[queue_id];
- rte_spinlock_init(&rxq->ring_lock);
- rxq->port_id = hv->port_id;
- rxq->queue_id = queue_id;
+ if (!rxq)
+ return NULL;
+
+ rxq->hv = hv;
+ rxq->chan = hv->channels[queue_id];
+ rte_spinlock_init(&rxq->ring_lock);
+ rxq->port_id = hv->port_id;
+ rxq->queue_id = queue_id;
+ rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
+ rxq->event_buf = rte_malloc_socket("HN_EVENTS", HN_RXQ_EVENT_DEFAULT,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->event_buf) {
+ rte_free(rxq);
+ return NULL;
}
+
return rxq;
}
@@ -735,13 +769,14 @@ int
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct hn_data *hv = dev->data->dev_private;
char ring_name[RTE_RING_NAMESIZE];
struct hn_rx_queue *rxq;
unsigned int count;
+ int error = -ENOMEM;
PMD_INIT_FUNC_TRACE();
@@ -771,6 +806,11 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (!rxq->rx_ring)
goto fail;
+ error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, rx_conf, mp);
+ if (error)
+ goto fail;
+
dev->data->rx_queues[queue_idx] = rxq;
return 0;
@@ -778,7 +818,7 @@ fail:
rte_ring_free(rxq->rx_ring);
rte_free(rxq->event_buf);
rte_free(rxq);
- return -ENOMEM;
+ return error;
}
void
@@ -795,77 +835,79 @@ hn_dev_rx_queue_release(void *arg)
rxq->rx_ring = NULL;
rxq->mb_pool = NULL;
+ hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);
+
+ /* Keep primary queue to allow for control operations */
if (rxq != rxq->hv->primary) {
rte_free(rxq->event_buf);
rte_free(rxq);
}
}
-void
-hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
- struct rte_eth_rxq_info *qinfo)
-{
- struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx];
-
- qinfo->mp = rxq->mb_pool;
- qinfo->scattered_rx = 1;
- qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring);
-}
-
-static void
-hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
- const void *data)
+int
+hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
{
- const struct hn_nvs_hdr *hdr = data;
-
- if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
- PMD_DRV_LOG(ERR, "invalid nvs notify");
- return;
- }
+ struct hn_tx_queue *txq = arg;
- PMD_DRV_LOG(INFO,
- "got notify, nvs type %u", hdr->type);
+ return hn_process_events(txq->hv, txq->queue_id, free_cnt);
}
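The hn_dev_tx_done_cleanup() hook above maps the generic rte_eth_tx_done_cleanup() call onto hn_process_events(). A hedged sketch (not part of the patch) of how an application might use it when its transmit mbuf pool runs low; the port/queue ids, pool and threshold are illustrative assumptions.

#include <rte_ethdev.h>
#include <rte_mempool.h>

static void reclaim_tx_descs(uint16_t port_id, uint16_t queue_id,
			     struct rte_mempool *tx_pool)
{
	/* force completion processing; free_cnt 0 means "as many as possible" */
	if (rte_mempool_avail_count(tx_pool) < 64)
		(void)rte_eth_tx_done_cleanup(port_id, queue_id, 0);
}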
/*
* Process pending events on the channel.
* Called from both Rx queue poll and Tx cleanup
*/
-void hn_process_events(struct hn_data *hv, uint16_t queue_id)
+uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
+ uint32_t tx_limit)
{
struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
struct hn_rx_queue *rxq;
uint32_t bytes_read = 0;
+ uint32_t tx_done = 0;
int ret = 0;
rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
/* If no pending data then nothing to do */
if (rte_vmbus_chan_rx_empty(rxq->chan))
- return;
+ return 0;
/*
* Since channel is shared between Rx and TX queue need to have a lock
* since DPDK does not force same CPU to be used for Rx/Tx.
*/
if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
- return;
+ return 0;
for (;;) {
const struct vmbus_chanpkt_hdr *pkt;
- uint32_t len = HN_RXQ_EVENT_DEFAULT;
+ uint32_t len = rxq->event_sz;
const void *data;
+retry:
ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
if (ret == -EAGAIN)
break; /* ring is empty */
- else if (ret == -ENOBUFS)
- rte_exit(EXIT_FAILURE, "event buffer not big enough (%u < %u)",
- HN_RXQ_EVENT_DEFAULT, len);
- else if (ret <= 0)
+ if (unlikely(ret == -ENOBUFS)) {
+ /* event buffer not large enough to read ring */
+
+ PMD_DRV_LOG(DEBUG,
+ "event buffer expansion (need %u)", len);
+ rxq->event_sz = len + len / 4;
+ rxq->event_buf = rte_realloc(rxq->event_buf, rxq->event_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq->event_buf)
+ goto retry;
+ /* out of memory, no more events now */
+ rxq->event_sz = 0;
+ break;
+ }
+
+ if (unlikely(ret <= 0)) {
+ /* This indicates a failure to communicate (or worse) */
rte_exit(EXIT_FAILURE,
"vmbus ring buffer error: %d", ret);
+ }
bytes_read += ret;
pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
@@ -873,6 +915,7 @@ void hn_process_events(struct hn_data *hv, uint16_t queue_id)
switch (pkt->type) {
case VMBUS_CHANPKT_TYPE_COMP:
+ ++tx_done;
hn_nvs_handle_comp(dev, queue_id, pkt, data);
break;
@@ -881,7 +924,7 @@ void hn_process_events(struct hn_data *hv, uint16_t queue_id)
break;
case VMBUS_CHANPKT_TYPE_INBAND:
- hn_nvs_handle_notify(pkt, data);
+ hn_nvs_handle_notify(dev, pkt, data);
break;
default:
@@ -889,6 +932,9 @@ void hn_process_events(struct hn_data *hv, uint16_t queue_id)
break;
}
+ if (tx_limit && tx_done >= tx_limit)
+ break;
+
if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
break;
}
@@ -897,6 +943,8 @@ void hn_process_events(struct hn_data *hv, uint16_t queue_id)
rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
rte_spinlock_unlock(&rxq->ring_lock);
+
+ return tx_done;
}
static void hn_append_to_chim(struct hn_tx_queue *txq,
@@ -967,7 +1015,7 @@ static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
struct hn_txdesc *txd;
if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
- ++txq->stats.nomemory;
+ ++txq->stats.ring_full;
PMD_TX_LOG(DEBUG, "tx pool exhausted!");
return NULL;
}
@@ -1235,7 +1283,9 @@ uint16_t
hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct hn_tx_queue *txq = ptxq;
+ uint16_t queue_id = txq->queue_id;
struct hn_data *hv = txq->hv;
+ struct rte_eth_dev *vf_dev;
bool need_sig = false;
uint16_t nb_tx;
int ret;
@@ -1243,8 +1293,17 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (unlikely(hv->closed))
return 0;
+ /* Transmit over VF if present and up */
+ vf_dev = hv->vf_dev;
+ rte_compiler_barrier();
+ if (vf_dev && vf_dev->data->dev_started) {
+ void *sub_q = vf_dev->data->tx_queues[queue_id];
+
+ return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+ }
+
if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
- hn_process_events(hv, txq->queue_id);
+ hn_process_events(hv, txq->queue_id, 0);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *m = tx_pkts[nb_tx];
@@ -1264,7 +1323,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (unlikely(!pkt))
break;
- hn_encap(pkt, txq->queue_id, m);
+ hn_encap(pkt, queue_id, m);
hn_append_to_chim(txq, pkt, m);
rte_pktmbuf_free(m);
@@ -1291,7 +1350,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txd->data_size += m->pkt_len;
++txd->packets;
- hn_encap(pkt, txq->queue_id, m);
+ hn_encap(pkt, queue_id, m);
ret = hn_xmit_sg(txq, txd, m, &need_sig);
if (unlikely(ret != 0)) {
@@ -1320,15 +1379,36 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct hn_rx_queue *rxq = prxq;
struct hn_data *hv = rxq->hv;
+ struct rte_eth_dev *vf_dev;
+ uint16_t nb_rcv;
if (unlikely(hv->closed))
return 0;
- /* If ring is empty then process more */
- if (rte_ring_count(rxq->rx_ring) < nb_pkts)
- hn_process_events(hv, rxq->queue_id);
+ vf_dev = hv->vf_dev;
+ rte_compiler_barrier();
+
+ if (vf_dev && vf_dev->data->dev_started) {
+ /* Normally, with SR-IOV the ring buffer will be empty */
+ hn_process_events(hv, rxq->queue_id, 0);
+
+ /* Get some mbufs off of the staging ring */
+ nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
+ (void **)rx_pkts,
+ nb_pkts / 2, NULL);
+ /* And the rest from the VF */
+ nb_rcv += rte_eth_rx_burst(vf_dev->data->port_id,
+ rxq->queue_id,
+ rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+ } else {
+ /* If the ring holds fewer packets than requested, process more events */
+ if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+ hn_process_events(hv, rxq->queue_id, 0);
+
+ nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
+ (void **)rx_pkts,
+ nb_pkts, NULL);
+ }
- /* Get mbufs off staging ring */
- return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
- nb_pkts, NULL);
+ return nb_rcv;
}
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index f7ff8585..e1072c7c 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -20,6 +20,9 @@
/* Retry interval */
#define HN_CHAN_INTERVAL_US 100
+/* Host monitor interval */
+#define HN_CHAN_LATENCY_NS 50000
+
/* Buffers need to be aligned */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
@@ -36,7 +39,7 @@ struct hn_stats {
uint64_t packets;
uint64_t bytes;
uint64_t errors;
- uint64_t nomemory;
+ uint64_t ring_full;
uint64_t multicast;
uint64_t broadcast;
/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
@@ -75,9 +78,8 @@ struct hn_rx_queue {
uint16_t port_id;
uint16_t queue_id;
struct hn_stats stats;
- uint64_t ring_full;
- uint8_t event_buf[];
+ void *event_buf;
};
@@ -92,8 +94,11 @@ struct hn_rx_bufinfo {
struct hn_data {
struct rte_vmbus_device *vmbus;
struct hn_rx_queue *primary;
+ struct rte_eth_dev *vf_dev; /* Subordinate device */
+ rte_spinlock_t vf_lock;
uint16_t port_id;
bool closed;
+ bool vf_present;
uint32_t link_status;
uint32_t link_speed;
@@ -110,6 +115,7 @@ struct hn_data {
uint32_t chim_szmax; /* Max size per buffer */
uint32_t chim_cnt; /* Max packets per buffer */
+ uint32_t latency;
uint32_t nvs_ver;
uint32_t ndis_ver;
uint32_t rndis_agg_size;
@@ -121,6 +127,10 @@ struct hn_data {
uint8_t rndis_resp[256];
struct ether_addr mac_addr;
+
+ struct rte_eth_dev_owner owner;
+ struct rte_intr_handle vf_intr;
+
struct vmbus_channel *channels[HN_MAX_CHANNELS];
};
@@ -130,7 +140,8 @@ hn_primary_chan(const struct hn_data *hv)
return hv->channels[0];
}
-void hn_process_events(struct hn_data *hv, uint16_t queue_id);
+uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
+ uint32_t tx_limit);
uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -138,12 +149,14 @@ uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
int hn_tx_pool_init(struct rte_eth_dev *dev);
+int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
void hn_dev_tx_queue_release(void *arg);
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
struct rte_eth_txq_info *qinfo);
+int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
uint16_t queue_id,
@@ -154,5 +167,46 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
void hn_dev_rx_queue_release(void *arg);
-void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
- struct rte_eth_rxq_info *qinfo);
+
+void hn_vf_info_get(struct hn_data *hv,
+ struct rte_eth_dev_info *info);
+int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
+int hn_vf_configure(struct rte_eth_dev *dev,
+ const struct rte_eth_conf *dev_conf);
+const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
+int hn_vf_start(struct rte_eth_dev *dev);
+void hn_vf_reset(struct rte_eth_dev *dev);
+void hn_vf_stop(struct rte_eth_dev *dev);
+void hn_vf_close(struct rte_eth_dev *dev);
+
+void hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
+void hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
+void hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
+void hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
+int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
+int hn_vf_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
+int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);
+
+int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+void hn_vf_stats_reset(struct rte_eth_dev *dev);
+int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size);
+int hn_vf_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n);
+void hn_vf_xstats_reset(struct rte_eth_dev *dev);
diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
new file mode 100644
index 00000000..7a84ad8c
--- /dev/null
+++ b/drivers/net/netvsc/hn_vf.c
@@ -0,0 +1,549 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_lcore.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+
+/* Search for VF with matching MAC address, return port id */
+static int hn_vf_match(const struct rte_eth_dev *dev)
+{
+ const struct ether_addr *mac = dev->data->mac_addrs;
+ char buf[32];
+ int i;
+
+ ether_format_addr(buf, sizeof(buf), mac);
+ RTE_ETH_FOREACH_DEV(i) {
+ const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
+ const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;
+
+ if (vf_dev == dev)
+ continue;
+
+ ether_format_addr(buf, sizeof(buf), vf_mac);
+ if (is_same_ether_addr(mac, vf_mac))
+ return i;
+ }
+ return -ENOENT;
+}
+
+/*
+ * Attach new PCI VF device and return the port_id
+ */
+static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
+ struct rte_eth_dev **vf_dev)
+{
+ struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
+ int ret;
+
+ ret = rte_eth_dev_owner_get(port_id, &owner);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
+ return ret;
+ }
+
+ if (owner.id != RTE_ETH_DEV_NO_OWNER) {
+ PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
+ port_id, owner.name);
+ return -EBUSY;
+ }
+
+ ret = rte_eth_dev_owner_set(port_id, &hv->owner);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Can set owner for port %d", port_id);
+ return ret;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
+ rte_smp_wmb();
+ *vf_dev = &rte_eth_devices[port_id];
+ return 0;
+}
+
+/* Add new VF device to synthetic device */
+int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
+{
+ int port, err;
+
+ port = hn_vf_match(dev);
+ if (port < 0) {
+ PMD_DRV_LOG(NOTICE, "No matching MAC found");
+ return port;
+ }
+
+ rte_spinlock_lock(&hv->vf_lock);
+ if (hv->vf_dev) {
+ PMD_DRV_LOG(ERR, "VF already attached");
+ err = -EBUSY;
+ } else {
+ err = hn_vf_attach(hv, port, &hv->vf_dev);
+ }
+
+ if (err == 0) {
+ dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ hv->vf_intr = (struct rte_intr_handle) {
+ .fd = -1,
+ .type = RTE_INTR_HANDLE_EXT,
+ };
+ dev->intr_handle = &hv->vf_intr;
+ hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
+ }
+ rte_spinlock_unlock(&hv->vf_lock);
+
+ return err;
+}
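hn_vf_attach() above relies on the ethdev ownership API so that the claimed VF port disappears from the default port iterators and the application only ever polls the synthetic netvsc port. A hedged sketch (not part of the patch) of the application-visible effect, assuming RTE_ETH_FOREACH_DEV() skips owned ports as documented for this release:

#include <stdio.h>
#include <rte_ethdev.h>

static void list_visible_ports(void)
{
	uint16_t port_id;

	/* a VF claimed via rte_eth_dev_owner_set() is not iterated here */
	RTE_ETH_FOREACH_DEV(port_id)
		printf("application-visible port %u\n", port_id);
}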
+
+/* Remove VF device */
+static void hn_vf_remove(struct hn_data *hv)
+{
+ struct rte_eth_dev *vf_dev;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (!vf_dev) {
+ PMD_DRV_LOG(ERR, "VF path not active");
+ rte_spinlock_unlock(&hv->vf_lock);
+ return;
+ }
+
+ /* Stop incoming packets from arriving on VF */
+ hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
+ hv->vf_dev = NULL;
+
+ /* Give back ownership */
+ rte_eth_dev_owner_unset(vf_dev->data->port_id, hv->owner.id);
+ rte_spinlock_unlock(&hv->vf_lock);
+}
+
+/* Handle VF association message from host */
+void
+hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
+ const struct vmbus_chanpkt_hdr *hdr,
+ const void *data)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ const struct hn_nvs_vf_association *vf_assoc = data;
+
+ if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
+ PMD_DRV_LOG(ERR, "invalid vf association NVS");
+ return;
+ }
+
+ PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
+ vf_assoc->serial,
+ vf_assoc->allocated ? "add to" : "remove from",
+ dev->data->port_id);
+
+ hv->vf_present = vf_assoc->allocated;
+
+ if (dev->state != RTE_ETH_DEV_ATTACHED)
+ return;
+
+ if (vf_assoc->allocated)
+ hn_vf_add(dev, hv);
+ else
+ hn_vf_remove(hv);
+}
+
+/*
+ * Merge the info from the VF and synthetic path.
+ * Use the VF's default configuration, the lower of the two queue limits,
+ * and the more restrictive of the buffer size limits.
+ */
+static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
+ struct rte_eth_dev_info *info)
+{
+ struct rte_eth_dev_info vf_info;
+
+ rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+
+ info->speed_capa = vf_info.speed_capa;
+ info->default_rxportconf = vf_info.default_rxportconf;
+ info->default_txportconf = vf_info.default_txportconf;
+
+ info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
+ info->max_rx_queues);
+ info->rx_offload_capa &= vf_info.rx_offload_capa;
+ info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
+ info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;
+
+ info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
+ info->max_tx_queues);
+ info->tx_offload_capa &= vf_info.tx_offload_capa;
+ info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
+
+ info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
+ info->min_rx_bufsize);
+ info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
+ info->max_rx_pktlen);
+}
+
+void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
+{
+ struct rte_eth_dev *vf_dev;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ hn_vf_info_merge(vf_dev, info);
+ rte_spinlock_unlock(&hv->vf_lock);
+}
+
+int hn_vf_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev && vf_dev->dev_ops->link_update)
+ ret = (*vf_dev->dev_ops->link_update)(dev, wait_to_complete);
+ rte_spinlock_unlock(&hv->vf_lock);
+
+ return ret;
+}
+
+/* called when VF has link state interrupts enabled */
+static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
+ enum rte_eth_event_type event,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct rte_eth_dev *dev = cb_arg;
+
+ if (event != RTE_ETH_EVENT_INTR_LSC)
+ return 0;
+
+ /* if link state has changed pass on */
+ if (hn_dev_link_update(dev, 0) == 0)
+ return 0; /* no change */
+
+ return _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+}
+
+static int _hn_vf_configure(struct rte_eth_dev *dev,
+ struct rte_eth_dev *vf_dev,
+ const struct rte_eth_conf *dev_conf)
+{
+ struct rte_eth_conf vf_conf = *dev_conf;
+ uint16_t vf_port = vf_dev->data->port_id;
+ int ret;
+
+ if (dev_conf->intr_conf.lsc &&
+ (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
+ PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
+ vf_port);
+ vf_conf.intr_conf.lsc = 1;
+ } else {
+ PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
+ vf_port);
+ vf_conf.intr_conf.lsc = 0;
+ }
+
+ ret = rte_eth_dev_configure(vf_port,
+ dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues,
+ &vf_conf);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "VF configuration failed: %d", ret);
+ } else if (vf_conf.intr_conf.lsc) {
+ ret = rte_eth_dev_callback_register(vf_port,
+ RTE_ETH_DEV_INTR_LSC,
+ hn_vf_lsc_event, dev);
+ if (ret)
+ PMD_DRV_LOG(ERR,
+ "Failed to register LSC callback for VF %u",
+ vf_port);
+ }
+ return ret;
+}
+
+/*
+ * Configure VF if present.
+ * Force VF to have same number of queues as synthetic device
+ */
+int hn_vf_configure(struct rte_eth_dev *dev,
+ const struct rte_eth_conf *dev_conf)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ ret = _hn_vf_configure(dev, vf_dev, dev_conf);
+ rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
+}
+
+const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ const uint32_t *ptypes = NULL;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
+ ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
+ rte_spinlock_unlock(&hv->vf_lock);
+
+ return ptypes;
+}
+
+int hn_vf_start(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ ret = rte_eth_dev_start(vf_dev->data->port_id);
+ rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
+}
+
+void hn_vf_stop(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ rte_eth_dev_stop(vf_dev->data->port_id);
+ rte_spinlock_unlock(&hv->vf_lock);
+}
+
+/* If VF is present, then cascade configuration down */
+#define VF_ETHDEV_FUNC(dev, func) \
+ { \
+ struct hn_data *hv = (dev)->data->dev_private; \
+ struct rte_eth_dev *vf_dev; \
+ rte_spinlock_lock(&hv->vf_lock); \
+ vf_dev = hv->vf_dev; \
+ if (vf_dev) \
+ func(vf_dev->data->port_id); \
+ rte_spinlock_unlock(&hv->vf_lock); \
+ }
+
+void hn_vf_reset(struct rte_eth_dev *dev)
+{
+ VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
+}
+
+void hn_vf_close(struct rte_eth_dev *dev)
+{
+ VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
+}
+
+void hn_vf_stats_reset(struct rte_eth_dev *dev)
+{
+ VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
+}
+
+void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
+}
+
+void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
+}
+
+void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
+}
+
+void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
+}
+
+int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
+ mc_addr_set, nb_mc_addr);
+ rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
+}
+
+int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
+ queue_idx, nb_desc,
+ socket_id, tx_conf);
+ rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
+}
+
+void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
+{
+ struct rte_eth_dev *vf_dev;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
+ void *subq = vf_dev->data->tx_queues[queue_id];
+
+ (*vf_dev->dev_ops->tx_queue_release)(subq);
+ }
+
+ rte_spinlock_unlock(&hv->vf_lock);
+}
+
+int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
+ queue_idx, nb_desc,
+ socket_id, rx_conf, mp);
+ rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
+}
+
+void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
+{
+ struct rte_eth_dev *vf_dev;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
+ void *subq = vf_dev->data->rx_queues[queue_id];
+
+ (*vf_dev->dev_ops->rx_queue_release)(subq);
+ }
+ rte_spinlock_unlock(&hv->vf_lock);
+}
+
+int hn_vf_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int ret = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev)
+ ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
+ rte_spinlock_unlock(&hv->vf_lock);
+ return ret;
+}
+
+int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *names,
+ unsigned int n)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int i, count = 0;
+ char tmp[RTE_ETH_XSTATS_NAME_SIZE];
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev && vf_dev->dev_ops->xstats_get_names)
+ count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
+ rte_spinlock_unlock(&hv->vf_lock);
+
+ /* add vf_ prefix to xstat names */
+ if (names) {
+ for (i = 0; i < count; i++) {
+ snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
+ strlcpy(names[i].name, tmp, sizeof(names[i].name));
+ }
+ }
+
+ return count;
+}
+
+int hn_vf_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+ int count = 0;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev && vf_dev->dev_ops->xstats_get)
+ count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
+ rte_spinlock_unlock(&hv->vf_lock);
+
+ return count;
+}
+
+void hn_vf_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_dev *vf_dev;
+
+ rte_spinlock_lock(&hv->vf_lock);
+ vf_dev = hv->vf_dev;
+ if (vf_dev && vf_dev->dev_ops->xstats_reset)
+ vf_dev->dev_ops->xstats_reset(vf_dev);
+ rte_spinlock_unlock(&hv->vf_lock);
+}
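With hn_vf_xstats_get_names()/hn_vf_xstats_get() above, the VF counters are appended to the synthetic ones under a "vf_" prefix. A hedged sketch (not part of the patch) of reading the combined set; the port id is an assumption.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *vals = NULL;
	int i, n;

	/* first call with NULL returns the number of counters */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	if (names && vals &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, vals, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[vals[i].id].name, vals[i].value);
	}
	free(names);
	free(vals);
}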
diff --git a/drivers/net/netvsc/meson.build b/drivers/net/netvsc/meson.build
index a717cdd4..c8426971 100644
--- a/drivers/net/netvsc/meson.build
+++ b/drivers/net/netvsc/meson.build
@@ -3,7 +3,7 @@
build = dpdk_conf.has('RTE_LIBRTE_VMBUS_BUS')
version = 2
-sources = files('hn_ethdev.c', 'hn_rxtx.c', 'hn_rndis.c', 'hn_nvs.c')
+sources = files('hn_ethdev.c', 'hn_rxtx.c', 'hn_rndis.c', 'hn_nvs.c', 'hn_vf.c')
deps += ['bus_vmbus' ]
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index ab4e0a7d..d3fa5699 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -10,6 +10,7 @@ LIB = librte_pmd_nfp.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lm
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
index 3ba37e27..ba6a22e8 100644
--- a/drivers/net/nfp/meson.build
+++ b/drivers/net/nfp/meson.build
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
+if host_machine.system() != 'linux'
+ build = false
+endif
sources = files('nfpcore/nfp_cpp_pcie_ops.c',
'nfpcore/nfp_nsp.c',
'nfpcore/nfp_cppcore.c',
@@ -14,3 +17,5 @@ sources = files('nfpcore/nfp_cpp_pcie_ops.c',
'nfpcore/nfp_nsp_eth.c',
'nfpcore/nfp_hwinfo.c',
'nfp_net.c')
+
+allow_experimental_apis = true
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 6e5e305f..bab1f68e 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -411,12 +411,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- /* KEEP_CRC offload flag is not supported by PMD
- * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
- */
- if (rte_eth_dev_must_keep_crc(rxmode->offloads))
- PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
-
return 0;
}
@@ -1168,8 +1162,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_KEEP_CRC;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
@@ -1205,8 +1198,10 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
};
- dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
+ dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_IPV6 |
ETH_RSS_NONFRAG_IPV6_TCP |
ETH_RSS_NONFRAG_IPV6_UDP;
@@ -1786,21 +1781,20 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
return;
/* If IPv4 and IP checksum error, fail */
- if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
+ if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
+ !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
/* If neither UDP nor TCP return */
if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
!(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
return;
- if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
- mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-
- if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
+ if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
+ mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
@@ -1884,6 +1878,18 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
case NFP_NET_RSS_IPV6_EX:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
break;
+ case NFP_NET_RSS_IPV4_TCP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
+ case NFP_NET_RSS_IPV6_TCP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
+ case NFP_NET_RSS_IPV4_UDP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
+ case NFP_NET_RSS_IPV6_UDP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
default:
mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
}
@@ -2465,14 +2471,22 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
rss_hf = rss_conf->rss_hf;
if (rss_hf & ETH_RSS_IPV4)
- cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
- NFP_NET_CFG_RSS_IPV4_TCP |
- NFP_NET_CFG_RSS_IPV4_UDP;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
if (rss_hf & ETH_RSS_IPV6)
- cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
- NFP_NET_CFG_RSS_IPV6_TCP |
- NFP_NET_CFG_RSS_IPV6_UDP;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
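
The split above changes the RSS control word from two coarse groups to one bit per ETH_RSS_* flag, so an application can request, say, IPv4 TCP hashing without also enabling plain IPv4 hashing. A hypothetical table-driven equivalent, purely to illustrate the now 1:1 mapping (not the driver's actual code):

	static const struct {
		uint64_t rss_hf_bit;
		uint32_t nfp_bit;
	} rss_map[] = {
		{ ETH_RSS_IPV4,             NFP_NET_CFG_RSS_IPV4 },
		{ ETH_RSS_NONFRAG_IPV4_TCP, NFP_NET_CFG_RSS_IPV4_TCP },
		{ ETH_RSS_NONFRAG_IPV4_UDP, NFP_NET_CFG_RSS_IPV4_UDP },
		{ ETH_RSS_IPV6,             NFP_NET_CFG_RSS_IPV6 },
		{ ETH_RSS_NONFRAG_IPV6_TCP, NFP_NET_CFG_RSS_IPV6_TCP },
		{ ETH_RSS_NONFRAG_IPV6_UDP, NFP_NET_CFG_RSS_IPV6_UDP },
	};
	unsigned int i;

	for (i = 0; i < RTE_DIM(rss_map); i++)
		if (rss_hf & rss_map[i].rss_hf_bit)
			cfg_rss_ctrl |= rss_map[i].nfp_bit;
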
@@ -2688,6 +2702,14 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ /* NFP cannot handle DMA addresses requiring more than 40 bits */
+ if (rte_eal_check_dma_mask(40)) {
+ RTE_LOG(ERR, PMD, "device %s can not be used:",
+ pci_dev->device.name);
+ RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
+ return -ENODEV;
+ };
+
if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
port = get_pf_port_number(eth_dev->data->name);
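
The EAL helper rte_eal_check_dma_mask() used above returns 0 only when all memory currently mapped for DMA fits under the given address width, which is what lets probe refuse setups whose IOVAs exceed the NIC's 40-bit reach. A stripped-down sketch of the same guard as a generic probe-time pattern (the constant name is illustrative):

	#define NFP_DMA_MASK_BITS 40	/* device limit, per the comment above */

	if (rte_eal_check_dma_mask(NFP_DMA_MASK_BITS) != 0) {
		/* some IOVA lies above 1ULL << 40; the device cannot address it */
		return -ENODEV;
	}
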
@@ -2886,6 +2908,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
ether_addr_copy((struct ether_addr *)hw->mac_addr,
&eth_dev->data->mac_addrs[0]);
+ if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
+
PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
"mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -3265,14 +3290,16 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
.id_table = pci_id_nfp_pf_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = nfp_pf_pci_probe,
.remove = eth_nfp_pci_remove,
};
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
.id_table = pci_id_nfp_vf_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_nfp_pci_probe,
.remove = eth_nfp_pci_remove,
};
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index c1b044ee..b01036df 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -293,6 +293,8 @@ struct nfp_net_txq {
#define PCIE_DESC_RX_UDP_CSUM_OK (1 << 1)
#define PCIE_DESC_RX_VLAN (1 << 0)
+#define PCIE_DESC_RX_L4_CSUM_OK (PCIE_DESC_RX_TCP_CSUM_OK | \
+ PCIE_DESC_RX_UDP_CSUM_OK)
struct nfp_net_rx_desc {
union {
/* Freelist descriptor */
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 244f8654..159c1c1f 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -305,7 +305,6 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = 0;
dev_info->reta_size = internals->reta_size;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -615,8 +614,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
params = rte_vdev_device_args(dev);
PMD_LOG(INFO, "Initializing pmd_null for %s", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(params) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
PMD_LOG(ERR, "Failed to probe %s", name);
@@ -681,7 +679,9 @@ rte_pmd_null_remove(struct rte_vdev_device *dev)
if (eth_dev == NULL)
return -1;
- rte_free(eth_dev->data->dev_private);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(eth_dev);
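
With the ethdev rework this series builds on, rte_eth_dev_release_port() frees data->mac_addrs itself on the primary process; because the null PMD carves its MAC storage out of dev_private, the pointer is cleared first so only the dev_private allocation is released. The same pattern, as a generic sketch for any driver that embeds mac_addrs this way:

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* storage lives inside dev_private; one free covers both */
		eth_dev->data->mac_addrs = NULL;
	rte_eth_dev_release_port(eth_dev);
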
diff --git a/drivers/net/octeontx/base/meson.build b/drivers/net/octeontx/base/meson.build
index 09f657ab..a06a2c89 100644
--- a/drivers/net/octeontx/base/meson.build
+++ b/drivers/net/octeontx/base/meson.build
@@ -13,8 +13,12 @@ foreach d: depends
static_objs += [get_variable('static_rte_' + d)]
endforeach
+c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
base_lib = static_library('octeontx_base', sources,
- c_args: cflags,
+ c_args: c_args,
dependencies: static_objs,
)
diff --git a/drivers/net/octeontx/base/octeontx_io.h b/drivers/net/octeontx/base/octeontx_io.h
index d51ded23..04b9ce19 100644
--- a/drivers/net/octeontx/base/octeontx_io.h
+++ b/drivers/net/octeontx/base/octeontx_io.h
@@ -10,7 +10,7 @@
#include <rte_io.h>
-/* In Cavium OcteonTX SoC, all accesses to the device registers are
+/* In Cavium OCTEON TX SoC, all accesses to the device registers are
* implicitly strongly ordered. So, The relaxed version of IO operation is
* safe to use with out any IO memory barriers.
*/
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 0f3d5d67..06814862 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -281,14 +281,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- /* KEEP_CRC offload flag is not supported by PMD
- * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
- */
- if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
- PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
- rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
@@ -1023,12 +1015,22 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
return 0;
}
+ /* Reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(octtx_name);
+ if (eth_dev == NULL) {
+ octeontx_log_err("failed to allocate rte_eth_dev");
+ res = -ENOMEM;
+ goto err;
+ }
+ data = eth_dev->data;
+
nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
if (nic == NULL) {
octeontx_log_err("failed to allocate nic structure");
res = -ENOMEM;
goto err;
}
+ data->dev_private = nic;
nic->port_id = port;
nic->evdev = evdev;
@@ -1045,21 +1047,11 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
goto err;
}
- /* Reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(octtx_name);
- if (eth_dev == NULL) {
- octeontx_log_err("failed to allocate rte_eth_dev");
- res = -ENOMEM;
- goto err;
- }
-
eth_dev->device = &dev->device;
eth_dev->intr_handle = NULL;
eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->data->numa_node = dev->device.numa_node;
- data = eth_dev->data;
- data->dev_private = nic;
data->port_id = eth_dev->data->port_id;
nic->ev_queues = 1;
@@ -1111,12 +1103,7 @@ err:
if (nic)
octeontx_port_close(nic);
- if (eth_dev != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- rte_free(data);
- rte_free(nic);
- rte_eth_dev_release_port(eth_dev);
- }
+ rte_eth_dev_release_port(eth_dev);
return res;
}
@@ -1141,16 +1128,22 @@ octeontx_remove(struct rte_vdev_device *dev)
if (eth_dev == NULL)
return -ENODEV;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ rte_eth_dev_release_port(eth_dev);
+ continue;
+ }
+
nic = octeontx_pmd_priv(eth_dev);
rte_event_dev_stop(nic->evdev);
PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
- rte_free(eth_dev->data->mac_addrs);
- rte_free(eth_dev->data->dev_private);
rte_eth_dev_release_port(eth_dev);
rte_event_dev_close(nic->evdev);
}
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
/* Free FC resource */
octeontx_pko_fc_free();
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 14f16969..920f6f89 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -28,8 +28,7 @@
#define OCTEONTX_MAX_BGX_PORTS 4
#define OCTEONTX_MAX_LMAC_PER_BGX 4
-#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CRC_STRIP \
- | DEV_RX_OFFLOAD_CHECKSUM)
+#define OCTEONTX_RX_OFFLOADS DEV_RX_OFFLOAD_CHECKSUM
#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE
static inline struct octeontx_nic *
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index a9149b4e..1e201f32 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -19,40 +19,6 @@
#include "octeontx_rxtx.h"
#include "octeontx_logs.h"
-
-static __rte_always_inline uint16_t __hot
-__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
- struct rte_mbuf *tx_pkt)
-{
- uint64_t cmd_buf[4];
- uint16_t gaura_id;
-
- if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
- return -ENOSPC;
-
- /* Get the gaura Id */
- gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);
-
- /* Setup PKO_SEND_HDR_S */
- cmd_buf[0] = tx_pkt->data_len & 0xffff;
- cmd_buf[1] = 0x0;
-
- /* Set don't free bit if reference count > 1 */
- if (rte_mbuf_refcnt_read(tx_pkt) > 1)
- cmd_buf[0] |= (1ULL << 58); /* SET DF */
-
- /* Setup PKO_SEND_GATHER_S */
- cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
- cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
- PKO_SEND_GATHER_LDTYPE(0x1ull) |
- PKO_SEND_GATHER_GAUAR((long)gaura_id) |
- tx_pkt->data_len;
-
- octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
-
- return 0;
-}
-
uint16_t __hot
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -63,6 +29,7 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
count = 0;
+ rte_cio_wmb();
while (count < nb_pkts) {
res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
dq->fc_status_va,
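
The rte_cio_wmb() added before the burst loop orders the CPU's earlier stores (packet data and any command words prepared by the caller) ahead of the I/O stores that __octeontx_xmit_pkts() issues through the LMT line, so the PKO never observes stale buffer contents. The general shape of the pattern, sketched outside the driver (word values are placeholders; the real cmd_buf layout is in octeontx_rxtx.h further down, and the patch hoists the barrier in front of the whole burst rather than per command):

	cmd_buf[0] = send_hdr_word;		/* 1. build the command in memory */
	cmd_buf[2] = gather_word;
	cmd_buf[3] = rte_mbuf_data_iova(tx_pkt);

	rte_cio_wmb();				/* 2. order those stores before any I/O store */

	octeontx_reg_lmtst(lmtline_va, ioreg_va,	/* 3. hand the command to the PKO */
			   cmd_buf, PKO_CMD_SZ);
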
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index fe3e5ccd..d0d73b30 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -100,6 +100,39 @@ ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
};
+static __rte_always_inline int
+__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
+ struct rte_mbuf *tx_pkt)
+{
+ uint64_t cmd_buf[4] __rte_cache_aligned;
+ uint16_t gaura_id;
+
+ if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
+ return -ENOSPC;
+
+ /* Get the gaura Id */
+ gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
+
+ /* Setup PKO_SEND_HDR_S */
+ cmd_buf[0] = tx_pkt->data_len & 0xffff;
+ cmd_buf[1] = 0x0;
+
+ /* Set don't free bit if reference count > 1 */
+ if (rte_mbuf_refcnt_read(tx_pkt) > 1)
+ cmd_buf[0] |= (1ULL << 58); /* SET DF */
+
+ /* Setup PKO_SEND_GATHER_S */
+ cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
+ cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
+ PKO_SEND_GATHER_LDTYPE(0x1ull) |
+ PKO_SEND_GATHER_GAUAR((long)gaura_id) |
+ tx_pkt->data_len;
+
+ octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
+
+ return 0;
+}
+
uint16_t
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index e8810a17..7bbe72e2 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -7,6 +7,14 @@
#include <time.h>
#include <net/if.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#if defined(RTE_EXEC_ENV_BSDAPP)
+#include <sys/sysctl.h>
+#include <net/if_dl.h>
+#endif
#include <pcap.h>
@@ -17,6 +25,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_bus_vdev.h>
+#include <rte_string_fns.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
@@ -29,6 +38,7 @@
#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
+#define ETH_PCAP_PHY_MAC_ARG "phy_mac"
#define ETH_PCAP_ARG_MAXLEN 64
@@ -39,6 +49,7 @@ static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
+static uint8_t iface_idx;
struct queue_stat {
volatile unsigned long pkts;
@@ -66,8 +77,10 @@ struct pcap_tx_queue {
struct pmd_internals {
struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
+ struct ether_addr eth_addr;
int if_index;
int single_iface;
+ int phy_mac;
};
struct pmd_devargs {
@@ -78,6 +91,7 @@ struct pmd_devargs {
const char *name;
const char *type;
} queue[RTE_PMD_PCAP_MAX_QUEUES];
+ int phy_mac;
};
static const char *valid_arguments[] = {
@@ -87,13 +101,10 @@ static const char *valid_arguments[] = {
ETH_PCAP_RX_IFACE_IN_ARG,
ETH_PCAP_TX_IFACE_ARG,
ETH_PCAP_IFACE_ARG,
+ ETH_PCAP_PHY_MAC_ARG,
NULL
};
-static struct ether_addr eth_addr = {
- .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 }
-};
-
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -553,7 +564,6 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -862,6 +872,20 @@ open_tx_iface(const char *key, const char *value, void *extra_args)
return open_iface(key, value, extra_args);
}
+static int
+select_phy_mac(const char *key __rte_unused, const char *value,
+ void *extra_args)
+{
+ if (extra_args) {
+ const int phy_mac = atoi(value);
+ int *enable_phy_mac = extra_args;
+
+ if (phy_mac)
+ *enable_phy_mac = 1;
+ }
+ return 0;
+}
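
With the new ETH_PCAP_PHY_MAC_ARG, a pcap vdev bound to a real interface through the "iface" devarg can inherit that interface's MAC address instead of the synthetic 02:70:63:61:70:xx one, e.g. (hypothetical invocation, interface name is an example):

	--vdev 'net_pcap0,iface=eth0,phy_mac=1'

The handler above only latches a boolean; the actual MAC lookup happens in eth_pcap_update_mac(), added further down in this patch.
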
+
static struct rte_vdev_driver pmd_pcap_drv;
static int
@@ -889,11 +913,20 @@ pmd_init_internals(struct rte_vdev_device *vdev,
* - and point eth_dev structure to new eth_dev_data structure
*/
*internals = (*eth_dev)->data->dev_private;
+ /*
+ * Interface MAC = 02:70:63:61:70:<iface_idx>
+ * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx'
+ * where the middle 4 characters are converted to hex.
+ */
+ (*internals)->eth_addr = (struct ether_addr) {
+ .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
+ };
+ (*internals)->phy_mac = 0;
data = (*eth_dev)->data;
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
- data->mac_addrs = &eth_addr;
+ data->mac_addrs = &(*internals)->eth_addr;
/*
* NOTE: we'll replace the data element, of originally allocated
@@ -905,14 +938,95 @@ pmd_init_internals(struct rte_vdev_device *vdev,
}
static int
+eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
+ const unsigned int numa_node)
+{
+#if defined(RTE_EXEC_ENV_LINUXAPP)
+ void *mac_addrs;
+ struct ifreq ifr;
+ int if_fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+ if (if_fd == -1)
+ return -1;
+
+ rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name));
+ if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) {
+ close(if_fd);
+ return -1;
+ }
+
+ mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
+ if (!mac_addrs) {
+ close(if_fd);
+ return -1;
+ }
+
+ PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
+ eth_dev->data->mac_addrs = mac_addrs;
+ rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
+ ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+
+ close(if_fd);
+
+ return 0;
+
+#elif defined(RTE_EXEC_ENV_BSDAPP)
+ void *mac_addrs;
+ struct if_msghdr *ifm;
+ struct sockaddr_dl *sdl;
+ int mib[6];
+ size_t len = 0;
+ char *buf;
+
+ mib[0] = CTL_NET;
+ mib[1] = AF_ROUTE;
+ mib[2] = 0;
+ mib[3] = AF_LINK;
+ mib[4] = NET_RT_IFLIST;
+ mib[5] = if_nametoindex(if_name);
+
+ if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)
+ return -1;
+
+ if (len == 0)
+ return -1;
+
+ buf = rte_malloc(NULL, len, 0);
+ if (!buf)
+ return -1;
+
+ if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
+ rte_free(buf);
+ return -1;
+ }
+ ifm = (struct if_msghdr *)buf;
+ sdl = (struct sockaddr_dl *)(ifm + 1);
+
+ mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
+ if (!mac_addrs) {
+ rte_free(buf);
+ return -1;
+ }
+
+ PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
+ eth_dev->data->mac_addrs = mac_addrs;
+ rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
+ LLADDR(sdl), ETHER_ADDR_LEN);
+
+ rte_free(buf);
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static int
eth_from_pcaps_common(struct rte_vdev_device *vdev,
struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
- struct rte_kvargs *kvlist, struct pmd_internals **internals,
- struct rte_eth_dev **eth_dev)
+ struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
- struct rte_kvargs_pair *pair = NULL;
- unsigned int k_idx;
unsigned int i;
/* do some parameter checking */
@@ -944,17 +1058,6 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev,
snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
}
- for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
- pair = &kvlist->pairs[k_idx];
- if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
- break;
- }
-
- if (pair == NULL)
- (*internals)->if_index = 0;
- else
- (*internals)->if_index = if_nametoindex(pair->value);
-
return 0;
}
@@ -962,15 +1065,14 @@ static int
eth_from_pcaps(struct rte_vdev_device *vdev,
struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
- struct rte_kvargs *kvlist, int single_iface,
- unsigned int using_dumpers)
+ int single_iface, unsigned int using_dumpers)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
int ret;
ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, kvlist, &internals, &eth_dev);
+ tx_queues, nb_tx_queues, &internals, &eth_dev);
if (ret < 0)
return ret;
@@ -978,6 +1080,18 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
/* store weather we are using a single interface for rx/tx or not */
internals->single_iface = single_iface;
+ if (single_iface) {
+ internals->if_index = if_nametoindex(rx_queues->queue[0].name);
+
+ /* phy_mac arg is applied only if "iface" devarg is provided */
+ if (rx_queues->phy_mac) {
+ int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
+ eth_dev, vdev->device.numa_node);
+ if (ret == 0)
+ internals->phy_mac = 1;
+ }
+ }
+
eth_dev->rx_pkt_burst = eth_pcap_rx;
if (using_dumpers)
@@ -1008,8 +1122,7 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
PMD_LOG(ERR, "Failed to probe %s", name);
@@ -1034,12 +1147,18 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
&open_rx_tx_iface, &pcaps);
-
if (ret < 0)
goto free_kvlist;
dumpers.queue[0] = pcaps.queue[0];
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
+ &select_phy_mac, &pcaps.phy_mac);
+ if (ret < 0)
+ goto free_kvlist;
+
+ dumpers.phy_mac = pcaps.phy_mac;
+
single_iface = 1;
pcaps.num_of_queue = 1;
dumpers.num_of_queue = 1;
@@ -1084,7 +1203,7 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
create_eth:
ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
- dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);
+ dumpers.num_of_queue, single_iface, is_tx_pcap);
free_kvlist:
rte_kvargs_free(kvlist);
@@ -1095,6 +1214,7 @@ free_kvlist:
static int
pmd_pcap_remove(struct rte_vdev_device *dev)
{
+ struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
@@ -1108,7 +1228,12 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
if (eth_dev == NULL)
return -1;
- rte_free(eth_dev->data->dev_private);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ internals = eth_dev->data->dev_private;
+ if (internals != NULL && internals->phy_mac == 0)
+ /* not dynamically allocated, must not be freed */
+ eth_dev->data->mac_addrs = NULL;
+ }
rte_eth_dev_release_port(eth_dev);
@@ -1128,7 +1253,8 @@ RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
ETH_PCAP_RX_IFACE_ARG "=<ifc> "
ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
ETH_PCAP_TX_IFACE_ARG "=<ifc> "
- ETH_PCAP_IFACE_ARG "=<ifc>");
+ ETH_PCAP_IFACE_ARG "=<ifc> "
+ ETH_PCAP_PHY_MAC_ARG "=<int>");
RTE_INIT(eth_pcap_init_log)
{
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 488ca1d9..2ecbd8d2 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -105,6 +105,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_filter.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index d5d6f8e2..693328f1 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -17,7 +17,7 @@
/* Array of memzone pointers */
static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
/* Counter to track current memzone allocated */
-uint16_t ecore_mz_count;
+static uint16_t ecore_mz_count;
unsigned long qede_log2_align(unsigned long n)
{
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 630867fa..1abf44fa 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -447,10 +447,13 @@ u32 qede_crc32(u32 crc, u8 *ptr, u32 length);
#define OSAL_CRC8(table, pdata, nbytes, crc) 0
#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_HW_INFO_CHANGE(p_hwfn, change) nothing
#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
#define OSAL_DIV_S64(a, b) ((a) / (b))
#define OSAL_LLDP_RX_TLVS(p_hwfn, tlv_buf, tlv_size) nothing
+#define OSAL_DBG_ALLOC_USER_DATA(p_hwfn, user_data_ptr) (0)
+#define OSAL_DB_REC_OCCURRED(p_hwfn) nothing
#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index ca8e59db..2aaf298f 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -95,8 +95,8 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 33
-#define FW_REVISION_VERSION 12
+#define FW_MINOR_VERSION 37
+#define FW_REVISION_VERSION 7
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -1033,13 +1033,14 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
-/* RoCE completion flag */
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+/* RoCE ack request (will be set 1) */
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+/* RoCE completion flag for FW use */
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30
/* Connection type is iWARP */
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 5d79fdf0..524a1dd4 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -19,6 +19,7 @@
#include <zlib.h>
#endif
+#include "ecore_status.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_debug_tools.h"
#include "ecore_hsi_init_func.h"
@@ -27,8 +28,8 @@
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
-#define ECORE_MINOR_VERSION 30
-#define ECORE_REVISION_VERSION 8
+#define ECORE_MINOR_VERSION 37
+#define ECORE_REVISION_VERSION 20
#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
@@ -207,6 +208,7 @@ struct ecore_l2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
+struct ecore_llh_info;
struct ecore_rt_data {
u32 *init_val;
@@ -543,6 +545,9 @@ enum ecore_mf_mode_bit {
/* Use stag for steering */
ECORE_MF_8021AD_TAGGING,
+
+ /* Allow FIP discovery fallback */
+ ECORE_MF_FIP_SPECIAL,
};
enum ecore_ufp_mode {
@@ -660,6 +665,7 @@ struct ecore_hwfn {
#endif
struct dbg_tools_data dbg_info;
+ void *dbg_user_info;
struct z_stream_s *stream;
@@ -739,6 +745,7 @@ struct ecore_dev {
#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+#define ECORE_IS_E4(dev) (ECORE_IS_BB(dev) || ECORE_IS_AH(dev))
u16 vendor_id;
u16 device_id;
@@ -833,8 +840,26 @@ struct ecore_dev {
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1)
+ /* Engine affinity */
+ u8 l2_affin_hint;
+ u8 fir_affin;
+ u8 iwarp_affin;
+ /* Macro for getting the engine-affinitized hwfn for FCoE/iSCSI/RoCE */
+#define ECORE_FIR_AFFIN_HWFN(dev) (&dev->hwfns[dev->fir_affin])
+ /* Macro for getting the engine-affinitized hwfn for iWARP */
+#define ECORE_IWARP_AFFIN_HWFN(dev) (&dev->hwfns[dev->iwarp_affin])
+ /* Generic macro for getting the engine-affinitized hwfn */
+#define ECORE_AFFIN_HWFN(dev) \
+ (ECORE_IS_IWARP_PERSONALITY(ECORE_LEADING_HWFN(dev)) ? \
+ ECORE_IWARP_AFFIN_HWFN(dev) : \
+ ECORE_FIR_AFFIN_HWFN(dev))
+ /* Macro for getting the index (0/1) of the engine-affinitized hwfn */
+#define ECORE_AFFIN_HWFN_IDX(dev) \
+ (IS_LEAD_HWFN(ECORE_AFFIN_HWFN(dev)) ? 0 : 1)
+
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
@@ -869,6 +894,12 @@ struct ecore_dev {
#ifndef ASIC_ONLY
bool b_is_emul_full;
#endif
+ /* LLH info */
+ u8 ppfid_bitmap;
+ struct ecore_llh_info *p_llh_info;
+
+ /* Indicates whether this PF serves a storage target */
+ bool b_is_target;
#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
void *firmware;
@@ -958,6 +989,8 @@ void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
enum ecore_db_rec_exec);
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn);
+
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
@@ -965,6 +998,29 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
-#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ecore_device_num_ports((_p_hwfn)->p_dev))
+
+/* The PFID<->PPFID calculation is based on the relative index of a PF on its
+ * port. In BB there is a bug in the LLH in which the PPFID is actually engine
+ * based, and thus it equals the PFID.
+ */
+#define ECORE_PFID_BY_PPFID(_p_hwfn, abs_ppfid) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? \
+ (abs_ppfid) : \
+ (abs_ppfid) * (_p_hwfn)->p_dev->num_ports_in_engine + \
+ MFW_PORT(_p_hwfn))
+#define ECORE_PPFID_BY_PFID(_p_hwfn) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? \
+ (_p_hwfn)->rel_pf_id : \
+ (_p_hwfn)->rel_pf_id / (_p_hwfn)->p_dev->num_ports_in_engine)
+
+enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 addr,
+ u32 val);
+
+/* Utility functions for dumping the content of the NIG LLH filters */
+enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid);
+enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);
#endif /* __ECORE_H */
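
To make the new PFID<->PPFID macros concrete, a worked example under assumed values (an AH device, num_ports_in_engine = 2, two ports on the device, and a PF with rel_pf_id = abs_pf_id = 5):

	MFW_PORT                     = 5 % 2     = 1
	ECORE_PPFID_BY_PFID          = 5 / 2     = 2
	ECORE_PFID_BY_PPFID(hwfn, 2) = 2 * 2 + 1 = 5	/* round-trips back to the PF */

On BB both macros degenerate to the identity, which is exactly the LLH erratum the comment above describes.
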
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index bf36ce58..5c3370e1 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1133,6 +1133,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
+ /* Set the cxt mngr pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
/* Initialize ILT client registers */
clients = p_mngr->clients;
clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
@@ -1174,13 +1177,13 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
/* Initialize the dynamic ILT allocation mutex */
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex)) {
+ DP_NOTICE(p_hwfn, false, "Failed to alloc p_mngr->mutex\n");
+ return ECORE_NOMEM;
+ }
#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
- /* Set the cxt mangr pointer priori to further allocations */
- p_hwfn->p_cxt_mngr = p_mngr;
-
return ECORE_SUCCESS;
}
@@ -2111,7 +2114,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
- 0 /* no flags */);
+ OSAL_NULL /* default parameters */);
if (elem_type == ECORE_ELEM_CXT) {
u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
@@ -2218,7 +2221,7 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
(u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset,
sizeof(ilt_hw_entry) / sizeof(u32),
- 0 /* no flags */);
+ OSAL_NULL /* default parameters */);
}
ecore_ptt_release(p_hwfn, p_ptt);
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 96678745..cbc69cde 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -129,7 +129,7 @@ u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
static void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
- struct ecore_hwfn *p_hwfn,
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
@@ -154,12 +154,19 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
/* QM reconf data */
if (p_hwfn->hw_info.personality == personality)
p_hwfn->hw_info.offload_tc = tc;
+
+ /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) &&
+ (type == DCBX_PROTOCOL_ROCE)) {
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+ }
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
- struct ecore_hwfn *p_hwfn,
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
@@ -175,7 +182,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
- ecore_dcbx_set_params(p_data, p_hwfn, enable,
+ ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
prio, tc, type, personality);
}
}
@@ -231,7 +238,7 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
-ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
+ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
int count, u8 dcbx_version)
@@ -280,8 +287,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
enable = true;
}
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
- priority, tc, type);
+ ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
+ enable, priority, tc, type);
}
}
@@ -302,8 +309,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
- enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
+ /* if no app tlv was present, don't override in FW */
+ ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,
priority, tc, type);
}
@@ -314,11 +321,11 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
-ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
+ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct dcbx_app_priority_feature *p_app;
struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_results data = { 0 };
+ struct ecore_dcbx_results data;
struct dcbx_ets_feature *p_ets;
struct ecore_hw_info *p_info;
u32 pri_tc_tbl, flags;
@@ -338,7 +345,8 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
p_info = &p_hwfn->hw_info;
num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ OSAL_MEMSET(&data, 0, sizeof(struct ecore_dcbx_results));
+ rc = ecore_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc != ECORE_SUCCESS)
return rc;
@@ -879,7 +887,7 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
- rc = ecore_dcbx_process_mib_info(p_hwfn);
+ rc = ecore_dcbx_process_mib_info(p_hwfn, p_ptt);
if (!rc) {
/* reconfigure tcs of QM queues according
* to negotiation results
@@ -893,12 +901,19 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
- /* Update the DSCP to TC mapping bit if required */
+ /* Update the DSCP to TC mapping enable bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
p_hwfn->p_dcbx_info->dscp_nig_update) {
u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
+ u32 addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE;
+
+ rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to update the DSCP to TC mapping enable bit\n");
+ return rc;
+ }
- ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, val);
p_hwfn->p_dcbx_info->dscp_nig_update = false;
}
@@ -1533,3 +1548,59 @@ ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
+
+enum _ecore_status_t
+ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
+ u8 dscp_index, u8 *p_dscp_pri)
+{
+ struct ecore_dcbx_get *p_dcbx_info;
+ enum _ecore_status_t rc;
+
+ if (dscp_index >= ECORE_DCBX_DSCP_SIZE) {
+ DP_ERR(p_hwfn, "Invalid dscp index %d\n", dscp_index);
+ return ECORE_INVAL;
+ }
+
+ p_dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_dcbx_info));
+ if (!p_dcbx_info)
+ return ECORE_NOMEM;
+
+ OSAL_MEMSET(p_dcbx_info, 0, sizeof(*p_dcbx_info));
+ rc = ecore_dcbx_query_params(p_hwfn, p_dcbx_info,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ OSAL_FREE(p_hwfn->p_dev, p_dcbx_info);
+ return rc;
+ }
+
+ *p_dscp_pri = p_dcbx_info->dscp.dscp_pri_map[dscp_index];
+ OSAL_FREE(p_hwfn->p_dev, p_dcbx_info);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 dscp_index, u8 pri_val)
+{
+ struct ecore_dcbx_set dcbx_set;
+ enum _ecore_status_t rc;
+
+ if (dscp_index >= ECORE_DCBX_DSCP_SIZE ||
+ pri_val >= ECORE_MAX_PFC_PRIORITIES) {
+ DP_ERR(p_hwfn, "Invalid dscp params: index = %d pri = %d\n",
+ dscp_index, pri_val);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEMSET(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = ecore_dcbx_get_config_params(p_hwfn, &dcbx_set);
+ if (rc)
+ return rc;
+
+ dcbx_set.override_flags = ECORE_DCBX_OVERRIDE_DSCP_CFG;
+ dcbx_set.dscp.dscp_pri_map[dscp_index] = pri_val;
+
+ return ecore_dcbx_config_params(p_hwfn, p_ptt, &dcbx_set, 1);
+}
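
A minimal usage sketch for the two new DSCP helpers (illustrative values; p_hwfn/p_ptt are assumed to come from the caller's context, and error handling is trimmed):

	u8 pri;

	/* read the priority currently mapped to DSCP codepoint 18 ... */
	if (ecore_dcbx_get_dscp_priority(p_hwfn, 18, &pri) == ECORE_SUCCESS &&
	    pri != 3)
		/* ... and remap it to priority 3 via the DCBX config path */
		ecore_dcbx_set_dscp_priority(p_hwfn, p_ptt, 18, 3);
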
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index eaf8e082..6fad2ecc 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -228,6 +228,16 @@ enum _ecore_status_t
ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_sys_tlvs *p_params);
+/* Returns priority value for a given dscp index */
+enum _ecore_status_t
+ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
+ u8 dscp_index, u8 *p_dscp_pri);
+
+/* Sets priority value for a given dscp index */
+enum _ecore_status_t
+ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 dscp_index, u8 pri_val);
+
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 31f1f3ee..cf454b19 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -352,6 +352,1189 @@ void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
}
/******************** Doorbell Recovery end ****************/
+/********************************** NIG LLH ***********************************/
+
+enum ecore_llh_filter_type {
+ ECORE_LLH_FILTER_TYPE_MAC,
+ ECORE_LLH_FILTER_TYPE_PROTOCOL,
+};
+
+struct ecore_llh_mac_filter {
+ u8 addr[ETH_ALEN];
+};
+
+struct ecore_llh_protocol_filter {
+ enum ecore_llh_prot_filter_type_t type;
+ u16 source_port_or_eth_type;
+ u16 dest_port;
+};
+
+union ecore_llh_filter {
+ struct ecore_llh_mac_filter mac;
+ struct ecore_llh_protocol_filter protocol;
+};
+
+struct ecore_llh_filter_info {
+ bool b_enabled;
+ u32 ref_cnt;
+ enum ecore_llh_filter_type type;
+ union ecore_llh_filter filter;
+};
+
+struct ecore_llh_info {
+ /* Number of LLH filters banks */
+ u8 num_ppfid;
+
+#define MAX_NUM_PPFID 8
+ u8 ppfid_array[MAX_NUM_PPFID];
+
+ /* Array of filters arrays:
+ * "num_ppfid" elements of filters banks, where each is an array of
+ * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
+ */
+ struct ecore_llh_filter_info **pp_filters;
+};
+
+static void ecore_llh_free(struct ecore_dev *p_dev)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ u32 i;
+
+ if (p_llh_info != OSAL_NULL) {
+ if (p_llh_info->pp_filters != OSAL_NULL) {
+ for (i = 0; i < p_llh_info->num_ppfid; i++)
+ OSAL_FREE(p_dev, p_llh_info->pp_filters[i]);
+ }
+
+ OSAL_FREE(p_dev, p_llh_info->pp_filters);
+ }
+
+ OSAL_FREE(p_dev, p_llh_info);
+ p_dev->p_llh_info = OSAL_NULL;
+}
+
+static enum _ecore_status_t ecore_llh_alloc(struct ecore_dev *p_dev)
+{
+ struct ecore_llh_info *p_llh_info;
+ u32 size;
+ u8 i;
+
+ p_llh_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_llh_info));
+ if (!p_llh_info)
+ return ECORE_NOMEM;
+ p_dev->p_llh_info = p_llh_info;
+
+ for (i = 0; i < MAX_NUM_PPFID; i++) {
+ if (!(p_dev->ppfid_bitmap & (0x1 << i)))
+ continue;
+
+ p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP, "ppfid_array[%d] = %hhd\n",
+ p_llh_info->num_ppfid, i);
+ p_llh_info->num_ppfid++;
+ }
+
+ size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
+ p_llh_info->pp_filters = OSAL_ZALLOC(p_dev, GFP_KERNEL, size);
+ if (!p_llh_info->pp_filters)
+ return ECORE_NOMEM;
+
+ size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(**p_llh_info->pp_filters);
+ for (i = 0; i < p_llh_info->num_ppfid; i++) {
+ p_llh_info->pp_filters[i] = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ size);
+ if (!p_llh_info->pp_filters[i])
+ return ECORE_NOMEM;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_llh_shadow_sanity(struct ecore_dev *p_dev,
+ u8 ppfid, u8 filter_idx,
+ const char *action)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(p_dev, false,
+ "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
+ action, ppfid, p_llh_info->num_ppfid);
+ return ECORE_INVAL;
+ }
+
+ if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_dev, false,
+ "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
+ action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_LLH_INVALID_FILTER_IDX 0xff
+
+static enum _ecore_status_t
+ecore_llh_shadow_search_filter(struct ecore_dev *p_dev, u8 ppfid,
+ union ecore_llh_filter *p_filter,
+ u8 *p_filter_idx)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+ u8 i;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "search");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!OSAL_MEMCMP(p_filter, &p_filters[i].filter,
+ sizeof(*p_filter))) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_get_free_idx(struct ecore_dev *p_dev, u8 ppfid,
+ u8 *p_filter_idx)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+ u8 i;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "get_free_idx");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!p_filters[i].b_enabled) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+__ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, u8 filter_idx,
+ enum ecore_llh_filter_type type,
+ union ecore_llh_filter *p_filter, u32 *p_ref_cnt)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "add");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ p_filters[filter_idx].b_enabled = true;
+ p_filters[filter_idx].type = type;
+ OSAL_MEMCPY(&p_filters[filter_idx].filter, p_filter,
+ sizeof(p_filters[filter_idx].filter));
+ }
+
+ *p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_filter_type type,
+ union ecore_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ enum _ecore_status_t rc;
+
+ /* Check if the same filter already exist */
+ rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
+ p_filter_idx);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Find a new entry in case of a new filter */
+ if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
+ rc = ecore_llh_shadow_get_free_idx(p_dev, ppfid, p_filter_idx);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ /* No free entry was found */
+ if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(p_dev, false,
+ "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
+ ppfid);
+ return ECORE_NORESOURCES;
+ }
+
+ return __ecore_llh_shadow_add_filter(p_dev, ppfid, *p_filter_idx, type,
+ p_filter, p_ref_cnt);
+}
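
The shadow keeps one reference count per (ppfid, filter) pair so that identical requests share a single hardware slot; only the 0->1 transition (and, on removal, 1->0) has to touch the NIG registers. An illustrative sequence, not driver code:

	u8 idx;
	u32 cnt;

	/* first add: a free slot is claimed, cnt == 1, caller programs the HW */
	ecore_llh_shadow_add_filter(p_dev, ppfid, ECORE_LLH_FILTER_TYPE_MAC,
				    &filter, &idx, &cnt);
	/* identical second add: same idx comes back, cnt == 2, no HW write needed */
	ecore_llh_shadow_add_filter(p_dev, ppfid, ECORE_LLH_FILTER_TYPE_MAC,
				    &filter, &idx, &cnt);
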
+
+static enum _ecore_status_t
+__ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 filter_idx, u32 *p_ref_cnt)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "remove");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ DP_NOTICE(p_dev, false,
+ "LLH shadow: trying to remove a filter with ref_cnt=0\n");
+ return ECORE_INVAL;
+ }
+
+ *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
+ if (!p_filters[filter_idx].ref_cnt)
+ OSAL_MEM_ZERO(&p_filters[filter_idx],
+ sizeof(p_filters[filter_idx]));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
+ union ecore_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
+ p_filter_idx);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* No matching filter was found */
+ if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(p_dev, false,
+ "Failed to find a filter in the LLH shadow\n");
+ return ECORE_INVAL;
+ }
+
+ return __ecore_llh_shadow_remove_filter(p_dev, ppfid, *p_filter_idx,
+ p_ref_cnt);
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "remove_all");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ OSAL_MEM_ZERO(p_filters,
+ NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
+ u8 rel_ppfid, u8 *p_abs_ppfid)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ u8 ppfids = p_llh_info->num_ppfid - 1;
+
+ if (rel_ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(p_dev, false,
+ "rel_ppfid %d is not valid, available indices are 0..%hhd\n",
+ rel_ppfid, ppfids);
+ return ECORE_INVAL;
+ }
+
+ *p_abs_ppfid = p_llh_info->ppfid_array[rel_ppfid];
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+__ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum ecore_eng eng;
+ u8 ppfid;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to get the engine affinity configuration\n");
+ return rc;
+ }
+
+ /* RoCE PF is bound to a single engine */
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+ eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
+ rc = ecore_llh_set_roce_affinity(p_dev, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the RoCE engine affinity\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Set the engine affinity of RoCE packets as %d\n",
+ eng);
+ }
+
+ /* Storage PF is bound to a single engine while L2 PF uses both */
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
+ eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
+ else /* L2_PERSONALITY */
+ eng = ECORE_BOTH_ENG;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool avoid_eng_affin)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum _ecore_status_t rc;
+
+ /* Backwards compatible mode:
+ * - RoCE packets - Use engine 0.
+ * - Non-RoCE packets - Use connection based classification for L2 PFs,
+ * and engine 0 otherwise.
+ */
+ if (avoid_eng_affin) {
+ enum ecore_eng eng;
+ u8 ppfid;
+
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+ eng = ECORE_ENG0;
+ rc = ecore_llh_set_roce_affinity(p_dev, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the RoCE engine affinity\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH [backwards compatible mode]: Set the engine affinity of RoCE packets as %d\n",
+ eng);
+ }
+
+ eng = (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) ? ECORE_ENG0
+ : ECORE_BOTH_ENG;
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH [backwards compatible mode]: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return ECORE_SUCCESS;
+ }
+
+ return __ecore_llh_set_engine_affin(p_hwfn, p_ptt);
+}
+
+static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool avoid_eng_affin)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 ppfid, abs_ppfid;
+ enum _ecore_status_t rc;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ u32 addr;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
+ ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
+ !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) {
+ rc = ecore_llh_add_mac_filter(p_dev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false,
+ "Failed to add an LLH filter with the primary MAC\n");
+ }
+
+ if (ECORE_IS_CMT(p_dev)) {
+ rc = ecore_llh_set_engine_affin(p_hwfn, p_ptt, avoid_eng_affin);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev)
+{
+ return p_dev->p_llh_info->num_ppfid;
+}
+
+enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev)
+{
+ return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0;
+}
+
+/* TBD - should be removed when these definitions are available in reg_addr.h */
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
+
+enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
+ u8 ppfid, enum ecore_eng eng)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 abs_ppfid;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!ECORE_IS_CMT(p_dev))
+ goto out;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ switch (eng) {
+ case ECORE_ENG0:
+ eng_sel = 0;
+ break;
+ case ECORE_ENG1:
+ eng_sel = 1;
+ break;
+ case ECORE_BOTH_ENG:
+ eng_sel = 2;
+ break;
+ default:
+ DP_NOTICE(p_dev, false,
+ "Invalid affinity value for ppfid [%d]\n", eng);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+
+ /* The iWARP affinity is set as the affinity of ppfid 0 */
+ if (!ppfid && ECORE_IS_IWARP_PERSONALITY(p_hwfn))
+ p_dev->iwarp_affin = (eng == ECORE_ENG1) ? 1 : 0;
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
+ enum ecore_eng eng)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 ppfid, abs_ppfid;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!ECORE_IS_CMT(p_dev))
+ goto out;
+
+ switch (eng) {
+ case ECORE_ENG0:
+ eng_sel = 0;
+ break;
+ case ECORE_ENG1:
+ eng_sel = 1;
+ break;
+ case ECORE_BOTH_ENG:
+ eng_sel = 2;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
+ 0xf /* QP bit 15 */);
+ break;
+ default:
+ DP_NOTICE(p_dev, false,
+ "Invalid affinity value for RoCE [%d]\n", eng);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+struct ecore_llh_filter_e4_details {
+ u64 value;
+ u32 mode;
+ u32 protocol_type;
+ u32 hdr_sel;
+ u32 enable;
+};
+
+static enum _ecore_status_t
+ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
+ struct ecore_llh_filter_e4_details *p_details,
+ bool b_write_access)
+{
+ u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+ u32 addr;
+
+ /* The NIG/LLH registers that are accessed in this function have only 16
+ * rows which are exposed to a PF. I.e. only the 16 filters of its
+ * default ppfid.
+ * Accessing filters of other ppfids requires pretending to other PFs,
+ * and thus the usage of the ecore_ppfid_rd/wr() functions.
+ */
+
+ /* Filter enable - should be done first when removing a filter */
+ if (b_write_access && !p_details->enable) {
+ addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->enable);
+ }
+
+ /* Filter value */
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;
+ OSAL_MEMSET(&params, 0, sizeof(params));
+
+ if (b_write_access) {
+ params.flags = ECORE_DMAE_FLAG_PF_DST;
+ params.dst_pfid = pfid;
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&p_details->value,
+ addr, 2 /* size_in_dwords */, &params);
+ } else {
+ params.flags = ECORE_DMAE_FLAG_PF_SRC |
+ ECORE_DMAE_FLAG_COMPLETION_DST;
+ params.src_pfid = pfid;
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr,
+ (u64)(osal_uintptr_t)&p_details->value,
+ 2 /* size_in_dwords */, &params);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Filter mode */
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);
+ else
+ p_details->mode = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
+ addr);
+
+ /* Filter protocol type */
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->protocol_type);
+ else
+ p_details->protocol_type = ecore_ppfid_rd(p_hwfn, p_ptt,
+ abs_ppfid, addr);
+
+ /* Filter header select */
+ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->hdr_sel);
+ else
+ p_details->hdr_sel = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
+ addr);
+
+ /* Filter enable - should be done last when adding a filter */
+ if (!b_write_access || p_details->enable) {
+ addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->enable);
+ else
+ p_details->enable = ecore_ppfid_rd(p_hwfn, p_ptt,
+ abs_ppfid, addr);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
+ u32 high, u32 low)
+{
+ struct ecore_llh_filter_e4_details filter_details;
+
+ filter_details.enable = 1;
+ filter_details.value = ((u64)high << 32) | low;
+ filter_details.hdr_sel =
+ OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits) ?
+ 1 : /* inner/encapsulated header */
+ 0; /* outer/tunnel header */
+ filter_details.protocol_type = filter_prot_type;
+ filter_details.mode = filter_prot_type ?
+ 1 : /* protocol-based classification */
+ 0; /* MAC-address based classification */
+
+ return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
+{
+ struct ecore_llh_filter_e4_details filter_details;
+
+ OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
+
+ return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
+}
+
+static enum _ecore_status_t
+ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
+ u32 low)
+{
+ return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, filter_prot_type,
+ high, low);
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx)
+{
+ return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+}
+
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN])
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ union ecore_llh_filter filter;
+ u8 filter_idx, abs_ppfid;
+ u32 high, low, ref_cnt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+ rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
+ ECORE_LLH_FILTER_TYPE_MAC,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ high = mac_addr[1] | (mac_addr[0] << 8);
+ low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
+ (mac_addr[2] << 24);
+ rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ 0, high, low);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Added MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
+ ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_dev, false,
+ "LLH: Failed to add MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
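For reference, a hedged worked example (not part of the patch) of the MAC high/low packing used above; the MAC value is a placeholder:

	u8 mac[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	u32 high = mac[1] | (mac[0] << 8);                 /* 0x0000aabb */
	u32 low = mac[5] | (mac[4] << 8) |
		  (mac[3] << 16) | (mac[2] << 24);         /* 0xccddeeff */
	u64 value = ((u64)high << 32) | low;               /* 0x0000aabbccddeeff */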
+
+static enum _ecore_status_t
+ecore_llh_protocol_filter_stringify(struct ecore_dev *p_dev,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port,
+ char *str, osal_size_t str_len)
+{
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ OSAL_SNPRINTF(str, str_len, "Ethertype 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ OSAL_SNPRINTF(str, str_len, "TCP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ OSAL_SNPRINTF(str, str_len, "UDP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "TCP dst port 0x%04x", dest_port);
+ break;
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "UDP dst port 0x%04x", dest_port);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ default:
+ DP_NOTICE(p_dev, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port,
+ u32 *p_high, u32 *p_low)
+{
+ *p_high = 0;
+ *p_low = 0;
+
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ *p_high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ *p_low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ *p_low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ *p_low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_dev, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
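Similarly, a hedged illustration (not from the patch) of how a protocol filter packs into the high/low pair; the port numbers are placeholders:

	u16 src = 0x1234, dst = 0x5678;
	u32 high = 0;                            /* used only by Ethertype filters */
	u32 low = ((u32)src << 16) | dst;        /* 0x12345678 for a src+dst port filter */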
+
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, type_bitmap;
+ char str[32];
+ union ecore_llh_filter filter;
+ u32 high, low, ref_cnt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ rc = ecore_llh_protocol_filter_stringify(p_dev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
+ ECORE_LLH_FILTER_TYPE_PROTOCOL,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = ecore_llh_protocol_filter_to_hilo(p_dev, type,
+ source_port_or_eth_type,
+ dest_port, &high, &low);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ type_bitmap = 0x1 << type;
+ rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ type_bitmap, high, low);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_hwfn, false,
+ "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
+ str, ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN])
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ union ecore_llh_filter filter;
+ u8 filter_idx, abs_ppfid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 ref_cnt;
+
+ if (p_ptt == OSAL_NULL)
+ return;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+ rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Removed MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
+ ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_dev, false,
+ "LLH: Failed to remove MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+}
+
+void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ char str[32];
+ union ecore_llh_filter filter;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 ref_cnt;
+
+ if (p_ptt == OSAL_NULL)
+ return;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ rc = ecore_llh_protocol_filter_stringify(p_dev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_dev, false,
+ "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
+ str, ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+}
+
+void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_ptt == OSAL_NULL)
+ return;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ rc = ecore_llh_shadow_remove_all_filters(p_dev, ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,
+ abs_ppfid, filter_idx);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+}
+
+void ecore_llh_clear_all_filters(struct ecore_dev *p_dev)
+{
+ u8 ppfid;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ return;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++)
+ ecore_llh_clear_ppfid_filters(p_dev, ppfid);
+}
+
+enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 addr,
+ u32 val)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 ppfid, abs_ppfid;
+ enum _ecore_status_t rc;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, val);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 ppfid)
+{
+ struct ecore_llh_filter_e4_details filter_details;
+ u8 abs_ppfid, filter_idx;
+ u32 addr;
+ enum _ecore_status_t rc;
+
+ rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ DP_NOTICE(p_hwfn, false,
+ "[rel_pf_id %hhd, ppfid={rel %hhd, abs %hhd}, engine_sel 0x%x]\n",
+ p_hwfn->rel_pf_id, ppfid, abs_ppfid,
+ ecore_rd(p_hwfn, p_ptt, addr));
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
+ rc = ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, &filter_details,
+ false /* read access */);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ DP_NOTICE(p_hwfn, false,
+ "filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
+ filter_idx, filter_details.enable,
+ (unsigned long)filter_details.value,
+ filter_details.mode,
+ filter_details.protocol_type, filter_details.hdr_sel);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ enum _ecore_status_t rc;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev)
+{
+ u8 ppfid;
+ enum _ecore_status_t rc;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_llh_dump_ppfid(p_dev, ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/******************************* NIG LLH - End ********************************/
+
/* Configurable */
#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
* load the driver. The number was
@@ -456,6 +1639,12 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
}
+static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->dbg_user_info);
+ p_hwfn->dbg_user_info = OSAL_NULL;
+}
+
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
@@ -470,6 +1659,8 @@ void ecore_resc_free(struct ecore_dev *p_dev)
OSAL_FREE(p_dev, p_dev->reset_stats);
+ ecore_llh_free(p_dev);
+
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -483,6 +1674,7 @@ void ecore_resc_free(struct ecore_dev *p_dev)
ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn);
+ ecore_dbg_user_data_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
/* destroy doorbell recovery mechanism */
@@ -562,12 +1754,11 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
{
u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
- /* @DPDK */
/* num RLs can't exceed resource amount of rls or vports or the
* dcqcn qps
*/
num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
- (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
+ RESC_NUM(p_hwfn, ECORE_VPORT));
/* make sure after we reserve the default and VF rls we'll have
* something left
@@ -828,7 +2019,7 @@ u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
if (tc > max_tc)
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
@@ -838,17 +2029,17 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
if (vf > max_vf)
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
{
u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
- if (rl > max_rl)
- DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
-
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+ /* for rate limiters, it is okay to use the modulo behavior - no
+ * DP_ERR
+ */
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + (rl % max_rl);
}
u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
@@ -1334,6 +2525,20 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
+
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate dbg user info structure\n");
+ goto alloc_err;
+ }
+ } /* hwfn loop */
+
+ rc = ecore_llh_alloc(p_dev);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate memory for the llh_info structure\n");
+ goto alloc_err;
}
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
@@ -1476,8 +2681,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
return ECORE_INVAL;
}
- if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
- &p_hwfn->p_dev->mf_bits))
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
else
hw_mode |= 1 << MODE_MF_SI;
@@ -1960,6 +3164,14 @@ enum ECORE_ROCE_EDPM_MODE {
ECORE_ROCE_EDPM_MODE_DISABLE = 2,
};
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ return false;
+
+ return true;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
@@ -2047,7 +3259,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
DP_INFO(p_hwfn,
" dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
p_hwfn->dpi_size, p_hwfn->dpi_count,
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ (!ecore_edpm_enabled(p_hwfn)) ?
"disabled" : "enabled");
/* Check return codes from above calls */
@@ -2073,17 +3285,7 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int hw_mode)
{
- u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE];
- u32 val;
enum _ecore_status_t rc = ECORE_SUCCESS;
- u8 i;
-
- /* In CMT for non-RoCE packets - use connection based classification */
- val = ECORE_IS_CMT(p_hwfn->p_dev) ? 0x8 : 0x0;
- for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++)
- ppf_to_eng_sel[i] = val;
- STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET,
- ppf_to_eng_sel);
/* In CMT the gate should be cleared by the 2nd hwfn */
if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
@@ -2135,12 +3337,8 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
-ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_tunnel_info *p_tunn,
- int hw_mode,
- bool b_hw_start,
- enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
+ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ int hw_mode, struct ecore_hw_init_params *p_params)
{
u8 rel_pf_id = p_hwfn->rel_pf_id;
u32 prs_reg;
@@ -2228,17 +3426,18 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
*/
rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
- if (b_hw_start) {
+
+ if (p_params->b_hw_start) {
/* enable interrupts */
- rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ rc = ecore_int_igu_enable(p_hwfn, p_ptt, p_params->int_mode);
if (rc != ECORE_SUCCESS)
return rc;
/* send function start command */
- rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
- allow_npar_tx_switch);
+ rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn,
+ p_params->allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Function start ramrod failed\n");
@@ -2410,6 +3609,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ether_type;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
@@ -2442,6 +3642,25 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits) ||
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_dev->mf_bits))) {
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits))
+ ether_type = ETHER_TYPE_VLAN;
+ else
+ ether_type = ETHER_TYPE_QINQ;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
@@ -2542,11 +3761,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_params->p_tunn,
p_hwfn->hw_info.hw_mode,
- p_params->b_hw_start,
- p_params->int_mode,
- p_params->allow_npar_tx_switch);
+ p_params);
break;
default:
DP_NOTICE(p_hwfn, false,
@@ -2591,6 +3807,34 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
if (IS_PF(p_dev)) {
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
+ if (IS_PF(p_dev)) {
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
+ if (IS_PF(p_dev)) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
drv_mb_param = STORM_FW_VERSION;
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
@@ -2599,17 +3843,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
- if (!b_default_mtu)
+ if (!b_default_mtu) {
rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.mtu);
- if (rc != ECORE_SUCCESS)
- DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ }
rc = ecore_mcp_ov_update_driver_state(p_hwfn,
p_hwfn->p_main_ptt,
ECORE_OV_DRIVER_STATE_DISABLED);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_ESWITCH_NONE);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
return rc;
@@ -2742,6 +3992,12 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
rc2 = ECORE_UNKNOWN_ERROR;
}
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point we don't expect the FW to send us async
+ * events
+ */
+
/* perform debug action after PF stop was sent */
OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
@@ -2778,6 +4034,12 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
+ if (IS_LEAD_HWFN(p_hwfn) &&
+ OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
+ !ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+ ecore_llh_remove_mac_filter(p_dev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+
if (!p_dev->recov_in_prog) {
ecore_verify_reg_val(p_hwfn, p_ptt,
QM_REG_USG_CNT_PF_TX, 0);
@@ -2987,15 +4249,30 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
}
- if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
- feat_num[ECORE_FCOE_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
+ &feat_num[ECORE_FCOE_CQ] :
+ &feat_num[ECORE_ISCSI_CQ];
+ u32 limit = sb_cnt.cnt;
- if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
- feat_num[ECORE_ISCSI_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ /* The number of queues should not exceed the number of FP SBs.
+ * In storage target, the queues are divided into pairs of a CQ
+ * and a CmdQ, and each pair uses a single SB. The limit in
+ * this case should allow a max ratio of 2:1 instead of 1:1.
+ */
+ if (p_hwfn->p_dev->b_is_target)
+ limit *= 2;
+ *p_storage_feat = OSAL_MIN_T(u32, limit,
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
+ /* @DPDK */
+ /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
+ * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
+ */
+ *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
+ (NUM_OF_GLOBAL_QUEUES / 2));
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
@@ -3276,6 +4553,59 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+#define ECORE_NONUSED_PPFID_MASK_BB_4P_LO_PORTS 0xaa
+#define ECORE_NONUSED_PPFID_MASK_BB_4P_HI_PORTS 0x55
+#define ECORE_NONUSED_PPFID_MASK_AH_4P 0xf0
+
+static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 native_ppfid_idx = ECORE_PPFID_BY_PFID(p_hwfn), new_bitmap;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
+ return rc;
+ else if (rc == ECORE_NOTIMPL)
+ p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+
+ /* 4-ports mode has limitations that should be enforced:
+ * - BB: the MFW can access only PPFIDs whose corresponding PFIDs belong to
+ * this port.
+ * - AH/E5: only 4 PPFIDs per port are available.
+ */
+ if (ecore_device_num_ports(p_dev) == 4) {
+ u8 mask;
+
+ if (ECORE_IS_BB(p_dev))
+ mask = MFW_PORT(p_hwfn) > 1 ?
+ ECORE_NONUSED_PPFID_MASK_BB_4P_HI_PORTS :
+ ECORE_NONUSED_PPFID_MASK_BB_4P_LO_PORTS;
+ else
+ mask = ECORE_NONUSED_PPFID_MASK_AH_4P;
+
+ if (p_dev->ppfid_bitmap & mask) {
+ new_bitmap = p_dev->ppfid_bitmap & ~mask;
+ DP_INFO(p_hwfn,
+ "Fix the PPFID bitmap for 4-ports mode: 0x%hhx -> 0x%hhx\n",
+ p_dev->ppfid_bitmap, new_bitmap);
+ p_dev->ppfid_bitmap = new_bitmap;
+ }
+ }
+
+ /* The native PPFID is expected to be part of the allocated bitmap */
+ if (!(p_dev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
+ new_bitmap = 0x1 << native_ppfid_idx;
+ DP_INFO(p_hwfn,
+ "Fix the PPFID bitmap to inculde the native PPFID: %hhd -> 0x%hhx\n",
+ p_dev->ppfid_bitmap, new_bitmap);
+ p_dev->ppfid_bitmap = new_bitmap;
+ }
+
+ return ECORE_SUCCESS;
+}
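As a hedged illustration of the 4-ports fix-up above (example values, not from the patch): on a BB device, a low port masks out the odd PPFIDs:

	u8 bitmap = 0xff;   /* example bitmap reported by the MFW */
	bitmap &= ~ECORE_NONUSED_PPFID_MASK_BB_4P_LO_PORTS;   /* 0xff & ~0xaa = 0x55 */
	/* PPFIDs 0, 2, 4 and 6 remain usable */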
+
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool drv_resc_alloc)
@@ -3350,6 +4680,13 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
"Failed to release the resource lock for the resource allocation commands\n");
}
+ /* PPFID bitmap */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = ecore_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
/* Reduced build contains less PQs */
@@ -3621,7 +4958,8 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_MF_MODE_BD:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
- 1 << ECORE_MF_8021AD_TAGGING;
+ 1 << ECORE_MF_8021AD_TAGGING |
+ 1 << ECORE_MF_FIP_SPECIAL;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
@@ -4139,9 +5477,8 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
#endif
static enum _ecore_status_t
-ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM * p_regview,
- void OSAL_IOMEM * p_doorbells,
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
+ void OSAL_IOMEM *p_doorbells, u64 db_phys_addr,
struct ecore_hw_prepare_params *p_params)
{
struct ecore_mdump_retain_data mdump_retain;
@@ -4152,6 +5489,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Split PCI bars evenly between hwfns */
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ p_hwfn->db_phys_addr = db_phys_addr;
if (IS_VF(p_dev))
return ecore_vf_hw_prepare(p_hwfn);
@@ -4217,6 +5555,13 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+ /* Workaround for MFW issue where PF FLR does not clean up
+ * IGU block
+ */
+ if (!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+ ecore_pf_flr_igu_cleanup(p_hwfn);
}
/* Check if mdump logs/data are present and update the epoch value */
@@ -4287,6 +5632,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
p_hwfn->b_en_pacing = p_params->b_en_pacing;
+ p_dev->b_is_target = p_params->b_is_target;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -4296,9 +5642,9 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
ecore_init_iro_array(p_dev);
/* Initialize the first hwfn - will learn number of hwfns */
- rc = ecore_hw_prepare_single(p_hwfn,
- p_dev->regview,
- p_dev->doorbells, p_params);
+ rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
+ p_dev->doorbells, p_dev->db_phys_addr,
+ p_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -4308,24 +5654,26 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
+ u64 db_phys_addr;
+ u32 offset;
/* adjust bar offset for second engine */
- addr = (u8 OSAL_IOMEM *)p_dev->regview +
- ecore_hw_bar_size(p_hwfn,
- p_hwfn->p_main_ptt,
- BAR_ID_0) / 2;
+ offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ addr = (u8 OSAL_IOMEM *)p_dev->regview + offset;
p_regview = (void OSAL_IOMEM *)addr;
- addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
- ecore_hw_bar_size(p_hwfn,
- p_hwfn->p_main_ptt,
- BAR_ID_1) / 2;
+ offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
+ addr = (u8 OSAL_IOMEM *)p_dev->doorbells + offset;
p_doorbell = (void OSAL_IOMEM *)addr;
+ db_phys_addr = p_dev->db_phys_addr + offset;
p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
/* prepare second hw function */
rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
- p_doorbell, p_params);
+ p_doorbell, db_phys_addr,
+ p_params);
/* in case of error, need to free the previously
* initiliazed hwfn 0.
@@ -4722,419 +6070,6 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static enum _ecore_status_t
-ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 high, u32 low,
- u32 *p_entry_num)
-{
- u32 en;
- int i;
-
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32));
- if (en)
- continue;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), low);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), high);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32), 1);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_NORESOURCES;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return ECORE_SUCCESS;
-
- high = p_filter[1] | (p_filter[0] << 8);
- low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
- &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Failed to find an empty LLH filter to utilize\n");
- return rc;
- }
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
- p_filter[0], p_filter[1], p_filter[2], p_filter[3],
- p_filter[4], p_filter[5], entry_num);
-
- return rc;
-}
-
-static enum _ecore_status_t
-ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 high, u32 low,
- u32 *p_entry_num)
-{
- int i;
-
- /* Find the entry and clean it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), 0);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_INVAL;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return;
-
- high = p_filter[1] | (p_filter[0] << 8);
- low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
- low, &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Tried to remove a non-configured filter\n");
- return;
- }
-
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
- p_filter[0], p_filter[1], p_filter[2], p_filter[3],
- p_filter[4], p_filter[5], entry_num);
-}
-
-static enum _ecore_status_t
-ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- enum ecore_llh_port_filter_type_t type,
- u32 high, u32 low, u32 *p_entry_num)
-{
- u32 en;
- int i;
-
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32));
- if (en)
- continue;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), low);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), high);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32), 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32), 1 << type);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_NORESOURCES;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-enum _ecore_status_t
-ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return rc;
-
- high = 0;
- low = 0;
-
- switch (type) {
- case ECORE_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case ECORE_LLH_FILTER_TCP_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Non valid LLH protocol filter type %d\n", type);
- return ECORE_INVAL;
- }
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
- high, low, &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Failed to find an empty LLH filter to utilize\n");
- return rc;
- }
- switch (type) {
- case ECORE_LLH_FILTER_ETHERTYPE:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "ETH type %x is added at %d\n",
- source_port_or_eth_type, entry_num);
- break;
- case ECORE_LLH_FILTER_TCP_SRC_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP src port %x is added at %d\n",
- source_port_or_eth_type, entry_num);
- break;
- case ECORE_LLH_FILTER_UDP_SRC_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP src port %x is added at %d\n",
- source_port_or_eth_type, entry_num);
- break;
- case ECORE_LLH_FILTER_TCP_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP dst port %x is added at %d\n", dest_port,
- entry_num);
- break;
- case ECORE_LLH_FILTER_UDP_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP dst port %x is added at %d\n", dest_port,
- entry_num);
- break;
- case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, entry_num);
- break;
- case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, entry_num);
- break;
- }
-
- return rc;
-}
-
-static enum _ecore_status_t
-ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- enum ecore_llh_port_filter_type_t type,
- u32 high, u32 low, u32 *p_entry_num)
-{
- int i;
-
- /* Find the entry and clean it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32)))
- continue;
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32)))
- continue;
- if (!(ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32)) & (1 << type)))
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), 0);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_INVAL;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-void
-ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return;
-
- high = 0;
- low = 0;
-
- switch (type) {
- case ECORE_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case ECORE_LLH_FILTER_TCP_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Non valid LLH protocol filter type %d\n", type);
- return;
- }
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
- high, low,
- &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
- type, source_port_or_eth_type, dest_port);
- return;
- }
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
- type, source_port_or_eth_type, dest_port, entry_num);
-}
-
-static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- int i;
-
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return;
-
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), 0);
- }
-}
-
-void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
- &p_hwfn->p_dev->mf_bits) &&
- !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return;
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
-}
-
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
@@ -5713,3 +6648,8 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb,
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
+
+bool ecore_is_mf_fip_special(struct ecore_dev *p_dev)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_FIP_SPECIAL, &p_dev->mf_bits);
+}
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 02bacc22..73080632 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -114,6 +114,9 @@ struct ecore_hw_init_params {
/* Driver load parameters */
struct ecore_drv_load_params *p_drv_load_params;
+ /* Avoid engine affinity for RoCE/storage in case of CMT mode */
+ bool avoid_eng_affin;
+
/* SPQ block timeout in msec */
u32 spq_timeout_ms;
};
@@ -271,6 +274,9 @@ struct ecore_hw_prepare_params {
/* Enable/disable request by ecore client for pacing */
bool b_en_pacing;
+
+ /* Indicates whether this PF serves a storage target */
+ bool b_is_target;
};
/**
@@ -425,11 +431,17 @@ enum ecore_dmae_address_type_t {
#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
#define ECORE_DMAE_FLAG_VF_DST 0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define ECORE_DMAE_FLAG_PORT 0x00000010
+#define ECORE_DMAE_FLAG_PF_SRC 0x00000020
+#define ECORE_DMAE_FLAG_PF_DST 0x00000040
struct ecore_dmae_params {
u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
};
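A minimal sketch of the new DMAE calling convention, mirroring the LLH filter code in ecore_dev.c (data, grc_addr and len are placeholders):

	struct ecore_dmae_params params;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_PF_DST;   /* issue the write on behalf of another PF */
	params.dst_pfid = pfid;
	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&data,
				 grc_addr, len /* size in dwords */, &params);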
/**
@@ -441,7 +453,9 @@ struct ecore_dmae_params {
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
- * @param flags (one of the flags defined above)
+ * @param p_params (default parameters will be used in case of OSAL_NULL)
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
@@ -449,7 +463,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
- u32 flags);
+ struct ecore_dmae_params *p_params);
/**
* @brief ecore_dmae_grc2host - Read data from dmae data offset
@@ -459,7 +473,9 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
- * @param flags - one of the flags defined above
+ * @param p_params (default parameters will be used in case of OSAL_NULL)
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
@@ -467,7 +483,7 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
u32 grc_addr,
dma_addr_t dest_addr,
u32 size_in_dwords,
- u32 flags);
+ struct ecore_dmae_params *p_params);
/**
* @brief ecore_dmae_host2host - copy data from to source address
@@ -478,7 +494,9 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
* @param source_addr
* @param dest_addr
* @param size_in_dwords
- * @param params
+ * @param p_params (default parameters will be used in case of OSAL_NULL)
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
@@ -559,28 +577,79 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
u8 *dst_id);
/**
- * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
+ * @brief ecore_llh_get_num_ppfid - Return the number of LLH filter banks
+ * allocated to the PF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_filter - MAC to add
+ * @param p_dev
+ *
+ * @return u8 - Number of LLH filter banks
*/
-enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter);
+u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev);
+
+enum ecore_eng {
+ ECORE_ENG0,
+ ECORE_ENG1,
+ ECORE_BOTH_ENG,
+};
/**
- * @brief ecore_llh_remove_mac_filter - removes a MAC filtre from llh
+ * @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2 affinity
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_filter - MAC to remove
+ * @param p_dev
+ *
+ * @return enum ecore_eng - L2 affinity hint
+ */
+enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the given
+ * LLH filter bank.
+ *
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param eng
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
+ u8 ppfid, enum ecore_eng eng);
+
+/**
+ * @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity
+ *
+ * @param p_dev
+ * @param eng
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
+ enum ecore_eng eng);
+
+/**
+ * @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param mac_addr - MAC to add
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN]);
+
+/**
+ * @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given
+ * filter bank.
+ *
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param mac_addr - MAC to remove
*/
-void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter);
+void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN]);
-enum ecore_llh_port_filter_type_t {
+enum ecore_llh_prot_filter_type_t {
ECORE_LLH_FILTER_ETHERTYPE,
ECORE_LLH_FILTER_TCP_SRC_PORT,
ECORE_LLH_FILTER_TCP_DEST_PORT,
@@ -591,45 +660,52 @@ enum ecore_llh_port_filter_type_t {
};
/**
- * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
+ * @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
- * @param type - type of filters and comparing
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type);
+ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief ecore_llh_remove_protocol_filter - remove a protocol filter in llh
+ * @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
- * @param type - type of filters and comparing
*/
-void
-ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type);
+void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port);
/**
- * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
+ * @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the given
+ * filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
*/
-void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid);
+
+/**
+ * @brief ecore_llh_clear_all_filters - Remove all LLH filters
+ *
+ * @param p_dev
+ */
+void ecore_llh_clear_all_filters(struct ecore_dev *p_dev);
/**
* @brief ecore_llh_set_function_as_default - set function as default per port
@@ -701,4 +777,13 @@ ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_enable);
+
+/**
+ * @brief Whether FIP discovery fallback special mode is enabled or not.
+ *
+ * @param p_dev
+ *
+ * @return true if device is in FIP special mode, false otherwise.
+ */
+bool ecore_is_mf_fip_special(struct ecore_dev *p_dev);
#endif
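For orientation, a hedged usage sketch of the reworked per-PPFID LLH API declared above (error handling elided; the MAC bytes are placeholders):

	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	if (ecore_llh_add_mac_filter(p_dev, 0 /* default ppfid */, mac) != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "Failed to add LLH MAC filter\n");
	/* ... */
	ecore_llh_remove_mac_filter(p_dev, 0, mac);
	ecore_llh_clear_ppfid_filters(p_dev, 0);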
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 2d761b97..6d4a4dd7 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -922,7 +922,11 @@ struct core_rx_start_ramrod_data {
struct core_rx_action_on_error action_on_error;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
- u8 reserved[6];
+/* If set, the inner vlan (802.1q tag) priority that is written to the cqe will
+ * be zeroed out, used for TenantDcb
+ */
+ u8 wipe_inner_vlan_pri_en;
+ u8 reserved[5];
};
@@ -1044,7 +1048,11 @@ struct core_tx_start_ramrod_data {
__le16 qm_pq_id /* QM PQ ID */;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
- u8 resrved[3];
+/* vport id of the current connection, used to access non_rdma_in_to_in_pri_map
+ * which is per vport
+ */
+ u8 vport_id;
+ u8 resrved[2];
};
@@ -1171,6 +1179,25 @@ struct eth_rx_rate_limit {
};
+/* Update RSS indirection table entry command. One outstanding command supported
+ * per PF.
+ */
+struct eth_tstorm_rss_update_data {
+/* Valid flag. Driver must set this flag; FW clears the valid flag when it is
+ * ready for a new RSS update command.
+ */
+ u8 valid;
+/* Global VPORT ID. If RSS is disabled for the VPORT, the RSS update command
+ * will be ignored.
+ */
+ u8 vport_id;
+ u8 ind_table_index /* RSS indirect table index that will be updated. */;
+ u8 reserved;
+ __le16 ind_table_value /* RSS indirect table new value. */;
+ __le16 reserved1 /* reserved. */;
+};
+
+
struct eth_ustorm_per_pf_stat {
/* number of total ucast bytes received on loopback port without errors */
struct regpair rcv_lb_ucast_bytes;
@@ -1463,6 +1490,10 @@ struct pf_start_tunnel_config {
* FW will use a default port
*/
u8 set_geneve_udp_port_flg;
+/* Set no-inner-L2 VXLAN tunnel UDP destination port to
+ * no_inner_l2_vxlan_udp_port. If not set - FW will use a default port
+ */
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
/* Rx classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
@@ -1470,11 +1501,15 @@ struct pf_start_tunnel_config {
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
- u8 reserved;
/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
__le16 vxlan_udp_port;
/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
__le16 geneve_udp_port;
+/* no-inner-L2 VXLAN tunnel UDP destination port. Valid if
+ * set_no_inner_l2_vxlan_udp_port_flg=1
+ */
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved[3];
};
/*
@@ -1547,6 +1582,8 @@ struct pf_update_tunnel_config {
u8 set_vxlan_udp_port_flg;
/* Update GENEVE tunnel UDP destination port. */
u8 set_geneve_udp_port_flg;
+/* Update no-inner-L2 VXLAN tunnel UDP destination port. */
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
/* Classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
@@ -1554,9 +1591,12 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ u8 reserved;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
- __le16 reserved;
+/* no-inner-L2 VXLAN tunnel UDP destination port. */
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved1[3];
};
/*
@@ -1686,6 +1726,13 @@ struct rl_update_ramrod_data {
/* ID of last RL, that will be updated. If clear, single RL will updated. */
u8 rl_id_last;
u8 rl_dc_qcn_flg /* If set, RL will used for DCQCN. */;
+/* If set, alpha will be reset to 1 when the state machine is idle. */
+ u8 dcqcn_reset_alpha_on_idle;
+/* Byte counter threshold to change rate increase stage. */
+ u8 rl_bc_stage_th;
+/* Timer threshold to change rate increase stage. */
+ u8 rl_timer_stage_th;
+ u8 reserved1;
__le32 rl_bc_rate /* Byte Counter Limit. */;
__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
__le16 rl_r_ai /* Active increase rate. */;
@@ -1694,7 +1741,7 @@ struct rl_update_ramrod_data {
__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
__le32 dcqcn_timeuot_us /* DCQCN timeout. */;
__le32 qcn_timeuot_us /* QCN timeout. */;
- __le32 reserved[2];
+ __le32 reserved2;
};
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index bf548722..085af0a3 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1091,6 +1091,15 @@ struct idle_chk_data {
};
/*
+ * Pretend parameters
+ */
+struct pretend_params {
+ u8 split_type /* Pretend split type (from enum init_split_types) */;
+ u8 reserved;
+ u16 split_id /* Pretend split ID (within the pretend split type) */;
+};
+
+/*
* Debug Tools data (per HW function)
*/
struct dbg_tools_data {
@@ -1102,11 +1111,17 @@ struct dbg_tools_data {
u8 block_in_reset[88];
u8 chip_id /* Chip ID (from enum chip_ids) */;
u8 platform_id /* Platform ID */;
+ u8 num_ports /* Number of ports in the chip */;
+ u8 num_pfs_per_port /* Number of PFs in each port */;
+ u8 num_vfs /* Number of VFs in the chip */;
u8 initialized /* Indicates if the data was initialized */;
u8 use_dmae /* Indicates if DMAE should be used */;
+ u8 reserved;
+ struct pretend_params pretend /* Current pretend parameters */;
/* Numbers of registers that were read since last log */
u32 num_regs_read;
};
+
#endif /* __ECORE_HSI_DEBUG_TOOLS__ */
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 6b512305..158ca673 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -832,6 +832,26 @@ enum eth_filter_type {
/*
+ * inner to inner vlan priority translation configurations
+ */
+struct eth_in_to_in_pri_map_cfg {
+/* If set, non_rdma_in_to_in_pri_map or rdma_in_to_in_pri_map will be used for
+ * inner to inner priority mapping depending on protocol type
+ */
+ u8 inner_vlan_pri_remap_en;
+ u8 reserved[7];
+/* Map for inner to inner vlan priority translation for non-RDMA protocols, used
+ * for TenantDcb. Set inner_vlan_pri_remap_en when initializing the map.
+ */
+ u8 non_rdma_in_to_in_pri_map[8];
+/* Map for inner to inner vlan priority translation for RDMA protocols, used for
+ * TenantDcb. Set inner_vlan_pri_remap_en when initializing the map.
+ */
+ u8 rdma_in_to_in_pri_map[8];
+};
+
+
+/*
* eth IPv4 Fragment Type
*/
enum eth_ipv4_frag_type {
@@ -1030,8 +1050,11 @@ struct eth_vport_rx_mode {
/* accept all broadcast packets (subject to vlan) */
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+/* accept any VNI in tunnel VNI classification. Used for default queue. */
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7
};
@@ -1357,6 +1380,20 @@ struct tx_queue_update_ramrod_data {
};
+/*
+ * Inner to Inner VLAN priority map update mode
+ */
+enum update_in_to_in_pri_map_mode_enum {
+/* Inner to Inner VLAN priority map update Disabled */
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED,
+/* Update Inner to Inner VLAN priority map for non RDMA protocols */
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL,
+/* Update Inner to Inner VLAN priority map for RDMA protocols */
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL,
+ MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM
+};
+
+
/*
* Ramrod data for vport update ramrod
@@ -1405,7 +1442,12 @@ struct vport_start_ramrod_data {
u8 ctl_frame_mac_check_en;
/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
- u8 reserved[1];
+/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
+ * zeroed out, used for TenantDcb
+ */
+ u8 wipe_inner_vlan_pri_en;
+/* inner to inner vlan priority translation configurations */
+ struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
};
@@ -1473,7 +1515,14 @@ struct vport_update_ramrod_data_cmn {
u8 ctl_frame_mac_check_en;
/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
- u8 reserved[15];
+/* Indicates whether to update the RDMA or NON-RDMA vlan priority remapping
+ * table according to update_in_to_in_pri_map_mode_enum, used for TenantDcb
+ * (use enum update_in_to_in_pri_map_mode_enum)
+ */
+ u8 update_in_to_in_pri_map_mode;
+/* Map for inner to inner vlan priority translation, used for TenantDcb. */
+ u8 in_to_in_pri_map[8];
+ u8 reserved[6];
};
struct vport_update_ramrod_mcast {
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 51bba27e..72cd7e9c 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -407,6 +407,30 @@ void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
*(u32 *)&p_ptt->pxp.pretend);
}
+void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 port_id, u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
u32 concrete_fid = 0;
@@ -426,14 +450,17 @@ u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
* If this changes, this needs to be revisted.
*/
-/* Ecore DMAE
- * =============
- */
+/* DMAE */
+
+#define ECORE_DMAE_FLAGS_IS_SET(params, flag) \
+ ((params) != OSAL_NULL && ((params)->flags & ECORE_DMAE_FLAG_##flag))
+
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct ecore_dmae_params *p_params)
{
+ u8 src_pfid, dst_pfid, port_id;
u16 opcode_b = 0;
u32 opcode = 0;
@@ -443,16 +470,20 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
*/
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
- opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
- DMAE_CMD_SRC_PF_ID_SHIFT;
+ src_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ opcode |= (src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT;
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
- opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
- DMAE_CMD_DST_PF_ID_SHIFT;
+ dst_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ opcode |= (dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT;
- /* DMAE_E4_TODO need to check which value to specifiy here. */
+ /* DMAE_E4_TODO need to check which value to specify here. */
/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
/* Whether to write a completion word to the completion destination:
@@ -462,7 +493,7 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
- if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
+ if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
/* swapping mode 3 - big endian there should be a define ifdefed in
@@ -470,7 +501,9 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
*/
opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
- opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
+ port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
+ p_params->port_id : p_hwfn->port_id;
+ opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT;
/* reset source address in next go */
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
@@ -479,14 +512,14 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
- if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
+ if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
} else {
opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT);
}
- if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
+ if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
@@ -831,7 +864,7 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
for (i = 0; i <= cnt_split; i++) {
offset = length_limit * i;
- if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
+ if (!ECORE_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
if (src_type == ECORE_DMAE_ADDRESS_GRC)
src_addr_split = src_addr + offset;
else
@@ -872,51 +905,45 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
return ecore_status;
}
-enum _ecore_status_t
-ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u64 source_addr,
- u32 grc_addr, u32 size_in_dwords, u32 flags)
+enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct ecore_dmae_params params;
enum _ecore_status_t rc;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
- params.flags = flags;
-
OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
grc_addr_in_dw,
ECORE_DMAE_ADDRESS_HOST_VIRT,
ECORE_DMAE_ADDRESS_GRC,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
-enum _ecore_status_t
-ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct ecore_dmae_params params;
enum _ecore_status_t rc;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
- params.flags = flags;
-
OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, ECORE_DMAE_ADDRESS_GRC,
ECORE_DMAE_ADDRESS_HOST_VIRT,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
@@ -965,7 +992,6 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
const char *phase)
{
u32 size = OSAL_PAGE_SIZE / 2, val;
- struct ecore_dmae_params params;
enum _ecore_status_t rc = ECORE_SUCCESS;
dma_addr_t p_phys;
void *p_virt;
@@ -997,9 +1023,9 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
(unsigned long)(p_phys + size),
(u8 *)p_virt + size, size);
- OSAL_MEMSET(&params, 0, sizeof(params));
rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
- size / 4 /* size_in_dwords */, &params);
+ size / 4 /* size_in_dwords */,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
@@ -1030,3 +1056,32 @@ out:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
return rc;
}
+
+void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr, u32 val)
+{
+ u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
+
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+ ecore_wr(p_hwfn, p_ptt, hw_addr, val);
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+}
+
+u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr)
+{
+ u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
+ u32 val;
+
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+ val = ecore_rd(p_hwfn, p_ptt, hw_addr);
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ return val;
+}
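A minimal caller sketch for the reworked DMAE API above (illustrative only; host_addr, grc_addr, size_in_dwords and other_pfid are placeholders, not values introduced by this patch):

	/* Illustrative sketch, assuming the ecore_hw.h declarations below. */
	static enum _ecore_status_t
	example_dmae_to_other_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
				 u64 host_addr, u32 grc_addr, u32 size_in_dwords,
				 u8 other_pfid)
	{
		struct ecore_dmae_params params;

		/* Default behaviour no longer needs a dummy params struct:
		 * ecore_dmae_host2grc(p_hwfn, p_ptt, host_addr, grc_addr,
		 *		       size_in_dwords, OSAL_NULL);
		 */

		/* Non-default behaviour: fill the struct and set the matching flag */
		OSAL_MEMSET(&params, 0, sizeof(params));
		params.flags = ECORE_DMAE_FLAG_PF_DST;
		params.dst_pfid = other_pfid;
		return ecore_dmae_host2grc(p_hwfn, p_ptt, host_addr, grc_addr,
					   size_in_dwords, &params);
	}
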
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 394207eb..0b5b40c4 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -134,8 +134,8 @@ struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * @param val
* @param hw_addr
+ * @param val
*/
void ecore_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -147,7 +147,6 @@ void ecore_wr(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * @param val
* @param hw_addr
*/
u32 ecore_rd(struct ecore_hwfn *p_hwfn,
@@ -223,6 +222,18 @@ void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
+ * @brief ecore_port_fid_pretend - pretend to another port and another function
+ * when accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
+ */
+void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 port_id, u16 fid);
+
+/**
* @brief ecore_vfid_to_concrete - build a concrete FID for a
* given VF ID
*
@@ -257,4 +268,29 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
const char *phase);
+/**
+ * @brief ecore_ppfid_wr - Write value to BAR using the given ptt while
+ * pretending to a PF to which the given PPFID pertains.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param abs_ppfid
+ * @param hw_addr
+ * @param val
+ */
+void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr, u32 val);
+
+/**
+ * @brief ecore_ppfid_rd - Read value from BAR using the given ptt while
+ * pretending to a PF to which the given PPFID pertains.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param abs_ppfid
+ * @param hw_addr
+ */
+u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr);
+
#endif /* __ECORE_HW_H__ */
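An illustrative read-modify-write through the new PPFID helpers declared above (abs_ppfid, hw_addr and the bit being set are placeholders, not defined by this patch):

	static void example_ppfid_rmw(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      u8 abs_ppfid, u32 hw_addr)
	{
		u32 val;

		val = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid, hw_addr);
		val |= 0x1;	/* example bit only */
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, hw_addr, val);
	}
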
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index b8496cb2..cfc1156e 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1665,7 +1665,7 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
bool ipv6,
enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - ipv4 or ipv6'\n");
@@ -1729,6 +1729,9 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
ram_line_lo = 0;
ram_line_hi = 0;
+	/* Search non-IP as GFT */
+ search_non_ip_as_gft = 0;
+
/* Tunnel type */
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
@@ -1752,8 +1755,13 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+
+ /* Allow tunneled traffic without inner IP */
+ search_non_ip_as_gft = 1;
}
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
+ search_non_ip_as_gft);
ecore_wr(p_hwfn, p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
ram_line_lo);
@@ -1996,52 +2004,49 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
-#define RSS_IND_TABLE_BASE_ADDR 4112
-#define RSS_IND_TABLE_VPORT_SIZE 16
-#define RSS_IND_TABLE_ENTRY_PER_LINE 8
-/* Update RSS indirection table entry. */
-void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 rss_id,
- u8 ind_table_index,
- u16 ind_table_value)
+/*******************************************************************************
+ * File name : rdma_init.c
+ * Author : Michael Shteinbok
+ *******************************************************************************
+ *******************************************************************************
+ * Description:
+ * RDMA HSI functions
+ *
+ *******************************************************************************
+ * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
+ *
+ *******************************************************************************
+ */
+static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
+ u8 storm_id)
{
- u32 cnt, rss_addr;
- u32 *reg_val;
- u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
- u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
-
- /* get entry address */
- rss_addr = RSS_IND_TABLE_BASE_ADDR +
- RSS_IND_TABLE_VPORT_SIZE * rss_id +
- ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
-
- /* prepare update command */
- ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
-
- for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
- if (cnt == ind_table_index) {
- rss_ind_entry[cnt] = ind_table_value;
- rss_ind_mask[cnt] = 0xFFFF;
- } else {
- rss_ind_entry[cnt] = 0;
- rss_ind_mask[cnt] = 0;
- }
+ switch (storm_id) {
+ case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+
+ default: return 0;
}
+}
- /* Update entry in HW*/
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
-
- reg_val = (u32 *)rss_ind_mask;
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
+void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS])
+{
+ u8 storm_id;
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);
- reg_val = (u32 *)rss_ind_entry;
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
+ ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
+ }
}
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 1024bb26..3503a90c 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -472,21 +472,35 @@ void ecore_memset_task_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
-/**
- * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table
- * entry.
- * The function must run in exclusive mode to prevent wrong RSS configuration.
+
+/*******************************************************************************
+ * File name : rdma_init.h
+ * Author : Michael Shteinbok
+ *******************************************************************************
+ *******************************************************************************
+ * Description:
+ * RDMA HSI functions header
+ *
+ *******************************************************************************
+ * Notes: This is the input to the auto generated file drv_init_fw_funcs.h
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param rss_id - RSS engine ID.
- * @param ind_table_index - RSS indirect table index.
- * @param ind_table_value - RSS indirect table new value.
+ *******************************************************************************
*/
-void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 rss_id,
- u8 ind_table_index,
- u16 ind_table_value);
+#define NUM_STORMS 6
+
+
+
+/**
+ * @brief ecore_set_rdma_error_level - Sets the RDMA assert level.
+ *        If the severity of an error exceeds this level, the FW will assert.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param assert_level - An array of assert levels for each storm.
+ */
+void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS]);
+
#endif
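A hedged usage sketch for the new API (the level value itself is FW-defined and purely illustrative here):

	static void example_set_rdma_levels(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
	{
		u8 assert_level[NUM_STORMS];

		/* Same (illustrative) level for all six storms */
		OSAL_MEMSET(assert_level, 0x1, sizeof(assert_level));
		ecore_set_rdma_error_level(p_hwfn, p_ptt, assert_level);
	}
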
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index b7636f36..044308bf 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -101,7 +101,8 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_init_val + i),
- addr + (i << 2), segment, 0);
+ addr + (i << 2), segment,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS)
return rc;
@@ -165,8 +166,9 @@ static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
} else {
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_buf +
- dmae_data_offset),
- addr, size, 0);
+ dmae_data_offset),
+ addr, size,
+ OSAL_NULL /* default parameters */);
}
return rc;
@@ -177,13 +179,15 @@ static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+ struct ecore_dmae_params params;
OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
return ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)&zero_buffer[0],
- addr, fill_count,
- ECORE_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, &params);
}
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
@@ -416,11 +420,11 @@ static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
u16 *p_offset, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
+ const u8 *modes_tree;
- modes_tree_buf = p_dev->fw_data->modes_tree_buf;
- tree_val = modes_tree_buf[(*p_offset)++];
+ modes_tree = p_dev->fw_data->modes_tree_buf;
+ tree_val = modes_tree[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
@@ -470,12 +474,12 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 cmd_num, num_init_ops;
- union init_op *init_ops;
+ union init_op *init;
bool b_dmae = false;
enum _ecore_status_t rc = ECORE_SUCCESS;
num_init_ops = p_dev->fw_data->init_ops_size;
- init_ops = p_dev->fw_data->init_ops;
+ init = p_dev->fw_data->init_ops;
#ifdef CONFIG_ECORE_ZIPPED_FW
p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
@@ -487,7 +491,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
#endif
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
- union init_op *cmd = &init_ops[cmd_num];
+ union init_op *cmd = &init[cmd_num];
u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 4c271d35..7368d55f 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -428,14 +428,13 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
-#define ECORE_DB_REC_COUNT 10
+#define ECORE_DB_REC_COUNT 1000
#define ECORE_DB_REC_INTERVAL 100
-/* assumes sticky overflow indication was set for this PF */
-static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static enum _ecore_status_t ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- u8 count = ECORE_DB_REC_COUNT;
+ u32 count = ECORE_DB_REC_COUNT;
u32 usage = 1;
/* wait for usage to zero or count to run out. This is necessary since
@@ -461,6 +460,28 @@ static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
return ECORE_TIMEOUT;
}
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 overflow;
+ enum _ecore_status_t rc;
+
+ overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ DP_NOTICE(p_hwfn, false, "PF Overflow sticky 0x%x\n", overflow);
+ if (!overflow) {
+ ecore_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+ return ECORE_SUCCESS;
+ }
+
+ if (ecore_edpm_enabled(p_hwfn)) {
+ rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
/* flush any pedning (e)dpm as they may never arrive */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
@@ -477,8 +498,7 @@ static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
- u32 int_sts, first_drop_reason, details, address, overflow,
- all_drops_reason;
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
enum _ecore_status_t rc;
@@ -504,8 +524,6 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
DORQ_REG_DB_DROP_DETAILS);
address = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS);
- overflow = ecore_rd(p_hwfn, p_ptt,
- DORQ_REG_PF_OVFL_STICKY);
all_drops_reason = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_REASON);
@@ -516,19 +534,16 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
"FID\t\t0x%04x\t\t(Opaque FID)\n"
"Size\t\t0x%04x\t\t(in bytes)\n"
"1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
- "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
- "Overflow\t0x%x\t\t(a per PF indication)\n",
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
address,
GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
- first_drop_reason, all_drops_reason, overflow);
+ first_drop_reason, all_drops_reason);
- /* if this PF caused overflow, initiate recovery */
- if (overflow) {
- rc = ecore_db_rec_attn(p_hwfn, p_ptt);
- if (rc != ECORE_SUCCESS)
- return rc;
- }
+ rc = ecore_db_rec_handler(p_hwfn, p_ptt);
+ OSAL_DB_REC_OCCURRED(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ return rc;
/* clear the doorbell drop details and prepare for next drop */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
@@ -1209,8 +1224,9 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
- struct igu_prod_cons_update igu_ack = { 0 };
+ struct igu_prod_cons_update igu_ack;
+ OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
igu_ack.sb_id_and_flags =
((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
@@ -1546,11 +1562,13 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&phys_addr,
CAU_REG_SB_ADDR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
@@ -2631,7 +2649,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
sb_id * sizeof(u64),
- (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ (u64)(osal_uintptr_t)&sb_entry, 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2644,8 +2663,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&sb_entry,
- CAU_REG_SB_VAR_MEMORY +
- sb_id * sizeof(u64), 2, 0);
+ CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
return rc;
@@ -2681,3 +2700,35 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+ int i;
+
+ /* Do not reorder the following cleanup sequence */
+ /* Ack all attentions */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff);
+
+ /* Clear driver attention */
+ ecore_wr(p_hwfn, p_dpc_ptt,
+ ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0);
+
+ /* Clear per-PF IGU registers to restore them as if the IGU
+ * was reset for this PF
+ */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+
+ /* Execute IGU clean up*/
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1);
+
+ /* Clear Stats */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0);
+
+ for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0);
+}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 041240d7..ff2310cf 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -256,5 +256,6 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_hw_init);
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index aeaf469e..42538a46 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -92,8 +92,9 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
enum igu_int_cmd int_cmd, u8 upd_flg)
{
- struct igu_prod_cons_update igu_ack = { 0 };
+ struct igu_prod_cons_update igu_ack;
+ OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
igu_ack.sb_id_and_flags =
((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
@@ -343,4 +344,15 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u16 sb_id, bool b_to_vf);
+
+/**
+ * @brief - Doorbell Recovery handler.
+ * Run DB_REAL_DEAL doorbell recovery in case of PF overflow
+ *          Run DB_REC_REAL_DEAL doorbell recovery in case of PF overflow
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif
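Usage mirrors the DORQ attention callback reworked in ecore_int.c above; a minimal caller sketch:

	static enum _ecore_status_t example_db_recovery(struct ecore_hwfn *p_hwfn,
							struct ecore_ptt *p_ptt)
	{
		enum _ecore_status_t rc;

		rc = ecore_db_rec_handler(p_hwfn, p_ptt);
		OSAL_DB_REC_OCCURRED(p_hwfn);
		return rc;
	}
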
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 29001d71..55de7086 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -84,6 +84,13 @@ struct ecore_public_vf_info {
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
+
+ /* Trusted VFs can configure promiscuous mode and
+ * set a MAC address even if the PF has set a forced MAC.
+ * Also store shadow promisc configuration if needed.
+ */
+ bool is_trusted_configured;
+ bool is_trusted_request;
};
struct ecore_iov_vf_init_params {
@@ -695,6 +702,16 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
*/
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+/**
+ * @brief - Configure min rate for VF's vport.
+ * @param p_dev
+ * @param vfid
+ * @param rate - rate in Mbps
+ *
+ * @return
+ */
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+ int vfid, u32 rate);
#endif
/**
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index 05693029..12d45c1c 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -113,91 +113,129 @@
/* Tstorm Eth limit Rx rate */
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) (IRO[30].base + \
+ ((pf_id) * IRO[30].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
/* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[30].base + \
- ((queue_id) * IRO[30].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[31].base + \
+ ((queue_id) * IRO[31].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[31].base + \
- ((rss_id) * IRO[31].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
((rss_id) * IRO[32].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[33].base + \
+ ((rss_id) * IRO[33].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[33].base + \
- ((pf_id) * IRO[33].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[34].base + \
+ ((pf_id) * IRO[34].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[34].base + \
- ((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[35].base + \
+ ((cmdq_queue_id) * IRO[35].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
* BDqueue-id
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[35].base + \
- ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
-/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[37].base + \
+ ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[37].base + \
- ((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[40].base + \
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[40].base + \
((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[43].base + \
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[43].base + \
((pf_id) * IRO[43].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[44].base + \
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[44].base + \
((pf_id) * IRO[44].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[45].base + \
+ ((pf_id) * IRO[45].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
/* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
-/* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[47].base + \
+ ((rdma_stat_counter_id) * IRO[47].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[48].base + \
+ ((pf_id) * IRO[48].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[49].base + \
+ ((pf_id) * IRO[49].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[50].base + \
+ ((pf_id) * IRO[50].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[51].base + \
+ ((pf_id) * IRO[51].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[52].base + \
+ ((pf_id) * IRO[52].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[53].base + \
+ ((pf_id) * IRO[53].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
/* Xstorm iWARP rxmit stats */
-#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + \
- ((pf_id) * IRO[47].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[54].base + \
+ ((pf_id) * IRO[54].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
- ((roce_pf_id) * IRO[48].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[55].base + \
+ ((roce_pf_id) * IRO[55].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[49].base + \
- ((roce_pf_id) * IRO[49].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[56].base + \
+ ((roce_pf_id) * IRO[56].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) (IRO[57].base + \
+ ((roce_pf_id) * IRO[57].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[50].base + \
- ((roce_pf_id) * IRO[50].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[58].base + \
+ ((roce_pf_id) * IRO[58].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) (IRO[59].base + \
+ ((roce_pf_id) * IRO[59].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 685fa2e8..30e632ce 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -7,7 +7,7 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[51] = {
+static const struct iro iro_arr[60] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
@@ -29,7 +29,7 @@ static const struct iro iro_arr[51] = {
/* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3e38, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x2b78, 0x0, 0x0, 0x0, 0x78},
+ { 0x3ef8, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* MSTORM_INTEG_TEST_DATA_OFFSET */
@@ -43,7 +43,7 @@ static const struct iro iro_arr[51] = {
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
- { 0x96c0, 0x30, 0x0, 0x0, 0x30},
+ { 0xa990, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x4b68, 0x80, 0x0, 0x0, 0x40},
/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
@@ -59,15 +59,17 @@ static const struct iro iro_arr[51] = {
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0xe770, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x2d10, 0x80, 0x0, 0x0, 0x38},
+ { 0x4090, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf2b8, 0x78, 0x0, 0x0, 0x78},
+ { 0xfea8, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
{ 0xaf20, 0x0, 0x0, 0x0, 0xf0},
/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
{ 0xb010, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) */
+ { 0xc00, 0x8, 0x0, 0x0, 0x8},
/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8},
/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
@@ -91,25 +93,41 @@ static const struct iro iro_arr[51] = {
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xa588, 0x50, 0x0, 0x0, 0x20},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x8700, 0x40, 0x0, 0x0, 0x28},
+ { 0x8f00, 0x40, 0x0, 0x0, 0x28},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x10300, 0x18, 0x0, 0x0, 0x10},
+ { 0x10e30, 0x18, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
{ 0xde48, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
- { 0x10768, 0x20, 0x0, 0x0, 0x20},
+ { 0x11298, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x2d48, 0x80, 0x0, 0x0, 0x10},
+ { 0x40c8, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x5048, 0x10, 0x0, 0x0, 0x10},
+/* XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0xa928, 0x8, 0x0, 0x0, 0x1},
+/* YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0xa128, 0x8, 0x0, 0x0, 0x1},
+/* PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0x11a30, 0x8, 0x0, 0x0, 0x1},
+/* TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0xf030, 0x8, 0x0, 0x0, 0x1},
+/* MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0x13028, 0x8, 0x0, 0x0, 0x1},
+/* USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0x12c58, 0x8, 0x0, 0x0, 0x1},
/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
{ 0xc9b8, 0x30, 0x0, 0x0, 0x10},
/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
- { 0xed90, 0x10, 0x0, 0x0, 0x10},
+ { 0xed90, 0x28, 0x0, 0x0, 0x28},
/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
- { 0xa520, 0x10, 0x0, 0x0, 0x10},
+ { 0xad20, 0x18, 0x0, 0x0, 0x18},
+/* YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) */
+ { 0xaea0, 0x8, 0x0, 0x0, 0x8},
/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
- { 0x13108, 0x8, 0x0, 0x0, 0x8},
+ { 0x13c38, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) */
+ { 0x13c50, 0x18, 0x0, 0x0, 0x18},
};
#endif /* __IRO_VALUES_H__ */
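For reference, the new RSS-update IRO entry resolves as follows (a worked example from the table above, not part of the patch):

	/* With IRO[30] = { base 0xc00, m1 0x8 }:
	 *   TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) = IRO[30].base + (pf_id) * IRO[30].m1
	 *                                       = 0xc00 + pf_id * 0x8
	 * e.g. for pf_id == 2 the offset is 0xc10 within the TSDM RAM.
	 */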
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index d71f4616..8b9817eb 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -608,6 +608,9 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & ECORE_ACCEPT_BCAST));
+ SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
+ !!(accept_filter & ECORE_ACCEPT_ANY_VNI));
+
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
@@ -783,6 +786,11 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
return rc;
}
+ if (p_params->update_ctl_frame_check) {
+ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+ }
+
/* Update mcast bins for VFs, PF doesn't use this functionality */
ecore_sp_update_mcast_bin(p_ramrod, p_params);
@@ -2084,6 +2092,24 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
}
}
+static enum gft_profile_type
+ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
+{
+ if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
+ return GFT_PROFILE_TYPE_4_TUPLE;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
+ return GFT_PROFILE_TYPE_IP_DST_ADDR;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_TUNN_TYPE)
+ return GFT_PROFILE_TYPE_TUNNEL_TYPE;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_IP_SRC)
+ return GFT_PROFILE_TYPE_IP_SRC_ADDR;
+
+ return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params)
@@ -2091,13 +2117,13 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
return;
- if (p_cfg_params->arfs_enable) {
+ if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp,
p_cfg_params->udp,
p_cfg_params->ipv4,
p_cfg_params->ipv6,
- GFT_PROFILE_TYPE_4_TUPLE);
+ ecore_arfs_mode_to_hsi(p_cfg_params->mode));
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
p_cfg_params->tcp ? "Enable" : "Disable",
@@ -2107,8 +2133,8 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
} else {
ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
- p_cfg_params->arfs_enable ? "Enable" : "Disable");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %d\n",
+ (int)p_cfg_params->mode);
}
enum _ecore_status_t
@@ -2179,10 +2205,10 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_queue_cid *p_cid,
- u16 *p_rx_coal)
+enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_rx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
@@ -2191,7 +2217,8 @@ int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ (u64)(osal_uintptr_t)&sb_entry, 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2213,10 +2240,10 @@ int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_queue_cid *p_cid,
- u16 *p_tx_coal)
+enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_tx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
@@ -2225,7 +2252,8 @@ int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ (u64)(osal_uintptr_t)&sb_entry, 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2302,3 +2330,55 @@ ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
p_link->speed);
}
+
+#define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT 100
+#define RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US 1
+
+enum _ecore_status_t
+ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u8 ind_table_index,
+ u16 ind_table_value)
+{
+ struct eth_tstorm_rss_update_data update_data = { 0 };
+ void OSAL_IOMEM *addr = OSAL_NULL;
+ enum _ecore_status_t rc;
+ u8 abs_vport_id;
+ u32 cnt = 0;
+
+ OSAL_BUILD_BUG_ON(sizeof(update_data) != sizeof(u64));
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id);
+
+ *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);
+
+ for (cnt = 0; update_data.valid &&
+ cnt < RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT; cnt++) {
+ OSAL_UDELAY(RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US);
+ *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);
+ }
+
+ if (update_data.valid) {
+ DP_NOTICE(p_hwfn, true,
+ "rss update valid status is not clear! valid=0x%x vport id=%d ind_Table_idx=%d ind_table_value=%d.\n",
+ update_data.valid, vport_id, ind_table_index,
+ ind_table_value);
+
+ return ECORE_AGAIN;
+ }
+
+ update_data.valid = 1;
+ update_data.ind_table_index = ind_table_index;
+ update_data.ind_table_value = ind_table_value;
+ update_data.vport_id = abs_vport_id;
+
+ DIRECT_REG_WR64(p_hwfn, addr, *(u64 *)(&update_data));
+
+ return ECORE_SUCCESS;
+}
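An illustrative caller sketch (vport_id, idx and value are placeholders); per the API note added to ecore_l2_api.h below, the call must be serialized with ecore_mcp_ind_table_lock()/_unlock(), whose signatures are not shown in this patch:

	static enum _ecore_status_t
	example_rss_entry_update(struct ecore_hwfn *p_hwfn, u8 vport_id,
				 u8 idx, u16 value)
	{
		enum _ecore_status_t rc;

		rc = ecore_update_eth_rss_ind_table_entry(p_hwfn, vport_id,
							  idx, value);
		if (rc == ECORE_AGAIN)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
				   "previous RSS indirection update still pending\n");
		return rc;
	}
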
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 575b9e3a..004fb61b 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -137,6 +137,16 @@ struct ecore_filter_accept_flags {
#define ECORE_ACCEPT_MCAST_MATCHED 0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
#define ECORE_ACCEPT_BCAST 0x20
+#define ECORE_ACCEPT_ANY_VNI 0x40
+};
+
+enum ecore_filter_config_mode {
+ ECORE_FILTER_CONFIG_MODE_DISABLE,
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE,
+ ECORE_FILTER_CONFIG_MODE_L4_PORT,
+ ECORE_FILTER_CONFIG_MODE_IP_DEST,
+ ECORE_FILTER_CONFIG_MODE_TUNN_TYPE,
+ ECORE_FILTER_CONFIG_MODE_IP_SRC,
};
struct ecore_arfs_config_params {
@@ -144,7 +154,7 @@ struct ecore_arfs_config_params {
bool udp;
bool ipv4;
bool ipv6;
- bool arfs_enable; /* Enable or disable arfs mode */
+ enum ecore_filter_config_mode mode;
};
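Illustrative use of the reworked structure (previously a bare arfs_enable flag); a sketch mirroring the ecore_arfs_mode_configure() path shown in ecore_l2.c above:

	static void example_enable_5_tuple_arfs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	{
		struct ecore_arfs_config_params cfg;

		OSAL_MEMSET(&cfg, 0, sizeof(cfg));
		cfg.tcp = true;
		cfg.ipv4 = true;
		/* maps to GFT_PROFILE_TYPE_4_TUPLE in ecore_arfs_mode_to_hsi() */
		cfg.mode = ECORE_FILTER_CONFIG_MODE_5_TUPLE;
		ecore_arfs_mode_configure(p_hwfn, p_ptt, &cfg);
	}
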
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
@@ -337,7 +347,10 @@ struct ecore_sp_vport_update_params {
/* MTU change - notice this requires the vport to be disabled.
* If non-zero, value would be used.
*/
- u16 mtu;
+ u16 mtu;
+ u8 update_ctl_frame_check;
+ u8 mac_chk_en;
+ u8 ethtype_chk_en;
};
/**
@@ -460,4 +473,28 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
bool b_is_add);
+
+/**
+ * @brief - ecore_update_eth_rss_ind_table_entry
+ *
+ * This function is used to update an RSS indirection table entry in FW RAM
+ * instead of using the SP vport update ramrod with rss params.
+ *
+ * Notice:
+ * This function supports only one outstanding command per engine. Ecore
+ * clients which use this function should call ecore_mcp_ind_table_lock() prior
+ * to it and ecore_mcp_ind_table_unlock() after it.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param ind_table_index
+ * @param ind_table_value
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u8 ind_table_index,
+ u16 ind_table_value);
#endif
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index ea14c172..6c656068 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -177,10 +177,16 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES 20
+#define ECORE_MCP_SHMEM_RDY_ITER_MS 50
+
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+ u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
+ u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
@@ -198,6 +204,35 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
p_info->public_base |= GRCBASE_MCP;
+ /* Get the MFW MB address and number of supported messages */
+ mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr);
+
+ /* @@@TBD:
+ * The driver can be notified that there was an MCP reset, and read the SHMEM
+ * values before the MFW has completed initializing them.
+ * As a temporary solution, the "sup_msgs" field is used as a data ready
+ * indication.
+ * This should be replaced with an actual indication when it is provided
+ * by the MFW.
+ */
+ while (!p_info->mfw_mb_length && cnt--) {
+ OSAL_MSLEEP(msec);
+ p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr);
+ }
+
+ if (!cnt) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to get the SHMEM ready notification after %d msec\n",
+ ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+ return ECORE_TIMEOUT;
+ }
+
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -208,14 +243,6 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
" mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
- /* Set the MFW MB address */
- mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_info->public_base,
- PUBLIC_MFW_MB));
- p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
- p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
- p_info->mfw_mb_addr);
-
/* Get the current driver mailbox sequence before sending
* the first command
*/
@@ -1656,6 +1683,49 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
&param);
}
+static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
+ ecore_sp_pf_update_stag(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+ OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
+}
+
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
@@ -1946,7 +2016,7 @@ ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
val);
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"UFP shmem config: mode = %d tc = %d pri_type = %d\n",
p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
p_hwfn->ufp_info.pri_type);
@@ -2041,6 +2111,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
case MFW_DRV_MSG_BW_UPDATE:
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ ecore_mcp_update_stag(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_FAILURE_DETECTED:
ecore_mcp_handle_fan_failure(p_hwfn);
break;
@@ -2155,8 +2228,10 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_tranceiver_type)
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type)
{
+ u32 transceiver_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* TODO - Add support for VFs */
@@ -2167,14 +2242,23 @@ enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
- if (!p_ptt) {
- *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
- rc = ECORE_INVAL;
+
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+ transceiver_info = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ *p_transceiver_state = GET_MFW_FIELD(transceiver_info,
+ ETH_TRANSCEIVER_STATE);
+
+ if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
+ *p_transceiver_type = GET_MFW_FIELD(transceiver_info,
+ ETH_TRANSCEIVER_TYPE);
} else {
- *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
- p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- transceiver_data));
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
}
return rc;
@@ -2194,15 +2278,11 @@ enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_speed_mask)
{
- u32 transceiver_data, transceiver_type, transceiver_state;
+ u32 transceiver_type, transceiver_state;
- ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
- transceiver_state = GET_MFW_FIELD(transceiver_data,
- ETH_TRANSCEIVER_STATE);
-
- transceiver_type = GET_MFW_FIELD(transceiver_data,
- ETH_TRANSCEIVER_TYPE);
if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
return ECORE_INVAL;
@@ -2823,10 +2903,72 @@ ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
enum _ecore_status_t
-ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 mtu)
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 mtu)
{
- return 0;
+ u32 resp = 0, param = 0, drv_mb_param = 0;
+ enum _ecore_status_t rc;
+
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 *mac)
+{
+ struct ecore_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
+ DRV_MSG_CODE_VMAC_TYPE_MAC);
+ mb_params.param |= MCP_PF_ID(p_hwfn);
+ OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
+ mb_params.p_data_src = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_ov_eswitch eswitch)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+
+ switch (eswitch) {
+ case ECORE_OV_ESWITCH_NONE:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+ break;
+ case ECORE_OV_ESWITCH_VEB:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+ break;
+ case ECORE_OV_ESWITCH_VEPA:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+ return rc;
}
enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
@@ -2938,11 +3080,11 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
}
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
- u32 addr, u8 *p_buf, u32 len)
+ u32 addr, u8 *p_buf, u32 *p_len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
- u32 resp, param;
+ u32 resp = 0, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -2953,7 +3095,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
(cmd == ECORE_PHY_CORE_READ) ?
DRV_MSG_CODE_PHY_CORE_READ :
DRV_MSG_CODE_PHY_RAW_READ,
- addr, &resp, &param, &len, (u32 *)p_buf);
+ addr, &resp, &param, p_len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
@@ -2982,7 +3124,7 @@ enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
- u32 resp, param;
+ u32 resp = 0, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -3001,7 +3143,7 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
- u32 resp, param;
+ u32 resp = 0, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -3095,8 +3237,8 @@ enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 resp = 0, param, nvm_cmd;
struct ecore_ptt *p_ptt;
- u32 resp, param, nvm_cmd;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -4002,13 +4144,83 @@ ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mcp_mb_params mb_params;
+ u8 fir_valid, l2_valid;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_engine_config command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+ if (fir_valid)
+ p_dev->fir_affin =
+ GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+ l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+ if (l2_valid)
+ p_dev->l2_affin_hint =
+ GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+ DP_INFO(p_hwfn,
+ "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+ fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_ppfid_bitmap command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_PPFID_BITMAP);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
+ p_dev->ppfid_bitmap);
+
+ return ECORE_SUCCESS;
+}
+
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u32 offset, u32 val)
{
- struct ecore_mcp_mb_params mb_params = {0};
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 dword = val;
+ struct ecore_mcp_mb_params mb_params;
+ OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
mb_params.param = offset;
mb_params.p_data_src = &dword;
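
The OV helpers added above (MTU, MAC address, eswitch mode) all follow the same mailbox flow: zero a request, build the 32-bit parameter with SET_MFW_FIELD(), send it, and log a non-zero return code. A minimal sketch of that flow, assuming only the ecore_mcp_cmd() and DP_ERR() calls already shown in this patch (mcp_simple_cmd_example is a hypothetical name, not part of the change):

static enum _ecore_status_t
mcp_simple_cmd_example(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		       u32 cmd, u32 drv_mb_param)
{
	u32 resp = 0, fw_param = 0;
	enum _ecore_status_t rc;

	/* Send the command and surface any failure to the log; the caller
	 * decides whether the error is fatal.
	 */
	rc = ecore_mcp_cmd(p_hwfn, p_ptt, cmd, drv_mb_param, &resp, &fw_param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MFW command 0x%08x failed, rc = %d\n", cmd, rc);

	return rc;
}
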
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 8e125310..2c052b7f 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -25,9 +25,6 @@
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ecore_device_num_ports((_p_hwfn)->p_dev))
-
struct ecore_mcp_info {
/* List for mailbox commands which were sent and wait for a response */
osal_list_t cmd_list;
@@ -566,4 +563,22 @@ ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u32 offset, u32 val);
+/**
+ * @brief Get the engine affinity configuration.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Get the PPFID bitmap.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_MCP_H__ */
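
Both prototypes above return ECORE_NOTIMPL when the running MFW image does not implement the corresponding mailbox command, so a caller can probe them during bring-up and fall back to defaults. A hedged sketch of such a caller, assuming a PTT has already been acquired (the function name is illustrative, not part of this patch):

static enum _ecore_status_t
query_mfw_topology_example(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
		return rc;

	rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
		return rc;

	/* ECORE_NOTIMPL simply means the driver keeps its default affinity
	 * and PPFID settings.
	 */
	return ECORE_SUCCESS;
}
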
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index cfb9f99d..7327074f 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -185,6 +185,12 @@ enum ecore_ov_driver_state {
ECORE_OV_DRIVER_STATE_ACTIVE
};
+enum ecore_ov_eswitch {
+ ECORE_OV_ESWITCH_NONE,
+ ECORE_OV_ESWITCH_VEB,
+ ECORE_OV_ESWITCH_VEPA
+};
+
#define ECORE_MAX_NPIV_ENTRIES 128
#define ECORE_WWN_SIZE 8
struct ecore_fc_npiv_tbl {
@@ -521,6 +527,10 @@ union ecore_mfw_tlv_data {
struct ecore_mfw_tlv_iscsi iscsi;
};
+enum ecore_hw_info_change {
+ ECORE_HW_INFO_CHANGE_OVLAN,
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -597,6 +607,7 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
*
* @param p_dev - ecore dev pointer
* @param p_ptt
+ * @param p_transceiver_state - transceiver state.
* @param p_transceiver_type - media type value
*
* @return enum _ecore_status_t -
@@ -605,6 +616,7 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
+ u32 *p_transceiver_state,
					      u32 *p_transceiver_type);
/**
@@ -810,6 +822,32 @@ enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mtu);
/**
+ * @brief Send MAC address to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mac - MAC address
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 *mac);
+
+/**
+ * @brief Send eswitch mode to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param eswitch - eswitch mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_ov_eswitch eswitch);
+
+/**
* @brief Set LED status
*
* @param p_hwfn
@@ -905,7 +943,7 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
- u32 addr, u8 *p_buf, u32 len);
+ u32 addr, u8 *p_buf, u32 *p_len);
/**
* @brief Read from nvm
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 721b8c15..3860e1a5 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -390,147 +390,146 @@
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021
-#define RUNTIME_ARRAY_SIZE 43023
+#define RUNTIME_ARRAY_SIZE 43022
/* Init Callbacks */
#define DMAE_READY_CB 0
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index b43baf9d..49a5ff55 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -515,6 +515,10 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
rl_update->rl_id_first = params->rl_id_first;
rl_update->rl_id_last = params->rl_id_last;
rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
+ rl_update->dcqcn_reset_alpha_on_idle =
+ params->dcqcn_reset_alpha_on_idle;
+ rl_update->rl_bc_stage_th = params->rl_bc_stage_th;
+ rl_update->rl_timer_stage_th = params->rl_timer_stage_th;
rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
rl_update->rl_max_rate =
OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
@@ -529,12 +533,14 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, dcqcn_reset_alpha_on_idle %x, rl_bc_stage_th %x, rl_timer_stage_th %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
rl_update->qcn_update_param_flg,
rl_update->dcqcn_update_param_flg,
rl_update->rl_init_flg, rl_update->rl_start_flg,
rl_update->rl_stop_flg, rl_update->rl_id_first,
rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
+ rl_update->dcqcn_reset_alpha_on_idle,
+ rl_update->rl_bc_stage_th, rl_update->rl_timer_stage_th,
rl_update->rl_bc_rate, rl_update->rl_max_rate,
rl_update->rl_r_ai, rl_update->rl_r_hai,
rl_update->dcqcn_g, rl_update->dcqcn_k_us,
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index e57414cf..524fe57a 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -119,6 +119,9 @@ struct ecore_rl_update_params {
u8 rl_stop_flg;
u8 rl_id_first;
u8 rl_id_last;
+ u8 dcqcn_reset_alpha_on_idle;
+ u8 rl_bc_stage_th;
+ u8 rl_timer_stage_th;
	u8 rl_dc_qcn_flg; /* If set, RL will be used for DCQCN */
u32 rl_bc_rate; /* Byte Counter Limit */
u32 rl_max_rate; /* Maximum rate in Mbps resolution */
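
The three new fields are copied verbatim into the ramrod by ecore_sp_rl_update(), as shown in the ecore_sp_commands.c hunk above. A hedged example of a caller populating them, assuming a valid p_hwfn and placeholder values:

	struct ecore_rl_update_params params;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.rl_id_first = 0;
	params.rl_id_last = 0;
	params.rl_dc_qcn_flg = 1;             /* rate limiter drives DCQCN */
	params.dcqcn_reset_alpha_on_idle = 1; /* field added by this patch */
	params.rl_bc_stage_th = 0;            /* field added by this patch */
	params.rl_timer_stage_th = 0;         /* field added by this patch */

	if (ecore_sp_rl_update(p_hwfn, &params) != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "RL update ramrod failed\n");
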
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 776c86f7..88ad961e 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -282,20 +282,30 @@ ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
ecore_spq_async_comp_cb cb;
+ enum _ecore_status_t rc;
- if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+ DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
return ECORE_INVAL;
+ }
cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
- if (cb) {
- return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
- &p_eqe->data, p_eqe->fw_return_code);
- } else {
+ if (!cb) {
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return ECORE_INVAL;
}
+
+ rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, true,
+ "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]",
+ rc, p_eqe->opcode, p_eqe->echo,
+ p_eqe->fw_return_code);
+
+ return rc;
}
enum _ecore_status_t
@@ -339,10 +349,16 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
{
struct ecore_eq *p_eq = cookie;
struct ecore_chain *p_chain = &p_eq->chain;
- enum _ecore_status_t rc = 0;
+ u16 fw_cons_idx = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!p_hwfn->p_spq) {
+ DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
+ return ECORE_INVAL;
+ }
/* take a snapshot of the FW consumer */
- u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+ fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
@@ -358,7 +374,8 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
if (!p_eqe) {
- rc = ECORE_INVAL;
+ DP_ERR(p_hwfn,
+ "Unexpected NULL chain consumer entry\n");
break;
}
@@ -374,15 +391,13 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
*/
p_eqe->flags);
- if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
- if (ecore_async_event_completion(p_hwfn, p_eqe))
- rc = ECORE_INVAL;
- } else if (ecore_spq_completion(p_hwfn,
- p_eqe->echo,
- p_eqe->fw_return_code,
- &p_eqe->data)) {
- rc = ECORE_INVAL;
- }
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC))
+ ecore_async_event_completion(p_hwfn, p_eqe);
+ else
+ ecore_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data);
ecore_chain_recycle_consumed(p_chain);
}
@@ -928,12 +943,11 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *found = OSAL_NULL;
enum _ecore_status_t rc;
- if (!p_hwfn)
- return ECORE_INVAL;
-
p_spq = p_hwfn->p_spq;
- if (!p_spq)
+ if (!p_spq) {
+ DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
return ECORE_INVAL;
+ }
OSAL_SPIN_LOCK(&p_spq->lock);
OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
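
After the rework above, a per-protocol async callback that fails is logged by ecore_async_event_completion() instead of being silently folded into ECORE_INVAL. A minimal callback skeleton matching the call site shown in the diff (parameter types are inferred from that call site, and the opcode value is a placeholder):

static enum _ecore_status_t
example_async_cb(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo,
		 union event_ring_data *data, u8 fw_return_code)
{
	switch (opcode) {
	case 0: /* placeholder opcode handled by this protocol */
		return ECORE_SUCCESS;
	default:
		/* Returning an error here now triggers the new DP_NOTICE in
		 * the dispatcher.
		 */
		DP_NOTICE(p_hwfn, false, "Unhandled EQE opcode %d\n", opcode);
		return ECORE_INVAL;
	}
}
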
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index f7ebf7ad..7d73ef9f 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -31,7 +31,7 @@ static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
union event_ring_data *data,
u8 fw_return_code);
-const char *ecore_channel_tlvs_string[] = {
+const char *qede_ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
"CHANNEL_TLV_VPORT_START",
@@ -218,7 +218,7 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
if (p_queue->cids[i].p_cid &&
@@ -240,7 +240,7 @@ static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
enum ecore_iov_validate_q_mode mode,
bool b_is_tx)
{
- int i;
+ u32 i;
if (mode == ECORE_IOV_VALIDATE_Q_NA)
return true;
@@ -979,10 +979,12 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
p_hwfn->rel_pf_id,
vf->abs_vf_id, 1);
+
ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- p_block->igu_sb_id * sizeof(u64), 2, 0);
+ p_block->igu_sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
}
vf->num_sbs = (u8)num_rx_queues;
@@ -1278,7 +1280,7 @@ static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
ECORE_MSG_IOV,
"VF[%d]: vf pf channel locked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[tlv]);
+ qede_ecore_channel_tlvs_string[tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
@@ -1296,7 +1298,7 @@ static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
ECORE_MSG_IOV,
"VF[%d]: vf pf channel unlocked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[expected_tlv]);
+ qede_ecore_channel_tlvs_string[expected_tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
@@ -1336,7 +1338,7 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
if (ecore_iov_tlv_supported(tlv->type))
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV number %d: type %s, length %d\n",
- i, ecore_channel_tlvs_string[tlv->type],
+ i, qede_ecore_channel_tlvs_string[tlv->type],
tlv->length);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -1968,7 +1970,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
if ((events & (1 << MAC_ADDR_FORCED)) ||
- p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1989,7 +1992,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
return rc;
}
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
p_vf->configured_features |=
1 << VFPF_BULLETIN_MAC_ADDR;
else
@@ -2085,8 +2089,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_sp_vport_start_params params = { 0 };
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct ecore_sp_vport_start_params params;
struct vfpf_vport_start_tlv *start;
u8 status = PFVF_STATUS_SUCCESS;
struct ecore_vf_info *vf_info;
@@ -2137,6 +2141,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
}
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_start_params));
params.tpa_mode = start->tpa_mode;
params.remove_inner_vlan = start->inner_vlan_removal;
params.tx_switching = true;
@@ -2156,7 +2161,9 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
- params.check_mac = true;
+
+ /* Non-trusted VFs should enable control frame filtering */
+ params.check_mac = !vf->p_vf_info.is_trusted_configured;
rc = ecore_sp_eth_vport_start(p_hwfn, &params);
if (rc != ECORE_SUCCESS) {
@@ -2912,7 +2919,7 @@ void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
if (p_tlv->type == req_type) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Extended tlv type %s, length %d found\n",
- ecore_channel_tlvs_string[p_tlv->type],
+ qede_ecore_channel_tlvs_string[p_tlv->type],
p_tlv->length);
return p_tlv;
}
@@ -3351,6 +3358,15 @@ ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
return ECORE_SUCCESS;
+ /* Since we don't have the implementation of the logic for removing
+ * a forced MAC and restoring shadow MAC, let's not worry about
+ * processing shadow copies of MAC as long as VF trust mode is ON,
+ * to keep things simple.
+ */
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
+ return ECORE_SUCCESS;
+
/* First remove entries and then add new ones */
if (p_params->opcode == ECORE_FILTER_REMOVE) {
for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
@@ -3653,7 +3669,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid;
u16 rx_coal, tx_coal;
u16 qid;
- int i;
+ u32 i;
req = &mbx->req_virt->update_coalesce;
@@ -3733,7 +3749,8 @@ ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid;
struct ecore_vf_info *vf;
struct ecore_ptt *p_ptt;
- int i, rc = 0;
+ int rc = 0;
+ u32 i;
if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
DP_NOTICE(p_hwfn, true,
@@ -4415,17 +4432,23 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
return;
}
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured) {
feature = 1 << VFPF_BULLETIN_MAC_ADDR;
- else
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << MAC_ADDR_FORCED);
+ } else {
feature = 1 << MAC_ADDR_FORCED;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ }
- OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
+ mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- /* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
@@ -4460,7 +4483,8 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured)
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
return ECORE_SUCCESS;
@@ -4780,6 +4804,32 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
p_link->speed);
}
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+ int vfid, u32 rate)
+{
+ struct ecore_vf_info *vf;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn, true,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return ECORE_INVAL;
+ }
+ }
+
+ vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
+ if (!vf) {
+ DP_NOTICE(p_dev, true,
+ "Getting vf info failed, can't set min rate\n");
+ return ECORE_INVAL;
+ }
+
+ return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate);
+}
+
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,
@@ -4890,7 +4940,7 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}
-enum _ecore_status_t
+int
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_wfq_data *vf_vp_wfq;
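
The trust-mode change above makes MAC_ADDR_FORCED and VFPF_BULLETIN_MAC_ADDR mutually exclusive bits in the bulletin's valid_bitmap. Reduced to a standalone sketch (bitmap and trusted stand in for the vf_info fields used in the diff):

	u64 bitmap = 0;
	bool trusted = true; /* vf_info->p_vf_info.is_trusted_configured */

	if (trusted) {
		bitmap |= 1ULL << VFPF_BULLETIN_MAC_ADDR;
		bitmap &= ~(1ULL << MAC_ADDR_FORCED);        /* trust disables forced MAC */
	} else {
		bitmap |= 1ULL << MAC_ADDR_FORCED;
		bitmap &= ~(1ULL << VFPF_BULLETIN_MAC_ADDR); /* forced MAC wins otherwise */
	}
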
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index d2213f79..3ba6a0cf 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -32,7 +32,7 @@ static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"preparing to send %s tlv over vf pf channel\n",
- ecore_channel_tlvs_string[type]);
+ qede_ecore_channel_tlvs_string[type]);
/* Reset Request offset */
p_iov->offset = (u8 *)(p_iov->vf2pf_request);
@@ -565,13 +565,20 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
phys,
p_iov->bulletin.
size);
+ if (!p_iov->bulletin.p_virt) {
+ DP_NOTICE(p_hwfn, false, "Failed to alloc bulletin memory\n");
+ goto free_pf2vf_reply;
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
p_iov->bulletin.size);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex)) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate p_iov->mutex\n");
+ goto free_bulletin_mem;
+ }
#endif
OSAL_MUTEX_INIT(&p_iov->mutex);
@@ -609,6 +616,16 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
return rc;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+free_bulletin_mem:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys,
+ p_iov->bulletin.size);
+#endif
+free_pf2vf_reply:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
free_vf2pf_request:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
p_iov->vf2pf_request_phys,
@@ -1167,7 +1184,7 @@ ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
return !!p_data->sge_tpa_params;
default:
DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
- tlv, ecore_channel_tlvs_string[tlv]);
+ tlv, qede_ecore_channel_tlvs_string[tlv]);
return false;
}
}
@@ -1191,7 +1208,7 @@ ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
if (p_resp && p_resp->hdr.status)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV[%d] type %s Configuration %s\n",
- tlv, ecore_channel_tlvs_string[tlv],
+ tlv, qede_ecore_channel_tlvs_string[tlv],
(p_resp && p_resp->hdr.status) ? "succeeded"
: "failed");
}
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index c30677ab..c7ecb01c 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -698,6 +698,6 @@ enum {
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};
-extern const char *ecore_channel_tlvs_string[];
+extern const char *qede_ecore_channel_tlvs_string[];
#endif /* __ECORE_VF_PF_IF_H__ */
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index abfa6854..9a401ed4 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -178,6 +178,11 @@ struct eth_tx_1st_bd_flags {
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
-/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type) */
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type). In case of
+ * GRE tunnel, this flag means GRE CSO, and in this case GRE checksum field
+ * must be present.
+ */
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 81aa88e7..13c2e2d1 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1258,6 +1258,17 @@ struct public_drv_mb {
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
+#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
+#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
+#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
+#define DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID 0x3c000000
+#define DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME 0x3d000000
+#define DRV_MSG_CODE_OEM_UPDATE_BOOT_CFG 0x3e000000
+#define DRV_MSG_CODE_OEM_RESET_TO_DEFAULT 0x3f000000
+#define DRV_MSG_CODE_OV_GET_CURR_CFG 0x40000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
+/* params [31:8] - reserved, [7:0] - bitmap */
+#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
@@ -1467,6 +1478,7 @@ struct public_drv_mb {
/* Param: Password len. Union: Plain Password */
#define DRV_MSG_CODE_ENCRYPT_PASSWORD 0x00360000
+#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000 /* Param: None */
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -1582,6 +1594,16 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
@@ -1677,6 +1699,8 @@ struct public_drv_mb {
#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
#define FW_MSG_CODE_RESOURCE_ALLOC_GEN_ERR 0x37000000
+#define FW_MSG_CODE_GET_OEM_UPDATES_DONE 0x41000000
+
#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
@@ -1778,11 +1802,31 @@ struct public_drv_mb {
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
/* MFW supports DRV_LOAD Timeout */
#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004
+/* MFW support complete IGU cleanup upon FLR */
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP 0x00000080
/* MFW supports virtual link */
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+#define FW_MB_PARAM_OEM_UPDATE_MASK 0xFF
+#define FW_MB_PARAM_OEM_UPDATE_OFFSET 0
+#define FW_MB_PARAM_OEM_UPDATE_BW 0x01
+#define FW_MB_PARAM_OEM_UPDATE_S_TAG 0x02
+#define FW_MB_PARAM_OEM_UPDATE_CFG 0x04
+
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_OFFSET 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_OFFSET 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_OFFSET 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_OFFSET 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF
+#define FW_MB_PARAM_PPFID_BITMAP_OFFSET 0
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
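
The mask/offset pairs above are consumed through GET_MFW_FIELD(), which pastes the _MASK and _OFFSET suffixes onto the field name, roughly ((val) & FIELD##_MASK) >> FIELD##_OFFSET. A worked example for the engine-config response word (0x0000000a is an arbitrary illustration):

	u32 mcp_param = 0x0000000a; /* example response word from the MFW */

	u8 fir_valid = GET_MFW_FIELD(mcp_param, FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
	u8 fir_value = GET_MFW_FIELD(mcp_param, FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
	u8 l2_valid  = GET_MFW_FIELD(mcp_param, FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
	u8 l2_value  = GET_MFW_FIELD(mcp_param, FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);

	/* With 0x0000000a: fir_valid = 0, fir_value = 1, l2_valid = 0, l2_value = 1 */
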
diff --git a/drivers/net/qede/base/meson.build b/drivers/net/qede/base/meson.build
new file mode 100644
index 00000000..71b89737
--- /dev/null
+++ b/drivers/net/qede/base/meson.build
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+sources = [
+ 'bcm_osal.c',
+ 'ecore_cxt.c',
+ 'ecore_dcbx.c',
+ 'ecore_dev.c',
+ 'ecore_hw.c',
+ 'ecore_init_fw_funcs.c',
+ 'ecore_init_ops.c',
+ 'ecore_int.c',
+ 'ecore_l2.c',
+ 'ecore_mcp.c',
+ 'ecore_sp_commands.c',
+ 'ecore_spq.c',
+ 'ecore_sriov.c',
+ 'ecore_vf.c',
+]
+
+
+error_cflags = [
+ '-Wno-unused-parameter',
+ '-Wno-sign-compare',
+ '-Wno-missing-prototypes',
+ '-Wno-cast-qual',
+ '-Wno-unused-function',
+ '-Wno-unused-variable',
+ '-Wno-strict-aliasing',
+ '-Wno-missing-prototypes',
+ '-Wno-unused-value',
+ '-Wno-format-nonliteral',
+ '-Wno-shift-negative-value',
+ '-Wno-unused-but-set-variable',
+ '-Wno-missing-declarations',
+ '-Wno-maybe-uninitialized',
+ '-Wno-strict-prototypes',
+ '-Wno-shift-negative-value',
+ '-Wno-implicit-fallthrough',
+ '-Wno-format-extra-args',
+ '-Wno-visibility',
+ '-Wno-empty-body',
+ '-Wno-invalid-source-encoding',
+ '-Wno-sometimes-uninitialized',
+ '-Wno-pointer-bool-conversion',
+]
+c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('qede_base', sources,
+ dependencies: static_rte_net,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 402f6204..be59f773 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -8,13 +8,13 @@
0
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
- 0xfff << 0)
+ 0xfffUL << 0)
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
12
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
- 0xfff << 12)
+ 0xfffUL << 12)
#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
24
@@ -322,6 +322,21 @@
0x180820UL
#define IGU_REG_ATTN_MSG_ADDR_H \
0x180824UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define IGU_REG_ATTENTION_ACK_BITS \
+ 0x180838UL
+#define IGU_REG_PBA_STS_PF \
+ 0x180d20UL
+#define IGU_REG_PF_FUNCTIONAL_CLEANUP \
+ 0x181210UL
+#define IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED \
+ 0x18042cUL
+#define IGU_REG_PBA_STS_PF_SIZE 5
+#define IGU_REG_PBA_STS_PF \
+ 0x180d20UL
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
#define CAU_REG_SB_ADDR_MEMORY \
@@ -351,9 +366,9 @@
#define IGU_REG_COMMAND_REG_CTRL \
0x180848UL
#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
- 0x1 << 1)
+ 0x1UL << 1)
#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
- 0x1 << 0)
+ 0x1UL << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define MISCS_REG_GENERIC_POR_0 \
@@ -361,7 +376,7 @@
#define MCP_REG_NVM_CFG4 \
0xe0642cUL
#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
- 0x7 << 0)
+ 0x7UL << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
@@ -394,7 +409,7 @@
#define XMAC_REG_TX_CTRL_LO 0x210020UL
#define XMAC_REG_CTRL 0x210000UL
#define XMAC_REG_RX_CTRL 0x210030UL
-#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12)
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1UL << 12)
#define MISC_REG_CLK_100G_MODE 0x008c10UL
#define MISC_REG_OPTE_MODE 0x008c0cUL
#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
@@ -424,16 +439,16 @@
#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
-#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
-#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
+#define XMAC_REG_CTRL_TX_EN (0x1UL << 0)
+#define XMAC_REG_CTRL_RX_EN (0x1UL << 1)
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */
-#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xffUL << 16)
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
-#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xffUL << 16)
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */
-#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfffUL << 0)
#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
-#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfffUL << 0)
#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
@@ -521,7 +536,7 @@
#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
#define MCP_REG_CPU_STATE 0xe05004UL
#define MCP_REG_CPU_MODE 0xe05000UL
-#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10)
+#define MCP_REG_CPU_MODE_SOFT_HALT (0x1UL << 10)
#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
@@ -550,15 +565,15 @@
#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
-#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10)
+#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1UL << 10)
#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
#define TM_REG_INT_STS_1 0x2c0190UL
-#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6)
-#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1UL << 6)
+#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1UL << 5)
#define TM_REG_INT_MASK_1 0x2c0194UL
-#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5)
-#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6)
+#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1UL << 5)
+#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1UL << 6)
#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
@@ -1172,10 +1187,10 @@
#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL
#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL
#define XMAC_REG_CTRL_BB 0x210000UL
-#define XMAC_REG_CTRL_TX_EN_BB (0x1 << 0)
-#define XMAC_REG_CTRL_RX_EN_BB (0x1 << 1)
+#define XMAC_REG_CTRL_TX_EN_BB (0x1UL << 0)
+#define XMAC_REG_CTRL_RX_EN_BB (0x1UL << 1)
#define XMAC_REG_RX_CTRL_BB 0x210030UL
-#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1 << 12)
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1UL << 12)
#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5 0x2aaf98UL
#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5 0x2aaf9cUL
@@ -1202,15 +1217,26 @@
#define DORQ_REG_DPM_FORCE_ABORT 0x1009d8UL
#define DORQ_REG_PF_OVFL_STICKY 0x1009d0UL
#define DORQ_REG_INT_STS 0x100180UL
- #define DORQ_REG_INT_STS_DB_DROP (0x1 << 1)
- #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1 << 2)
- #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1 << 3)
+ #define DORQ_REG_INT_STS_DB_DROP (0x1UL << 1)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1UL << 2)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1UL << 3)
#define DORQ_REG_DB_DROP_DETAILS_REL 0x100a28UL
#define DORQ_REG_INT_STS_WR 0x100188UL
#define DORQ_REG_DB_DROP_DETAILS_REASON 0x100a20UL
#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
- #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
+ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
#define RSS_REG_RSS_RAM_MASK 0x238c10UL
+
+#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
+#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL
+#define DORQ_REG_TAG1_OVRD_MODE 0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 0x1008c8UL
+#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL
+#define NIG_REG_LLH_PPFID2PFID_TBL_0 0x501970UL
+#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
+#define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL 0x501b98UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 0x501b40UL
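
The blanket change from 0x1 to 0x1UL in this header keeps shifted register masks unsigned and at least 32 bits wide. A two-line illustration of why this matters once a mask reaches bit 31 (assuming a 32-bit int, as on the targets this driver supports):

	/* 0x1 is a signed int, so shifting into bit 31 overflows the signed
	 * range and the value sign-extends when widened.
	 */
	u64 bad  = (u64)(0x1 << 31);   /* typically 0xffffffff80000000 */
	u64 good = (u64)(0x1UL << 31); /* 0x0000000080000000 */
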
diff --git a/drivers/net/qede/meson.build b/drivers/net/qede/meson.build
new file mode 100644
index 00000000..12388a68
--- /dev/null
+++ b/drivers/net/qede/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'qede_ethdev.c',
+ 'qede_filter.c',
+ 'qede_main.c',
+ 'qede_rxtx.c',
+)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index df52ea92..518673dc 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -14,112 +14,10 @@ int qede_logtype_init;
int qede_logtype_driver;
static const struct qed_eth_ops *qed_ops;
-#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
-/* VXLAN tunnel classification mapping */
-const struct _qede_udp_tunn_types {
- uint16_t rte_filter_type;
- enum ecore_filter_ucast_type qede_type;
- enum ecore_tunn_clss qede_tunn_clss;
- const char *string;
-} qede_tunn_types[] = {
- {
- ETH_TUNNEL_FILTER_OMAC,
- ECORE_FILTER_MAC,
- ECORE_TUNN_CLSS_MAC_VLAN,
- "outer-mac"
- },
- {
- ETH_TUNNEL_FILTER_TENID,
- ECORE_FILTER_VNI,
- ECORE_TUNN_CLSS_MAC_VNI,
- "vni"
- },
- {
- ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_INNER_MAC,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-mac"
- },
- {
- ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_INNER_VLAN,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-vlan"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
- ECORE_FILTER_MAC_VNI_PAIR,
- ECORE_TUNN_CLSS_MAC_VNI,
- "outer-mac and vni"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-mac and inner-mac"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-mac and inner-vlan"
- },
- {
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_INNER_MAC_VNI_PAIR,
- ECORE_TUNN_CLSS_INNER_MAC_VNI,
- "vni and inner-mac",
- },
- {
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "vni and inner-vlan",
- },
- {
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_INNER_PAIR,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-mac and inner-vlan",
- },
- {
- ETH_TUNNEL_FILTER_OIP,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-IP"
- },
- {
- ETH_TUNNEL_FILTER_IIP,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "inner-IP"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_IVLAN"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_IVLAN_TENID"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_TENID,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_TENID"
- },
- {
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "OMAC_TENID_IMAC"
- },
-};
+#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
struct rte_qede_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -399,7 +297,7 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
(info->mfw_rev >> 16) & 0xff,
(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
- DP_INFO(edev, " Firmware file : %s\n", fw_file);
+ DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
DP_INFO(edev, "*********************************\n");
}
@@ -614,14 +512,6 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
return 0;
}
-static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
-{
- memset(ucast, 0, sizeof(struct ecore_filter_ucast));
- ucast->is_rx_filter = true;
- ucast->is_tx_filter = true;
- /* ucast->assert_on_error = true; - For debug */
-}
-
static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
enum qed_filter_rx_mode_type type)
@@ -660,167 +550,7 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
ECORE_SPQ_MODE_CB, NULL);
}
-static int
-qede_tunnel_update(struct qede_dev *qdev,
- struct ecore_tunnel_info *tunn_info)
-{
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_hwfn *p_hwfn;
- struct ecore_ptt *p_ptt;
- int i;
-
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- if (IS_PF(edev)) {
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Can't acquire PTT\n");
- return -EAGAIN;
- }
- } else {
- p_ptt = NULL;
- }
-
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
- tunn_info, ECORE_SPQ_MODE_CB, NULL);
- if (IS_PF(edev))
- ecore_ptt_release(p_hwfn, p_ptt);
-
- if (rc != ECORE_SUCCESS)
- break;
- }
-
- return rc;
-}
-
-static int
-qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- if (qdev->vxlan.enable == enable)
- return ECORE_SUCCESS;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.vxlan.b_update_mode = true;
- tunn.vxlan.b_mode_enabled = enable;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
- tunn.vxlan.tun_cls = clss;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->vxlan.enable = enable;
- qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
- DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
- enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- }
-
- return rc;
-}
-
-static int
-qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.l2_geneve.b_update_mode = true;
- tunn.l2_geneve.b_mode_enabled = enable;
- tunn.ip_geneve.b_update_mode = true;
- tunn.ip_geneve.b_mode_enabled = enable;
- tunn.l2_geneve.tun_cls = clss;
- tunn.ip_geneve.tun_cls = clss;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->geneve.enable = enable;
- qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
- DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
- enable ? "enabled" : "disabled", qdev->geneve.udp_port);
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- clss);
- }
-
- return rc;
-}
-
-static int
-qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.ip_gre.b_update_mode = true;
- tunn.ip_gre.b_mode_enabled = enable;
- tunn.ip_gre.tun_cls = clss;
- tunn.ip_gre.tun_cls = clss;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->ipgre.enable = enable;
- DP_INFO(edev, "IPGRE is %s\n",
- enable ? "enabled" : "disabled");
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- clss);
- }
-
- return rc;
-}
-
-static int
-qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- enum rte_eth_tunnel_type tunn_type, bool enable)
-{
- int rc = -EINVAL;
-
- switch (tunn_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- rc = qede_vxlan_enable(eth_dev, clss, enable);
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- rc = qede_geneve_enable(eth_dev, clss, enable);
- break;
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
- rc = qede_ipgre_enable(eth_dev, clss, enable);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-static int
+int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
{
@@ -941,7 +671,7 @@ static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
return 0;
}
-static enum _ecore_status_t
+enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
{
@@ -1033,7 +763,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
+void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_sp_vport_update_params params;
@@ -1359,7 +1089,7 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "Device is stopped\n");
}
-const char *valid_args[] = {
+static const char * const valid_args[] = {
QEDE_NPAR_TX_SWITCHING,
QEDE_VF_TX_SWITCHING,
NULL,
@@ -1483,7 +1213,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
@@ -1554,7 +1284,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
@@ -1651,14 +1380,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
PMD_INIT_FUNC_TRACE(edev);
-#endif
-
- enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
@@ -1668,12 +1394,10 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
-#endif
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
qed_configure_filter_rx_mode(eth_dev,
@@ -2499,19 +2223,18 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
struct qede_fastpath *fp;
uint32_t max_rx_pkt_len;
uint32_t frame_size;
- uint16_t rx_buf_size;
uint16_t bufsz;
bool restart = false;
- int i;
+ int i, rc;
PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
- max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
+ frame_size = max_rx_pkt_len;
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
- ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
+ QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
@@ -2539,14 +2262,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = frame_size;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+ /* cache align the mbuf size to simplify rx_buf_size
+ * calculation
+ */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+ rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
+ if (rc < 0)
+ return rc;
+
+ fp->rxq->rx_buf_size = rc;
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
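The rx buffer sizing above floors the mbuf data room to a cache-line multiple before handing it to qede_calc_rx_buf_size(). A small stand-alone sketch of that flooring step (the macro below is a local stand-in, not the driver's QEDE_FLOOR_TO_CACHE_LINE_SIZE definition):

#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64u
#define FLOOR_TO_CACHE_LINE(x) ((uint16_t)((x) & ~(CACHE_LINE - 1)))

int main(void)
{
	uint16_t bufsz = 2197;  /* e.g. mbuf data room minus headroom, an odd size */
	printf("%u -> %u\n", bufsz, FLOOR_TO_CACHE_LINE(bufsz));  /* 2197 -> 2176 */
	return 0;
}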
@@ -2569,411 +2293,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
static int
-qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn; /* @DPDK */
- uint16_t udp_port;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- memset(&tunn, 0, sizeof(tunn));
-
- switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
- DP_ERR(edev, "UDP port %u doesn't exist\n",
- tunnel_udp->udp_port);
- return ECORE_INVAL;
- }
- udp_port = 0;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u\n",
- tunn.vxlan_port.port);
- return rc;
- }
-
- qdev->vxlan.udp_port = udp_port;
- /* If the request is to delete UDP port and if the number of
- * VXLAN filters have reached 0 then VxLAN offload can be be
- * disabled.
- */
- if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
- return qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false);
-
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
- DP_ERR(edev, "UDP port %u doesn't exist\n",
- tunnel_udp->udp_port);
- return ECORE_INVAL;
- }
-
- udp_port = 0;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u\n",
- tunn.vxlan_port.port);
- return rc;
- }
-
- qdev->vxlan.udp_port = udp_port;
- /* If the request is to delete UDP port and if the number of
- * GENEVE filters have reached 0 then GENEVE offload can be be
- * disabled.
- */
- if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
- return qede_geneve_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false);
-
- break;
-
- default:
- return ECORE_INVAL;
- }
-
- return 0;
-
-}
-static int
-qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn; /* @DPDK */
- uint16_t udp_port;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- memset(&tunn, 0, sizeof(tunn));
-
- switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
- DP_INFO(edev,
- "UDP port %u for VXLAN was already configured\n",
- tunnel_udp->udp_port);
- return ECORE_SUCCESS;
- }
-
- /* Enable VxLAN tunnel with default MAC/VLAN classification if
- * it was not enabled while adding VXLAN filter before UDP port
- * update.
- */
- if (!qdev->vxlan.enable) {
- rc = qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to enable VXLAN "
- "prior to updating UDP port\n");
- return rc;
- }
- }
- udp_port = tunnel_udp->udp_port;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
- udp_port);
- return rc;
- }
-
- DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
-
- qdev->vxlan.udp_port = udp_port;
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
- DP_INFO(edev,
- "UDP port %u for GENEVE was already configured\n",
- tunnel_udp->udp_port);
- return ECORE_SUCCESS;
- }
-
- /* Enable GENEVE tunnel with default MAC/VLAN classification if
- * it was not enabled while adding GENEVE filter before UDP port
- * update.
- */
- if (!qdev->geneve.enable) {
- rc = qede_geneve_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to enable GENEVE "
- "prior to updating UDP port\n");
- return rc;
- }
- }
- udp_port = tunnel_udp->udp_port;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
- udp_port);
- return rc;
- }
-
- DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
-
- qdev->geneve.udp_port = udp_port;
- break;
- default:
- return ECORE_INVAL;
- }
-
- return 0;
-}
-
-static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
- uint32_t *clss, char *str)
-{
- uint16_t j;
- *clss = MAX_ECORE_TUNN_CLSS;
-
- for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
- if (filter == qede_tunn_types[j].rte_filter_type) {
- *type = qede_tunn_types[j].qede_type;
- *clss = qede_tunn_types[j].qede_tunn_clss;
- strcpy(str, qede_tunn_types[j].string);
- return;
- }
- }
-}
-
-static int
-qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
- const struct rte_eth_tunnel_filter_conf *conf,
- uint32_t type)
-{
- /* Init commmon ucast params first */
- qede_set_ucast_cmn_params(ucast);
-
- /* Copy out the required fields based on classification type */
- ucast->type = type;
-
- switch (type) {
- case ECORE_FILTER_VNI:
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_VLAN:
- ucast->vlan = conf->inner_vlan;
- break;
- case ECORE_FILTER_MAC:
- memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
- break;
- case ECORE_FILTER_INNER_MAC:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- break;
- case ECORE_FILTER_MAC_VNI_PAIR:
- memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_MAC_VNI_PAIR:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_PAIR:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vlan = conf->inner_vlan;
- break;
- default:
- return -EINVAL;
- }
-
- return ECORE_SUCCESS;
-}
-
-static int
-_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
- const struct rte_eth_tunnel_filter_conf *conf,
- __attribute__((unused)) enum rte_filter_op filter_op,
- enum ecore_tunn_clss *clss,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_ucast ucast = {0};
- enum ecore_filter_ucast_type type;
- uint16_t filter_type = 0;
- char str[80];
- int rc;
-
- filter_type = conf->filter_type;
- /* Determine if the given filter classification is supported */
- qede_get_ecore_tunn_params(filter_type, &type, clss, str);
- if (*clss == MAX_ECORE_TUNN_CLSS) {
- DP_ERR(edev, "Unsupported filter type\n");
- return -EINVAL;
- }
- /* Init tunnel ucast params */
- rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
- conf->filter_type);
- return rc;
- }
- DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
- str, filter_op, ucast.type);
-
- ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
-
- /* Skip MAC/VLAN if filter is based on VNI */
- if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
- rc = qede_mac_int_ops(eth_dev, &ucast, add);
- if ((rc == 0) && add) {
- /* Enable accept anyvlan */
- qede_config_accept_any_vlan(qdev, true);
- }
- } else {
- rc = qede_ucast_filter(eth_dev, &ucast, add);
- if (rc == 0)
- rc = ecore_filter_ucast_cmd(edev, &ucast,
- ECORE_SPQ_MODE_CB, NULL);
- }
-
- return rc;
-}
-
-static int
-qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- const struct rte_eth_tunnel_filter_conf *conf)
+qede_dev_reset(struct rte_eth_dev *dev)
{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
- bool add;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- add = true;
- break;
- case RTE_ETH_FILTER_DELETE:
- add = false;
- break;
- default:
- DP_ERR(edev, "Unsupported operation %d\n", filter_op);
- return -EINVAL;
- }
-
- if (IS_VF(edev))
- return qede_tunn_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN,
- conf->tunnel_type, add);
-
- rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- if (add) {
- if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
- qdev->vxlan.num_filters++;
- qdev->vxlan.filter_type = conf->filter_type;
- } else { /* GENEVE */
- qdev->geneve.num_filters++;
- qdev->geneve.filter_type = conf->filter_type;
- }
-
- if (!qdev->vxlan.enable || !qdev->geneve.enable ||
- !qdev->ipgre.enable)
- return qede_tunn_enable(eth_dev, clss,
- conf->tunnel_type,
- true);
- } else {
- if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
- qdev->vxlan.num_filters--;
- else /*GENEVE*/
- qdev->geneve.num_filters--;
-
- /* Disable VXLAN if VXLAN filters become 0 */
- if ((qdev->vxlan.num_filters == 0) ||
- (qdev->geneve.num_filters == 0))
- return qede_tunn_enable(eth_dev, clss,
- conf->tunnel_type,
- false);
- }
-
- return 0;
-}
+ int ret;
-int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_tunnel_filter_conf *filter_conf =
- (struct rte_eth_tunnel_filter_conf *)arg;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_TUNNEL:
- switch (filter_conf->tunnel_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
- DP_INFO(edev,
- "Packet steering to the specified Rx queue"
- " is not supported with UDP tunneling");
- return(qede_tunn_filter_config(eth_dev, filter_op,
- filter_conf));
- case RTE_TUNNEL_TYPE_TEREDO:
- case RTE_TUNNEL_TYPE_NVGRE:
- case RTE_L2_TUNNEL_TYPE_E_TAG:
- DP_ERR(edev, "Unsupported tunnel type %d\n",
- filter_conf->tunnel_type);
- return -EINVAL;
- case RTE_TUNNEL_TYPE_NONE:
- default:
- return 0;
- }
- break;
- case RTE_ETH_FILTER_FDIR:
- return qede_fdir_filter_conf(eth_dev, filter_op, arg);
- case RTE_ETH_FILTER_NTUPLE:
- return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
- case RTE_ETH_FILTER_MACVLAN:
- case RTE_ETH_FILTER_ETHERTYPE:
- case RTE_ETH_FILTER_FLEXIBLE:
- case RTE_ETH_FILTER_SYN:
- case RTE_ETH_FILTER_HASH:
- case RTE_ETH_FILTER_L2_TUNNEL:
- case RTE_ETH_FILTER_MAX:
- default:
- DP_ERR(edev, "Unsupported filter type %d\n",
- filter_type);
- return -EINVAL;
- }
+ ret = qede_eth_dev_uninit(dev);
+ if (ret)
+ return ret;
- return 0;
+ return qede_eth_dev_init(dev);
}
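The new dev_reset callback simply tears the port down and re-runs init. A hedged application-side sketch of how it is reached through the ethdev API (error handling and the saved configuration are illustrative; Rx/Tx queue re-setup is omitted for brevity):

#include <rte_ethdev.h>

static struct rte_eth_conf saved_conf;   /* assumed captured at initial configure time */

static int recover_port(uint16_t port_id)
{
	int ret;

	rte_eth_dev_stop(port_id);
	ret = rte_eth_dev_reset(port_id);    /* lands in qede_dev_reset() for this PMD */
	if (ret)
		return ret;

	/* reconfigure with the previously used settings; queue setup omitted */
	ret = rte_eth_dev_configure(port_id, 1, 1, &saved_conf);
	if (ret)
		return ret;

	return rte_eth_dev_start(port_id);
}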
static const struct eth_dev_ops qede_eth_dev_ops = {
@@ -2981,9 +2309,11 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
+ .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
+ .dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.dev_set_link_down = qede_dev_set_link_down,
.link_update = qede_link_update,
@@ -3022,9 +2352,11 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
+ .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
+ .dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.dev_set_link_down = qede_dev_set_link_down,
.link_update = qede_link_update,
@@ -3257,7 +2589,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
- SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->arfs_info.arfs_list_head);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
SLIST_INIT(&adapter->mc_list_head);
@@ -3311,12 +2643,10 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
-#endif
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -3329,11 +2659,6 @@ static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
- if (eth_dev->data->mac_addrs)
- rte_free(eth_dev->data->mac_addrs);
-
- eth_dev->data->mac_addrs = NULL;
-
return 0;
}
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 6e9a5b4b..c06274d9 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -44,7 +44,7 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 9
+#define QEDE_PMD_VERSION_MINOR 10
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
@@ -118,7 +118,7 @@
-extern char fw_file[];
+extern char qede_fw_file[];
/* Number of PF connections - 32 RX + 32 TX */
#define QEDE_PF_NUM_CONNS (64)
@@ -151,18 +151,48 @@ struct qede_ucast_entry {
SLIST_ENTRY(qede_ucast_entry) list;
};
-struct qede_fdir_entry {
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+struct qede_arfs_tuple {
+ union {
+ uint32_t src_ipv4;
+ uint8_t src_ipv6[IPV6_ADDR_LEN];
+ };
+
+ union {
+ uint32_t dst_ipv4;
+ uint8_t dst_ipv6[IPV6_ADDR_LEN];
+ };
+
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint16_t eth_proto;
+ uint8_t ip_proto;
+
+ /* Describe filtering mode needed for this kind of filter */
+ enum ecore_filter_config_mode mode;
+};
+
+struct qede_arfs_entry {
uint32_t soft_id; /* unused for now */
uint16_t pkt_len; /* actual packet length to match */
uint16_t rx_queue; /* queue to be steered to */
const struct rte_memzone *mz; /* mz used to hold L2 frame */
- SLIST_ENTRY(qede_fdir_entry) list;
+ struct qede_arfs_tuple tuple;
+ SLIST_ENTRY(qede_arfs_entry) list;
};
-struct qede_fdir_info {
+/* Opaque handle for rte flow managed by PMD */
+struct rte_flow {
+ struct qede_arfs_entry entry;
+};
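Since struct rte_flow now wraps an arfs entry, flows created through the generic flow API end up as arfs filters in this PMD. A hedged caller-side sketch of that path (pattern, action and port values are illustrative, not taken from the patch):

#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *steer_udp_to_queue(uint16_t port_id, uint16_t queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(4789) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue q = { .index = queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}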
+
+struct qede_arfs_info {
struct ecore_arfs_config_params arfs;
uint16_t filter_count;
- SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
+ SLIST_HEAD(arfs_list_head, qede_arfs_entry)arfs_list_head;
};
/* IANA assigned default UDP ports for encapsulation protocols */
@@ -207,7 +237,7 @@ struct qede_dev {
struct qede_tunn_params vxlan;
struct qede_tunn_params geneve;
struct qede_tunn_params ipgre;
- struct qede_fdir_info fdir_info;
+ struct qede_arfs_info arfs_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
bool vport_started;
@@ -215,6 +245,15 @@ struct qede_dev {
void *ethdev;
};
+static inline void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+ memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+ ucast->is_rx_filter = true;
+ ucast->is_tx_filter = true;
+ /* ucast->assert_on_error = true; - For debug */
+}
+
+
/* Non-static functions */
int qede_config_rss(struct rte_eth_dev *eth_dev);
@@ -235,9 +274,6 @@ int qede_link_update(struct rte_eth_dev *eth_dev,
int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
enum rte_filter_op op, void *arg);
-int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op, void *arg);
-
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op, void *arg);
@@ -255,5 +291,16 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg);
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
-
+int qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+int qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+
+enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add);
+void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg);
+int qede_ucast_filter(struct rte_eth_dev *eth_dev,
+ struct ecore_filter_ucast *ucast,
+ bool add);
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
deleted file mode 100644
index 83580d04..00000000
--- a/drivers/net/qede/qede_fdir.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2017 Cavium Inc.
- * All rights reserved.
- * www.cavium.com
- */
-
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
-#include <rte_errno.h>
-
-#include "qede_ethdev.h"
-
-#define IP_VERSION (0x40)
-#define IP_HDRLEN (0x5)
-#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
-#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
-#define QEDE_FDIR_IPV4_DEF_TTL (64)
-
-/* Sum of length of header types of L2, L3, L4.
- * L2 : ether_hdr + vlan_hdr + vxlan_hdr
- * L3 : ipv6_hdr
- * L4 : tcp_hdr
- */
-#define QEDE_MAX_FDIR_PKT_LEN (86)
-
-#ifndef IPV6_ADDR_LEN
-#define IPV6_ADDR_LEN (16)
-#endif
-
-#define QEDE_VALID_FLOW(flow_type) \
- ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
-
-/* Note: Flowdir support is only partial.
- * For ex: drop_queue, FDIR masks, flex_conf are not supported.
- * Parameters like pballoc/status fields are irrelevant here.
- */
-int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
-
- /* check FDIR modes */
- switch (fdir->mode) {
- case RTE_FDIR_MODE_NONE:
- qdev->fdir_info.arfs.arfs_enable = false;
- DP_INFO(edev, "flowdir is disabled\n");
- break;
- case RTE_FDIR_MODE_PERFECT:
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- qdev->fdir_info.arfs.arfs_enable = false;
- return -ENOTSUP;
- }
- qdev->fdir_info.arfs.arfs_enable = true;
- DP_INFO(edev, "flowdir is enabled\n");
- break;
- case RTE_FDIR_MODE_PERFECT_TUNNEL:
- case RTE_FDIR_MODE_SIGNATURE:
- case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
- DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
- return -ENOTSUP;
- }
-
- return 0;
-}
-
-void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct qede_fdir_entry *tmp = NULL;
-
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (tmp) {
- if (tmp->mz)
- rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
- rte_free(tmp);
- }
- }
-}
-
-static int
-qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir_filter,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
- struct qede_fdir_entry *tmp = NULL;
- struct qede_fdir_entry *fdir = NULL;
- const struct rte_memzone *mz;
- struct ecore_hwfn *p_hwfn;
- enum _ecore_status_t rc;
- uint16_t pkt_len;
- void *pkt;
-
- if (add) {
- if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
- DP_ERR(edev, "Reached max flowdir filter limit\n");
- return -EINVAL;
- }
- fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
- RTE_CACHE_LINE_SIZE);
- if (!fdir) {
- DP_ERR(edev, "Did not allocate memory for fdir\n");
- return -ENOMEM;
- }
- }
- /* soft_id could have been used as memzone string, but soft_id is
- * not currently used so it has no significance.
- */
- snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
- (unsigned long)rte_get_timer_cycles());
- mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
- SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
- if (!mz) {
- DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
- rte_strerror(rte_errno));
- rc = -rte_errno;
- goto err1;
- }
-
- pkt = mz->addr;
- memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
- pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
- &qdev->fdir_info.arfs);
- if (pkt_len == 0) {
- rc = -EINVAL;
- goto err2;
- }
- DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
- if (add) {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
- DP_INFO(edev, "flowdir filter exist\n");
- rc = 0;
- goto err2;
- }
- }
- } else {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
- break;
- }
- if (!tmp) {
- DP_ERR(edev, "flowdir filter does not exist\n");
- rc = -EEXIST;
- goto err2;
- }
- }
- p_hwfn = ECORE_LEADING_HWFN(edev);
- if (add) {
- if (!qdev->fdir_info.arfs.arfs_enable) {
- /* Force update */
- eth_dev->data->dev_conf.fdir_conf.mode =
- RTE_FDIR_MODE_PERFECT;
- qdev->fdir_info.arfs.arfs_enable = true;
- DP_INFO(edev, "Force enable flowdir in perfect mode\n");
- }
- /* Enable ARFS searcher with updated flow_types */
- ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
- }
- /* configure filter with ECORE_SPQ_MODE_EBLOCK */
- rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
- (dma_addr_t)mz->iova,
- pkt_len,
- fdir_filter->action.rx_queue,
- 0, add);
- if (rc == ECORE_SUCCESS) {
- if (add) {
- fdir->rx_queue = fdir_filter->action.rx_queue;
- fdir->pkt_len = pkt_len;
- fdir->mz = mz;
- SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
- fdir, list);
- qdev->fdir_info.filter_count++;
- DP_INFO(edev, "flowdir filter added, count = %d\n",
- qdev->fdir_info.filter_count);
- } else {
- rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
- rte_free(tmp); /* the node deleted */
- rte_memzone_free(mz); /* temp node allocated */
- qdev->fdir_info.filter_count--;
- DP_INFO(edev, "Fdir filter deleted, count = %d\n",
- qdev->fdir_info.filter_count);
- }
- } else {
- DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
- rc, qdev->fdir_info.filter_count);
- }
-
- /* Disable ARFS searcher if there are no more filters */
- if (qdev->fdir_info.filter_count == 0) {
- memset(&qdev->fdir_info.arfs, 0,
- sizeof(struct ecore_arfs_config_params));
- DP_INFO(edev, "Disabling flowdir\n");
- qdev->fdir_info.arfs.arfs_enable = false;
- ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
- }
- return 0;
-
-err2:
- rte_memzone_free(mz);
-err1:
- if (add)
- rte_free(fdir);
- return rc;
-}
-
-static int
-qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
- DP_ERR(edev, "invalid flow_type input\n");
- return -EINVAL;
- }
-
- if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
- DP_ERR(edev, "invalid queue number %u\n",
- fdir->action.rx_queue);
- return -EINVAL;
- }
-
- if (fdir->input.flow_ext.is_vf) {
- DP_ERR(edev, "flowdir is not supported over VF\n");
- return -EINVAL;
- }
-
- return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
-}
-
-/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
-uint16_t
-qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir,
- void *buff,
- struct ecore_arfs_config_params *params)
-
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- uint16_t *ether_type;
- uint8_t *raw_pkt;
- struct rte_eth_fdir_input *input;
- static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
- struct ipv4_hdr *ip;
- struct ipv6_hdr *ip6;
- struct udp_hdr *udp;
- struct tcp_hdr *tcp;
- uint16_t len;
- static const uint8_t next_proto[] = {
- [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
- };
- raw_pkt = (uint8_t *)buff;
- input = &fdir->input;
- DP_INFO(edev, "flow_type %d\n", input->flow_type);
-
- len = 2 * sizeof(struct ether_addr);
- raw_pkt += 2 * sizeof(struct ether_addr);
- if (input->flow_ext.vlan_tci) {
- DP_INFO(edev, "adding VLAN header\n");
- rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
- rte_memcpy(raw_pkt + sizeof(uint16_t),
- &input->flow_ext.vlan_tci,
- sizeof(uint16_t));
- raw_pkt += sizeof(vlan_frame);
- len += sizeof(vlan_frame);
- }
- ether_type = (uint16_t *)raw_pkt;
- raw_pkt += sizeof(uint16_t);
- len += sizeof(uint16_t);
-
- switch (input->flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- /* fill the common ip header */
- ip = (struct ipv4_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
- ip->total_length = sizeof(struct ipv4_hdr);
- ip->next_proto_id = input->flow.ip4_flow.proto ?
- input->flow.ip4_flow.proto :
- next_proto[input->flow_type];
- ip->time_to_live = input->flow.ip4_flow.ttl ?
- input->flow.ip4_flow.ttl :
- QEDE_FDIR_IPV4_DEF_TTL;
- ip->type_of_service = input->flow.ip4_flow.tos;
- ip->dst_addr = input->flow.ip4_flow.dst_ip;
- ip->src_addr = input->flow.ip4_flow.src_ip;
- len += sizeof(struct ipv4_hdr);
- params->ipv4 = true;
-
- raw_pkt = (uint8_t *)buff;
- /* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- udp->dst_port = input->flow.udp4_flow.dst_port;
- udp->src_port = input->flow.udp4_flow.src_port;
- udp->dgram_len = sizeof(struct udp_hdr);
- len += sizeof(struct udp_hdr);
- /* adjust ip total_length */
- ip->total_length += sizeof(struct udp_hdr);
- params->udp = true;
- } else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
- tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
- /* adjust ip total_length */
- ip->total_length += sizeof(struct tcp_hdr);
- params->tcp = true;
- }
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- ip6 = (struct ipv6_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
- ip6->proto = input->flow.ipv6_flow.proto ?
- input->flow.ipv6_flow.proto :
- next_proto[input->flow_type];
- rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
- IPV6_ADDR_LEN);
- rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
- IPV6_ADDR_LEN);
- len += sizeof(struct ipv6_hdr);
-
- raw_pkt = (uint8_t *)buff;
- /* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- udp->src_port = input->flow.udp6_flow.dst_port;
- udp->dst_port = input->flow.udp6_flow.src_port;
- len += sizeof(struct udp_hdr);
- params->udp = true;
- } else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
- tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
- params->tcp = true;
- }
- break;
- default:
- DP_ERR(edev, "Unsupported flow_type %u\n",
- input->flow_type);
- return 0;
- }
-
- return len;
-}
-
-int
-qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_fdir_filter *fdir;
- int ret;
-
- fdir = (struct rte_eth_fdir_filter *)arg;
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- /* Typically used to query flowdir support */
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- return -ENOTSUP;
- }
- return 0; /* means supported */
- case RTE_ETH_FILTER_ADD:
- ret = qede_fdir_filter_add(eth_dev, fdir, 1);
- break;
- case RTE_ETH_FILTER_DELETE:
- ret = qede_fdir_filter_add(eth_dev, fdir, 0);
- break;
- case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_UPDATE:
- case RTE_ETH_FILTER_INFO:
- return -ENOTSUP;
- break;
- default:
- DP_ERR(edev, "unknown operation %u", filter_op);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_ntuple_filter *ntuple;
- struct rte_eth_fdir_filter fdir_entry;
- struct rte_eth_tcpv4_flow *tcpv4_flow;
- struct rte_eth_udpv4_flow *udpv4_flow;
- bool add = false;
-
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- /* Typically used to query fdir support */
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- return -ENOTSUP;
- }
- return 0; /* means supported */
- case RTE_ETH_FILTER_ADD:
- add = true;
- break;
- case RTE_ETH_FILTER_DELETE:
- break;
- case RTE_ETH_FILTER_INFO:
- case RTE_ETH_FILTER_GET:
- case RTE_ETH_FILTER_UPDATE:
- case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_SET:
- case RTE_ETH_FILTER_STATS:
- case RTE_ETH_FILTER_OP_MAX:
- DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
- return -ENOTSUP;
- }
- ntuple = (struct rte_eth_ntuple_filter *)arg;
- /* Internally convert ntuple to fdir entry */
- memset(&fdir_entry, 0, sizeof(fdir_entry));
- if (ntuple->proto == IPPROTO_TCP) {
- fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
- tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
- tcpv4_flow->ip.src_ip = ntuple->src_ip;
- tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
- tcpv4_flow->ip.proto = IPPROTO_TCP;
- tcpv4_flow->src_port = ntuple->src_port;
- tcpv4_flow->dst_port = ntuple->dst_port;
- } else {
- fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
- udpv4_flow = &fdir_entry.input.flow.udp4_flow;
- udpv4_flow->ip.src_ip = ntuple->src_ip;
- udpv4_flow->ip.dst_ip = ntuple->dst_ip;
- udpv4_flow->ip.proto = IPPROTO_TCP;
- udpv4_flow->src_port = ntuple->src_port;
- udpv4_flow->dst_port = ntuple->dst_port;
- }
-
- fdir_entry.action.rx_queue = ntuple->queue;
-
- return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
-}
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
new file mode 100644
index 00000000..5e6571ca
--- /dev/null
+++ b/drivers/net/qede/qede_filter.c
@@ -0,0 +1,1546 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_errno.h>
+#include <rte_flow_driver.h>
+
+#include "qede_ethdev.h"
+
+/* VXLAN tunnel classification mapping */
+const struct _qede_udp_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
+
+#define IP_VERSION (0x40)
+#define IP_HDRLEN (0x5)
+#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
+#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
+#define QEDE_FDIR_IPV4_DEF_TTL (64)
+#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000)
+/* Sum of length of header types of L2, L3, L4.
+ * L2 : ether_hdr + vlan_hdr + vxlan_hdr
+ * L3 : ipv6_hdr
+ * L4 : tcp_hdr
+ */
+#define QEDE_MAX_FDIR_PKT_LEN (86)
+
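A quick sanity check of the 86-byte bound above against the worst case named in the comment; the header sizes used here (ether 14, vlan 4, vxlan 8, ipv6 40, tcp 20) are the usual fixed lengths, stated as an aside rather than taken from the patch:

#include <assert.h>

int main(void)
{
	/* ether_hdr + vlan_hdr + vxlan_hdr + ipv6_hdr + tcp_hdr */
	assert(14 + 4 + 8 + 40 + 20 == 86);
	return 0;
}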
+static inline bool qede_valid_flow(uint16_t flow_type)
+{
+ return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
+}
+
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ void *buff,
+ struct ecore_arfs_config_params *params);
+
+/* Note: Flowdir support is only partial.
+ * For ex: drop_queue, FDIR masks, flex_conf are not supported.
+ * Parameters like pballoc/status fields are irrelevant here.
+ */
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+
+ /* check FDIR modes */
+ switch (fdir->mode) {
+ case RTE_FDIR_MODE_NONE:
+ qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
+ DP_INFO(edev, "flowdir is disabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_DISABLE;
+ return -ENOTSUP;
+ }
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE;
+ DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT_TUNNEL:
+ case RTE_FDIR_MODE_SIGNATURE:
+ case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
+ DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
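For reference, a hedged sketch of the port configuration that makes the mode check above select ECORE_FILTER_CONFIG_MODE_5_TUPLE; queue counts and the rest of the configuration are illustrative:

#include <rte_ethdev.h>

static int enable_flowdir(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.fdir_conf = {
			.mode = RTE_FDIR_MODE_PERFECT,   /* maps to 5-tuple mode above */
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}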
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_arfs_entry *tmp = NULL;
+
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
+ if (tmp) {
+ if (tmp->mz)
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+ qede_arfs_entry, list);
+ rte_free(tmp);
+ }
+ }
+}
+
+static int
+qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ struct qede_arfs_entry *arfs)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_input *input;
+
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+
+ input = &fdir->input;
+
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
+ arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
+ arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
+ arfs->tuple.ip_proto = next_proto[input->flow_type];
+
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
+ arfs->tuple.src_port = input->flow.udp4_flow.src_port;
+ } else { /* TCP */
+ arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
+ arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
+ arfs->tuple.ip_proto = next_proto[input->flow_type];
+ rte_memcpy(arfs->tuple.dst_ipv6,
+ &input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(arfs->tuple.src_ipv6,
+ &input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
+ arfs->tuple.src_port = input->flow.udp6_flow.src_port;
+ } else { /* TCP */
+ arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
+ arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return -ENOTSUP;
+ }
+
+ arfs->rx_queue = fdir->action.rx_queue;
+ return 0;
+}
+
+static int
+qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
+ struct qede_arfs_entry *tmp = NULL;
+ const struct rte_memzone *mz;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc;
+ uint16_t pkt_len;
+ void *pkt;
+
+ if (add) {
+ if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ DP_ERR(edev, "Reached max flowdir filter limit\n");
+ return -EINVAL;
+ }
+ }
+
+ /* soft_id could have been used as memzone string, but soft_id is
+ * not currently used so it has no significance.
+ */
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
+ SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
+ rte_strerror(rte_errno));
+ return -rte_errno;
+ }
+
+ pkt = mz->addr;
+ memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
+ pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
+ &qdev->arfs_info.arfs);
+ if (pkt_len == 0) {
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
+ DP_INFO(edev, "flowdir filter exist\n");
+ rc = -EEXIST;
+ goto err1;
+ }
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
+ break;
+ }
+ if (!tmp) {
+ DP_ERR(edev, "flowdir filter does not exist\n");
+ rc = -EEXIST;
+ goto err1;
+ }
+ }
+ p_hwfn = ECORE_LEADING_HWFN(edev);
+ if (add) {
+ if (qdev->arfs_info.arfs.mode ==
+ ECORE_FILTER_CONFIG_MODE_DISABLE) {
+ /* Force update */
+ eth_dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_PERFECT;
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE;
+ DP_INFO(edev, "Force enable flowdir in perfect mode\n");
+ }
+ /* Enable ARFS searcher with updated flow_types */
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->arfs_info.arfs);
+ }
+ /* configure filter with ECORE_SPQ_MODE_EBLOCK */
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
+ (dma_addr_t)mz->iova,
+ pkt_len,
+ arfs->rx_queue,
+ 0, add);
+ if (rc == ECORE_SUCCESS) {
+ if (add) {
+ arfs->pkt_len = pkt_len;
+ arfs->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
+ arfs, list);
+ qdev->arfs_info.filter_count++;
+ DP_INFO(edev, "flowdir filter added, count = %d\n",
+ qdev->arfs_info.filter_count);
+ } else {
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+ qede_arfs_entry, list);
+ rte_free(tmp); /* the node deleted */
+ rte_memzone_free(mz); /* temp node allocated */
+ qdev->arfs_info.filter_count--;
+ DP_INFO(edev, "Fdir filter deleted, count = %d\n",
+ qdev->arfs_info.filter_count);
+ }
+ } else {
+ DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
+ rc, qdev->arfs_info.filter_count);
+ }
+
+ /* Disable ARFS searcher if there are no more filters */
+ if (qdev->arfs_info.filter_count == 0) {
+ memset(&qdev->arfs_info.arfs, 0,
+ sizeof(struct ecore_arfs_config_params));
+ DP_INFO(edev, "Disabling flowdir\n");
+ qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->arfs_info.arfs);
+ }
+ return 0;
+
+err1:
+ rte_memzone_free(mz);
+ return rc;
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_arfs_entry *arfs = NULL;
+ int rc = 0;
+
+ arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!arfs) {
+ DP_ERR(edev, "Did not allocate memory for arfs\n");
+ return -ENOMEM;
+ }
+
+ rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
+ if (rc < 0) {
+ rte_free(arfs);
+ return rc;
+ }
+
+ rc = qede_config_arfs_filter(eth_dev, arfs, add);
+ if (rc < 0)
+ rte_free(arfs);
+
+ return rc;
+}
+
+static int
+qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (!qede_valid_flow(fdir->input.flow_type)) {
+ DP_ERR(edev, "invalid flow_type input\n");
+ return -EINVAL;
+ }
+
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ DP_ERR(edev, "invalid queue number %u\n",
+ fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir->input.flow_ext.is_vf) {
+ DP_ERR(edev, "flowdir is not supported over VF\n");
+ return -EINVAL;
+ }
+
+ return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
+}
+
+/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ void *buff,
+ struct ecore_arfs_config_params *params)
+
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint16_t *ether_type;
+ uint8_t *raw_pkt;
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ uint16_t len;
+
+ raw_pkt = (uint8_t *)buff;
+
+ len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
+ switch (arfs->tuple.eth_proto) {
+ case ETHER_TYPE_IPv4:
+ ip = (struct ipv4_hdr *)raw_pkt;
+ ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
+ ip->total_length = sizeof(struct ipv4_hdr);
+ ip->next_proto_id = arfs->tuple.ip_proto;
+ ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
+ ip->dst_addr = arfs->tuple.dst_ipv4;
+ ip->src_addr = arfs->tuple.src_ipv4;
+ len += sizeof(struct ipv4_hdr);
+ params->ipv4 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (arfs->tuple.ip_proto == IPPROTO_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dst_port = arfs->tuple.dst_port;
+ udp->src_port = arfs->tuple.src_port;
+ udp->dgram_len = sizeof(struct udp_hdr);
+ len += sizeof(struct udp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = arfs->tuple.src_port;
+ tcp->dst_port = arfs->tuple.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ case ETHER_TYPE_IPv6:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ ip6->proto = arfs->tuple.ip_proto;
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
+
+ rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ params->ipv6 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (arfs->tuple.ip_proto == IPPROTO_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->src_port = arfs->tuple.src_port;
+ udp->dst_port = arfs->tuple.dst_port;
+ len += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = arfs->tuple.src_port;
+ tcp->dst_port = arfs->tuple.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported eth_proto %u\n",
+ arfs->tuple.eth_proto);
+ return 0;
+ }
+
+ return len;
+}
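
As a sanity check on the length bookkeeping above: for an IPv4/UDP tuple the constructed template is 12 bytes of MAC addresses + 2 bytes of EtherType + 20 bytes of IPv4 header + 8 bytes of UDP header = 42 bytes returned, with ip->total_length ending up at 28; the IPv4/TCP case returns 54 bytes instead (20-byte TCP header).
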
+
+static int
+qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_filter *fdir;
+ int ret;
+
+ fdir = (struct rte_eth_fdir_filter *)arg;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query flowdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 0);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_INFO:
+ return -ENOTSUP;
+ default:
+ DP_ERR(edev, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_ntuple_filter *ntuple;
+ struct rte_eth_fdir_filter fdir_entry;
+ struct rte_eth_tcpv4_flow *tcpv4_flow;
+ struct rte_eth_udpv4_flow *udpv4_flow;
+ bool add = false;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query fdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ break;
+ case RTE_ETH_FILTER_INFO:
+ case RTE_ETH_FILTER_GET:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_SET:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_OP_MAX:
+ DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
+ return -ENOTSUP;
+ }
+ ntuple = (struct rte_eth_ntuple_filter *)arg;
+ /* Internally convert ntuple to fdir entry */
+ memset(&fdir_entry, 0, sizeof(fdir_entry));
+ if (ntuple->proto == IPPROTO_TCP) {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
+ tcpv4_flow->ip.src_ip = ntuple->src_ip;
+ tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ tcpv4_flow->ip.proto = IPPROTO_TCP;
+ tcpv4_flow->src_port = ntuple->src_port;
+ tcpv4_flow->dst_port = ntuple->dst_port;
+ } else {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ udpv4_flow = &fdir_entry.input.flow.udp4_flow;
+ udpv4_flow->ip.src_ip = ntuple->src_ip;
+ udpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ udpv4_flow->ip.proto = IPPROTO_UDP;
+ udpv4_flow->src_port = ntuple->src_port;
+ udpv4_flow->dst_port = ntuple->dst_port;
+ }
+
+ fdir_entry.action.rx_queue = ntuple->queue;
+
+ return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
+}
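
For context, a minimal application-side sketch that exercises this ntuple path through the legacy filter_ctrl API; the port_id, addresses, ports and queue index below are made-up values, not part of this patch:

    #include <string.h>
    #include <netinet/in.h>
    #include <rte_ethdev.h>
    #include <rte_eth_ctrl.h>
    #include <rte_ip.h>
    #include <rte_byteorder.h>

    static int
    example_add_ntuple(uint16_t port_id)
    {
        struct rte_eth_ntuple_filter nt;

        memset(&nt, 0, sizeof(nt));
        nt.flags = RTE_5TUPLE_FLAGS;
        nt.proto = IPPROTO_UDP;                          /* selects the udp4_flow branch above */
        nt.src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
        nt.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
        nt.src_port = rte_cpu_to_be_16(4000);
        nt.dst_port = rte_cpu_to_be_16(5000);
        nt.queue = 2;                                    /* copied into action.rx_queue */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
                                       RTE_ETH_FILTER_ADD, &nt);
    }
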
+
+static int
+qede_tunnel_update(struct qede_dev *qdev,
+ struct ecore_tunnel_info *tunn_info)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+ } else {
+ p_ptt = NULL;
+ }
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+ tunn_info, ECORE_SPQ_MODE_CB, NULL);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ if (qdev->vxlan.enable == enable)
+ return ECORE_SUCCESS;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.vxlan.b_update_mode = true;
+ tunn.vxlan.b_mode_enabled = enable;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+ tunn.vxlan.tun_cls = clss;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->vxlan.enable = enable;
+ qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+ DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+
+ return rc;
+}
+
+static int
+qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.l2_geneve.b_update_mode = true;
+ tunn.l2_geneve.b_mode_enabled = enable;
+ tunn.ip_geneve.b_update_mode = true;
+ tunn.ip_geneve.b_mode_enabled = enable;
+ tunn.l2_geneve.tun_cls = clss;
+ tunn.ip_geneve.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->geneve.enable = enable;
+ qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
+ DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->geneve.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.ip_gre.b_update_mode = true;
+ tunn.ip_gre.b_mode_enabled = enable;
+ tunn.ip_gre.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->ipgre.enable = enable;
+ DP_INFO(edev, "IPGRE is %s\n",
+ enable ? "enabled" : "disabled");
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+ udp_port = 0;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+
+ qdev->vxlan.udp_port = udp_port;
+ /* If the request is to delete the UDP port and the number of
+ * VXLAN filters has reached 0, then VXLAN offload can be
+ * disabled.
+ */
+ if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+
+ udp_port = 0;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+
+ qdev->geneve.udp_port = udp_port;
+ /* If the request is to delete the UDP port and the number of
+ * GENEVE filters has reached 0, then GENEVE offload can be
+ * disabled.
+ */
+ if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
+ return qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+}
+
+int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for VXLAN was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable VxLAN tunnel with default MAC/VLAN classification if
+ * it was not enabled while adding VXLAN filter before UDP port
+ * update.
+ */
+ if (!qdev->vxlan.enable) {
+ rc = qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable VXLAN "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
+
+ qdev->vxlan.udp_port = udp_port;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for GENEVE was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable GENEVE tunnel with default MAC/VLAN classification if
+ * it was not enabled while adding GENEVE filter before UDP port
+ * update.
+ */
+ if (!qdev->geneve.enable) {
+ rc = qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable GENEVE "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
+
+ qdev->geneve.udp_port = udp_port;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+}
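
A minimal sketch of driving the two entry points above through the ethdev UDP tunnel API, assuming the qede dev_ops hook them up to udp_tunnel_port_add/del as the names suggest; the port_id and UDP port are illustrative:

    #include <rte_ethdev.h>

    static int
    example_vxlan_port(uint16_t port_id)
    {
        struct rte_eth_udp_tunnel tunnel_udp = {
            .udp_port = 4789,                     /* IANA-assigned VXLAN port */
            .prot_type = RTE_TUNNEL_TYPE_VXLAN,
        };
        int rc;

        /* Ends up in qede_udp_dst_port_add() */
        rc = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
        if (rc != 0)
            return rc;

        /* Ends up in qede_udp_dst_port_del() */
        return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);
    }
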
+
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+ /* Init common ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ __attribute__((unused)) enum rte_filter_op filter_op,
+ enum ecore_tunn_clss *clss,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_ucast ucast = {0};
+ enum ecore_filter_ucast_type type;
+ uint16_t filter_type = 0;
+ char str[80];
+ int rc;
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, clss, str);
+ if (*clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
+ ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, add);
+ if (rc == 0 && add) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ enum rte_eth_tunnel_type tunn_type, bool enable)
+{
+ int rc = -EINVAL;
+
+ switch (tunn_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ rc = qede_vxlan_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ rc = qede_geneve_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ rc = qede_ipgre_enable(eth_dev, clss, enable);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+ bool add;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ add = false;
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+
+ if (IS_VF(edev))
+ return qede_tunn_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ conf->tunnel_type, add);
+
+ rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (add) {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+ qdev->vxlan.num_filters++;
+ qdev->vxlan.filter_type = conf->filter_type;
+ } else { /* GENEVE */
+ qdev->geneve.num_filters++;
+ qdev->geneve.filter_type = conf->filter_type;
+ }
+
+ if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+ !qdev->ipgre.enable)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ true);
+ } else {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
+ qdev->vxlan.num_filters--;
+ else /*GENEVE*/
+ qdev->geneve.num_filters--;
+
+ /* Disable tunnel offload if the tunnel's filter count drops to 0 */
+ if (qdev->vxlan.num_filters == 0 ||
+ qdev->geneve.num_filters == 0)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ false);
+ }
+
+ return 0;
+}
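
A hedged sketch of an application adding a VNI-based tunnel filter through the path above, assuming ETH_TUNNEL_FILTER_TENID appears in the qede_tunn_types table (defined outside this hunk); the VNI, queue and port_id are made-up values:

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_eth_ctrl.h>

    static int
    example_add_vxlan_vni_filter(uint16_t port_id)
    {
        struct rte_eth_tunnel_filter_conf tconf;

        memset(&tconf, 0, sizeof(tconf));
        tconf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
        tconf.filter_type = ETH_TUNNEL_FILTER_TENID; /* VNI-based classification */
        tconf.tenant_id = 100;                       /* VNI */
        tconf.queue_id = 1;                          /* not honored, per the DP_INFO above */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
                                       RTE_ETH_FILTER_ADD, &tconf);
    }
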
+
+static int
+qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->transfer != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ bool l3 = false, l4 = false;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ if (!pattern->spec) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item spec not defined");
+ return -rte_errno;
+ }
+
+ if (pattern->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item last not supported");
+ return -rte_errno;
+ }
+
+ if (pattern->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item mask not supported");
+ return -rte_errno;
+ }
+
+ /* Below validation is only for 4 tuple flow
+ * (GFT_PROFILE_TYPE_4_TUPLE)
+ * - src and dst L3 address (IPv4 or IPv6)
+ * - src and dst L4 port (TCP or UDP)
+ */
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv4 *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
+ flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv6 *spec;
+
+ spec = pattern->spec;
+ rte_memcpy(flow->entry.tuple.src_ipv6,
+ spec->hdr.src_addr,
+ IPV6_ADDR_LEN);
+ rte_memcpy(flow->entry.tuple.dst_ipv6,
+ spec->hdr.dst_addr,
+ IPV6_ADDR_LEN);
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_udp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_UDP;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_tcp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_TCP;
+ }
+
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
+ return -rte_errno;
+ }
+ }
+
+ if (!(l3 && l4)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item types need to have both L3 and L4 protocols");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ const struct rte_flow_action_queue *queue;
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+
+ if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+
+ if (flow)
+ flow->entry.rx_queue = queue->index;
+
+ break;
+
+ default:
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Action is not supported - only ACTION_TYPE_QUEUE supported");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ int rc = 0;
+
+ rc = qede_flow_validate_attr(dev, attr, error);
+ if (rc)
+ return rc;
+
+ /* Parse and validate the item pattern and actions.
+ * The given item list and actions will be translated into the
+ * qede PMD-specific arfs structure.
+ */
+ rc = qede_flow_parse_pattern(dev, patterns, error, flow);
+ if (rc)
+ return rc;
+
+ rc = qede_flow_parse_actions(dev, actions, error, flow);
+
+ return rc;
+}
+
+static int
+qede_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
+}
+
+static struct rte_flow *
+qede_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ return NULL;
+ }
+
+ rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
+ if (rc < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ rc = qede_config_arfs_filter(dev, &flow->entry, true);
+ if (rc < 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to configure flow filter");
+ rte_free(flow);
+ return NULL;
+ }
+
+ return flow;
+}
+
+static int
+qede_flow_destroy(struct rte_eth_dev *eth_dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
+ if (rc < 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to delete flow filter");
+ rte_free(flow);
+ }
+
+ return rc;
+}
+
+const struct rte_flow_ops qede_flow_ops = {
+ .validate = qede_flow_validate,
+ .create = qede_flow_create,
+ .destroy = qede_flow_destroy,
+};
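
Given the parser above (every item needs a spec, no mask or last, 4-tuple items only, and a single QUEUE action), a matching rte_flow usage sketch could look as follows; the addresses, ports, queue index and port_id are illustrative assumptions:

    #include <rte_flow.h>
    #include <rte_ip.h>
    #include <rte_byteorder.h>

    static struct rte_flow *
    example_create_4tuple_flow(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
            .hdr = {
                .src_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)),
                .dst_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 2)),
            },
        };
        struct rte_flow_item_udp udp_spec = {
            .hdr = {
                .src_port = rte_cpu_to_be_16(4000),
                .dst_port = rte_cpu_to_be_16(5000),
            },
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
            { .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
            return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }
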
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with UDP tunneling");
+ return(qede_tunn_filter_config(eth_dev, filter_op,
+ filter_conf));
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_GENERIC:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+
+ *(const void **)arg = &qede_flow_ops;
+ return 0;
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 46fa8371..df83666f 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -15,10 +15,10 @@
#define QEDE_ALARM_TIMEOUT_US 100000
/* Global variable to hold absolute path of fw file */
-char fw_file[PATH_MAX];
+char qede_fw_file[PATH_MAX];
-const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.33.12.0.bin";
+static const char * const QEDE_DEFAULT_FIRMWARE =
+ "/lib/firmware/qed/qed_init_values-8.37.7.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -126,11 +126,11 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
const char *fw = RTE_LIBRTE_QEDE_FW;
if (strcmp(fw, "") == 0)
- strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
+ strcpy(qede_fw_file, QEDE_DEFAULT_FIRMWARE);
else
- strcpy(fw_file, fw);
+ strcpy(qede_fw_file, fw);
- fd = open(fw_file, O_RDONLY);
+ fd = open(qede_fw_file, O_RDONLY);
if (fd < 0) {
DP_ERR(edev, "Can't open firmware file\n");
return -ENOENT;
@@ -234,7 +234,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
#ifdef CONFIG_ECORE_BINARY_FW
rc = qed_load_firmware_data(edev);
if (rc) {
- DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
+ DP_ERR(edev, "Failed to find fw file %s\n",
+ qede_fw_file);
goto err;
}
#endif
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0f157ded..8a4772f4 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -35,6 +35,49 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
return 0;
}
+/* Criteria for calculating the Rx buffer size -
+ * 1) rx_buf_size should not exceed the size of the mbuf
+ * 2) In scattered_rx mode - minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+ * 3) In regular mode - minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2)
+ * In the above cases +2 corresponds to the 2 bytes of padding in
+ * front of the L2 header.
+ * 4) rx_buf_size should be cacheline-size aligned. Considering
+ * criterion 1, the size is adjusted down (floor) instead of up
+ * (ceil), so that rounding never pushes rx_buf_size past the mbuf
+ * size. A worked example follows the function below.
+ */
+int
+qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rx_buf_size;
+
+ if (dev->data->scattered_rx) {
+ /* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers
+ * can be used for a single packet, so make sure the mbuf
+ * size is large enough for that.
+ */
+ if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
+ (max_frame_size + QEDE_ETH_OVERHEAD)) {
+ DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
+ mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
+ return -EINVAL;
+ }
+
+ rx_buf_size = RTE_MAX(mbufsz,
+ (max_frame_size + QEDE_ETH_OVERHEAD) /
+ ETH_RX_MAX_BUFF_PER_PKT);
+ } else {
+ rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
+ }
+
+ /* Align to cache-line size if needed */
+ return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
+}
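
A worked example of the calculation above, assuming QEDE_ETH_OVERHEAD evaluates to 18 bytes (2 x 4-byte VLAN tags + 8-byte optional LLC/SNAP + 2 bytes padding) and ETH_RX_MAX_BUFF_PER_PKT is 5, both defined outside this hunk: in non-scattered mode with a 1518-byte max frame, rx_buf_size = 1518 + 18 = 1536, already a multiple of the 64-byte cache line; in scattered mode with a 2048-byte (floor-aligned) mbuf and a 9018-byte max frame, the check 2048 * 5 >= 9018 + 18 passes and rx_buf_size = RTE_MAX(2048, 9036 / 5) = RTE_MAX(2048, 1807) = 2048.
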
+
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
@@ -85,6 +128,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ /* cache align the mbuf size to simplify the rx_buf_size calculation */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
@@ -93,13 +138,13 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
- if (dev->data->scattered_rx)
- rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
- /* Align to cache-line size if needed */
- rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ if (rc < 0) {
+ rte_free(rxq);
+ return rc;
+ }
+
+ rxq->rx_buf_size = rc;
DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
@@ -2106,3 +2151,84 @@ qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
{
return 0;
}
+
+
+/* This function does a dry-run walk over the completion queue
+ * to calculate the number of BDs used by the HW.
+ * At the end, it restores the completion queue to its original state.
+ */
+static uint16_t
+qede_parse_fp_cqe(struct qede_rx_queue *rxq)
+{
+ uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
+ union eth_rx_cqe *cqe, *orig_cqe = NULL;
+
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ orig_cqe = cqe;
+
+ while (sw_comp_cons != hw_comp_cons) {
+ switch (cqe->fast_path_regular.type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ bd_count += cqe->fast_path_regular.bd_num;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ bd_count += cqe->fast_path_tpa_end.num_of_bds;
+ break;
+ default:
+ break;
+ }
+
+ cqe =
+ (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+ }
+
+ /* revert comp_ring to original state */
+ ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
+
+ return bd_count;
+}
+
+int
+qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
+{
+ uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
+ uint16_t produced, consumed;
+ struct qede_rx_queue *rxq = p_rxq;
+
+ if (offset > rxq->nb_rx_desc)
+ return -EINVAL;
+
+ sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
+ sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+
+ /* find BDs used by HW from completion queue elements */
+ hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
+
+ if (hw_bd_cons < sw_bd_cons)
+ /* wraparound case */
+ consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
+ else
+ consumed = hw_bd_cons - sw_bd_cons;
+
+ if (offset <= consumed)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (sw_bd_prod < sw_bd_cons)
+ /* wraparound case */
+ produced = (0xffff - sw_bd_cons) + sw_bd_prod;
+ else
+ produced = sw_bd_prod - sw_bd_cons;
+
+ if (offset <= produced)
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
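
The new callback is reached through the generic descriptor-status API, assuming it is wired into dev_ops->rx_descriptor_status elsewhere in this patch; a small polling sketch with illustrative port/queue/offset values:

    #include <rte_ethdev.h>

    static void
    example_poll_rx_desc(uint16_t port_id, uint16_t queue_id, uint16_t offset)
    {
        switch (rte_eth_rx_descriptor_status(port_id, queue_id, offset)) {
        case RTE_ETH_RX_DESC_DONE:
            /* a received packet sits at this offset, ready to be retrieved */
            break;
        case RTE_ETH_RX_DESC_AVAIL:
            /* descriptor is still owned by the HW, nothing received yet */
            break;
        case RTE_ETH_RX_DESC_UNAVAIL:
            /* offset is beyond the usable descriptors of the ring */
            break;
        default:
            /* negative errno, e.g. -ENOTSUP or -EINVAL */
            break;
        }
    }
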
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index e710fbae..d3a41e92 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -61,9 +61,16 @@
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
~(QEDE_FW_RX_ALIGN_END - 1))
-/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
- + (QEDE_LLC_SNAP_HDR_LEN))
+#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
+ QEDE_FW_RX_ALIGN_END)
+
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional,
+ * +2 is for padding in front of L2 header
+ */
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN) + 2)
+
+#define QEDE_MAX_ETHER_HDR_LEN (ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
@@ -267,6 +274,10 @@ uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
int qede_start_queues(struct rte_eth_dev *eth_dev);
void qede_stop_queues(struct rte_eth_dev *eth_dev);
+int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size);
+int
+qede_rx_descriptor_status(void *rxq, uint16_t offset);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 791deb0b..aeb48f5e 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -164,7 +164,6 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -667,10 +666,8 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev)
}
}
- rte_free(eth_dev->data->rx_queues);
- rte_free(eth_dev->data->tx_queues);
- rte_free(eth_dev->data->dev_private);
-
+ /* mac_addrs must not be freed alone because part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(eth_dev);
return 0;
}
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index 7f89a7bf..cdf835f0 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -73,11 +73,10 @@ efx_mcdi_set_evq_tmr(
__in uint32_t timer_ns)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
- MC_CMD_SET_EVQ_TMR_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
+ MC_CMD_SET_EVQ_TMR_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_EVQ_TMR;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
@@ -123,9 +122,9 @@ efx_mcdi_init_evq(
__in boolean_t low_latency)
{
efx_mcdi_req_t req;
- uint8_t payload[
- MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
- MC_CMD_INIT_EVQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_OUT_LEN);
efx_qword_t *dma_addr;
uint64_t addr;
int npages;
@@ -140,7 +139,6 @@ efx_mcdi_init_evq(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_EVQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
@@ -260,9 +258,9 @@ efx_mcdi_init_evq_v2(
__in uint32_t flags)
{
efx_mcdi_req_t req;
- uint8_t payload[
- MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
- MC_CMD_INIT_EVQ_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_V2_OUT_LEN);
boolean_t interrupting;
unsigned int evq_type;
efx_qword_t *dma_addr;
@@ -277,7 +275,6 @@ efx_mcdi_init_evq_v2(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_EVQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
@@ -384,11 +381,10 @@ efx_mcdi_fini_evq(
__in uint32_t instance)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
- MC_CMD_FINI_EVQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FINI_EVQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
@@ -603,8 +599,8 @@ efx_mcdi_driver_event(
__in efx_qword_t data)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
- MC_CMD_DRIVER_EVENT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
+ MC_CMD_DRIVER_EVENT_OUT_LEN);
efx_rc_t rc;
req.emr_cmd = MC_CMD_DRIVER_EVENT;
@@ -867,8 +863,9 @@ ef10_ev_rx(
EFX_EV_QSTAT_INCR(eep, EV_RX);
- /* Discard events after RXQ/TXQ errors */
- if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ /* Discard events after RXQ/TXQ errors, or hardware not available */
+ if (enp->en_reset_flags &
+ (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
return (B_FALSE);
/* Basic packet information */
@@ -1068,8 +1065,9 @@ ef10_ev_tx(
EFX_EV_QSTAT_INCR(eep, EV_TX);
- /* Discard events after RXQ/TXQ errors */
- if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ /* Discard events after RXQ/TXQ errors, or hardware not available */
+ if (enp->en_reset_flags &
+ (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
return (B_FALSE);
if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
index ae872853..afe4064d 100644
--- a/drivers/net/sfc/base/ef10_filter.c
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -172,12 +172,11 @@ efx_mcdi_filter_op_add(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_V3_IN_LEN,
- MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FILTER_OP_V3_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN);
efx_filter_match_flags_t match_flags;
efx_rc_t rc;
- memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN;
@@ -376,11 +375,10 @@ efx_mcdi_filter_op_delete(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
- MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FILTER_OP_EXT_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN);
efx_rc_t rc;
- memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
@@ -950,13 +948,12 @@ efx_mcdi_get_parser_disp_info(
__out size_t *list_lengthp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
- MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
size_t matches_count;
size_t list_size;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
@@ -1144,12 +1141,15 @@ ef10_filter_insert_unicast(
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
+ rc = efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+ if (rc != 0)
+ goto fail1;
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
if (rc != 0)
- goto fail1;
+ goto fail2;
eftp->eft_unicst_filter_count++;
EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
@@ -1157,6 +1157,8 @@ ef10_filter_insert_unicast(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
@@ -1175,11 +1177,13 @@ ef10_filter_insert_all_unicast(
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_uc_def(&spec);
+ rc = efx_filter_spec_set_uc_def(&spec);
+ if (rc != 0)
+ goto fail1;
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
if (rc != 0)
- goto fail1;
+ goto fail2;
eftp->eft_unicst_filter_count++;
EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
@@ -1187,6 +1191,8 @@ ef10_filter_insert_all_unicast(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
@@ -1228,9 +1234,21 @@ ef10_filter_insert_multicast_list(
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_eth_local(&spec,
+ rc = efx_filter_spec_set_eth_local(&spec,
EFX_FILTER_SPEC_VID_UNSPEC,
&addrs[i * EFX_MAC_ADDR_LEN]);
+ if (rc != 0) {
+ if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ } else {
+ /*
+ * Don't try to add a filter with a corrupt
+ * specification.
+ */
+ continue;
+ }
+ }
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&filter_index);
@@ -1253,8 +1271,12 @@ ef10_filter_insert_multicast_list(
eftp->eft_default_rxq);
EFX_MAC_BROADCAST_ADDR_SET(addr);
- efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
- addr);
+ rc = efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC, addr);
+ if ((rc != 0) && (rollback == B_TRUE)) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&filter_index);
@@ -1302,12 +1324,14 @@ ef10_filter_insert_all_multicast(
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_mc_def(&spec);
+ rc = efx_filter_spec_set_mc_def(&spec);
+ if (rc != 0)
+ goto fail1;
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&eftp->eft_mulcst_filter_indexes[0]);
if (rc != 0)
- goto fail1;
+ goto fail2;
eftp->eft_mulcst_filter_count = 1;
eftp->eft_using_all_mulcst = B_TRUE;
@@ -1318,6 +1342,8 @@ ef10_filter_insert_all_multicast(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -1552,7 +1578,7 @@ ef10_filter_reconfigure(
/*
* Insert or renew unicast filters.
*
- * Frimware does not perform chaining on unicast filters. As traffic is
+ * Firmware does not perform chaining on unicast filters. As traffic is
* therefore only delivered to the first matching filter, we should
* always insert the specific filter for our MAC address, to try and
* ensure we get that traffic.
diff --git a/drivers/net/sfc/base/ef10_image.c b/drivers/net/sfc/base/ef10_image.c
index 6fb7e476..c035e0df 100644
--- a/drivers/net/sfc/base/ef10_image.c
+++ b/drivers/net/sfc/base/ef10_image.c
@@ -577,7 +577,8 @@ fail1:
__checkReturn efx_rc_t
efx_build_signed_image_write_buffer(
- __out uint8_t *bufferp,
+ __out_bcount(buffer_size)
+ uint8_t *bufferp,
__in uint32_t buffer_size,
__in efx_image_info_t *infop,
__out efx_image_header_t **headerpp)
@@ -704,7 +705,7 @@ efx_build_signed_image_write_buffer(
* results in the layout used for the data chunks and chunk headers.
*/
/* END CSTYLED */
- memset(bufferp, buffer_size, 0xFF);
+ memset(bufferp, 0xFF, buffer_size);
EFX_STATIC_ASSERT(sizeof (chunk_hdr) == SIGNED_IMAGE_CHUNK_HDR_LEN);
memset(&chunk_hdr, 0, SIGNED_IMAGE_CHUNK_HDR_LEN);
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
index 4751faf1..f971063a 100644
--- a/drivers/net/sfc/base/ef10_impl.h
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -190,6 +190,14 @@ extern __checkReturn efx_rc_t
ef10_nic_init(
__in efx_nic_t *enp);
+extern __checkReturn boolean_t
+ef10_nic_hw_unavailable(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_nic_set_hw_unavailable(
+ __in efx_nic_t *enp);
+
#if EFSYS_OPT_DIAG
extern __checkReturn efx_rc_t
@@ -453,7 +461,7 @@ ef10_nvram_partn_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
@@ -477,17 +485,21 @@ ef10_nvram_partn_set_version(
extern __checkReturn efx_rc_t
ef10_nvram_buffer_validate(
- __in efx_nic_t *enp,
__in uint32_t partn,
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size);
+extern void
+ef10_nvram_buffer_init(
+ __out_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
extern __checkReturn efx_rc_t
ef10_nvram_buffer_create(
- __in efx_nic_t *enp,
- __in uint16_t partn_type,
- __in_bcount(buffer_size)
+ __in uint32_t partn_type,
+ __out_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size);
@@ -516,15 +528,26 @@ ef10_nvram_buffer_find_item(
__out uint32_t *lengthp);
extern __checkReturn efx_rc_t
+ef10_nvram_buffer_peek_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *tagp,
+ __out uint32_t *lengthp,
+ __out uint32_t *value_offsetp);
+
+extern __checkReturn efx_rc_t
ef10_nvram_buffer_get_item(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
__in uint32_t length,
- __out_bcount_part(item_max_size, *lengthp)
- caddr_t itemp,
- __in size_t item_max_size,
+ __out uint32_t *tagp,
+ __out_bcount_part(value_max_size, *lengthp)
+ caddr_t valuep,
+ __in size_t value_max_size,
__out uint32_t *lengthp);
extern __checkReturn efx_rc_t
@@ -533,7 +556,19 @@ ef10_nvram_buffer_insert_item(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __in_bcount(length) caddr_t keyp,
+ __in uint32_t tag,
+ __in_bcount(length) caddr_t valuep,
+ __in uint32_t length,
+ __out uint32_t *lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_modify_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t tag,
+ __in_bcount(length) caddr_t valuep,
__in uint32_t length,
__out uint32_t *lengthp);
@@ -558,10 +593,7 @@ ef10_nvram_buffer_finish(
/* PHY */
typedef struct ef10_link_state_s {
- uint32_t els_adv_cap_mask;
- uint32_t els_lp_cap_mask;
- unsigned int els_fcntl;
- efx_link_mode_t els_link_mode;
+ efx_phy_link_state_t epls;
#if EFSYS_OPT_LOOPBACK
efx_loopback_type_t els_loopback;
#endif
@@ -597,6 +629,11 @@ ef10_phy_oui_get(
__in efx_nic_t *enp,
__out uint32_t *ouip);
+extern __checkReturn efx_rc_t
+ef10_phy_link_state_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_link_state_t *eplsp);
+
#if EFSYS_OPT_PHY_STATS
extern __checkReturn efx_rc_t
@@ -1128,11 +1165,12 @@ extern __checkReturn efx_rc_t
efx_mcdi_get_port_modes(
__in efx_nic_t *enp,
__out uint32_t *modesp,
- __out_opt uint32_t *current_modep);
+ __out_opt uint32_t *current_modep,
+ __out_opt uint32_t *default_modep);
extern __checkReturn efx_rc_t
ef10_nic_get_port_mode_bandwidth(
- __in uint32_t port_mode,
+ __in efx_nic_t *enp,
__out uint32_t *bandwidth_mbpsp);
extern __checkReturn efx_rc_t
diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c
index 1ffe266b..efa15712 100644
--- a/drivers/net/sfc/base/ef10_intr.c
+++ b/drivers/net/sfc/base/ef10_intr.c
@@ -51,8 +51,8 @@ efx_mcdi_trigger_interrupt(
__in unsigned int level)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
- MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+ MC_CMD_TRIGGER_INTERRUPT_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
@@ -64,7 +64,6 @@ efx_mcdi_trigger_interrupt(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c
index 1031e836..9f10f6f7 100644
--- a/drivers/net/sfc/base/ef10_mac.c
+++ b/drivers/net/sfc/base/ef10_mac.c
@@ -22,10 +22,10 @@ ef10_mac_poll(
if ((rc = ef10_phy_get_link(enp, &els)) != 0)
goto fail1;
- epp->ep_adv_cap_mask = els.els_adv_cap_mask;
- epp->ep_fcntl = els.els_fcntl;
+ epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
+ epp->ep_fcntl = els.epls.epls_fcntl;
- *link_modep = els.els_link_mode;
+ *link_modep = els.epls.epls_link_mode;
return (0);
@@ -75,11 +75,10 @@ efx_mcdi_vadapter_set_mac(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
- MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+ MC_CMD_VADAPTOR_SET_MAC_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
@@ -141,11 +140,10 @@ efx_mcdi_mtu_set(
__in uint32_t mtu)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
- MC_CMD_SET_MAC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
@@ -178,11 +176,10 @@ efx_mcdi_mtu_get(
__out size_t *mtu)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
- MC_CMD_SET_MAC_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_V2_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
@@ -274,11 +271,10 @@ ef10_mac_reconfigure(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
- MC_CMD_SET_MAC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
@@ -412,7 +408,7 @@ ef10_mac_filter_default_rxq_clear(
ef10_filter_default_rxq_clear(enp);
- efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ (void) efx_filter_reconfigure(enp, epp->ep_mac_addr,
epp->ep_all_unicst, epp->ep_mulcst,
epp->ep_all_mulcst, epp->ep_brdcst,
epp->ep_mulcst_addr_list,
@@ -654,7 +650,7 @@ ef10_mac_stats_update(
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
- EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
index 7dbf843b..50e23b7d 100644
--- a/drivers/net/sfc/base/ef10_nic.c
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -20,15 +20,14 @@ efx_mcdi_get_port_assignment(
__out uint32_t *portp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
- MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD ||
enp->en_family == EFX_FAMILY_MEDFORD2);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
@@ -63,18 +62,18 @@ fail1:
efx_mcdi_get_port_modes(
__in efx_nic_t *enp,
__out uint32_t *modesp,
- __out_opt uint32_t *current_modep)
+ __out_opt uint32_t *current_modep,
+ __out_opt uint32_t *default_modep)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
- MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD ||
enp->en_family == EFX_FAMILY_MEDFORD2);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_MODES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
@@ -110,6 +109,11 @@ efx_mcdi_get_port_modes(
GET_PORT_MODES_OUT_CURRENT_MODE);
}
+ if (default_modep != NULL) {
+ *default_modep = MCDI_OUT_DWORD(req,
+ GET_PORT_MODES_OUT_DEFAULT_MODE);
+ }
+
return (0);
fail3:
@@ -124,44 +128,99 @@ fail1:
__checkReturn efx_rc_t
ef10_nic_get_port_mode_bandwidth(
- __in uint32_t port_mode,
+ __in efx_nic_t *enp,
__out uint32_t *bandwidth_mbpsp)
{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ efx_port_t *epp = &(enp->en_port);
+
+ uint32_t single_lane;
+ uint32_t dual_lane;
+ uint32_t quad_lane;
uint32_t bandwidth;
efx_rc_t rc;
- switch (port_mode) {
- case TLV_PORT_MODE_10G:
- bandwidth = 10000;
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode, NULL)) != 0) {
+ /* No port mode info available. */
+ goto fail1;
+ }
+
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX))
+ single_lane = 25000;
+ else
+ single_lane = 10000;
+
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX))
+ dual_lane = 50000;
+ else
+ dual_lane = 20000;
+
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX))
+ quad_lane = 100000;
+ else
+ quad_lane = 40000;
+
+ switch (current_mode) {
+ case TLV_PORT_MODE_1x1_NA: /* mode 0 */
+ bandwidth = single_lane;
+ break;
+ case TLV_PORT_MODE_1x2_NA: /* mode 10 */
+ case TLV_PORT_MODE_NA_1x2: /* mode 11 */
+ bandwidth = dual_lane;
+ break;
+ case TLV_PORT_MODE_1x1_1x1: /* mode 2 */
+ bandwidth = single_lane + single_lane;
+ break;
+ case TLV_PORT_MODE_4x1_NA: /* mode 4 */
+ case TLV_PORT_MODE_NA_4x1: /* mode 8 */
+ bandwidth = 4 * single_lane;
+ break;
+ case TLV_PORT_MODE_2x1_2x1: /* mode 5 */
+ bandwidth = (2 * single_lane) + (2 * single_lane);
+ break;
+ case TLV_PORT_MODE_1x2_1x2: /* mode 12 */
+ bandwidth = dual_lane + dual_lane;
+ break;
+ case TLV_PORT_MODE_1x2_2x1: /* mode 17 */
+ case TLV_PORT_MODE_2x1_1x2: /* mode 18 */
+ bandwidth = dual_lane + (2 * single_lane);
break;
- case TLV_PORT_MODE_10G_10G:
- bandwidth = 10000 * 2;
+ /* Legacy Medford-only mode. Do not use (see bug63270) */
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: /* mode 9 */
+ bandwidth = 4 * single_lane;
break;
- case TLV_PORT_MODE_10G_10G_10G_10G:
- case TLV_PORT_MODE_10G_10G_10G_10G_Q:
- case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
- case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
- bandwidth = 10000 * 4;
+ case TLV_PORT_MODE_1x4_NA: /* mode 1 */
+ case TLV_PORT_MODE_NA_1x4: /* mode 22 */
+ bandwidth = quad_lane;
break;
- case TLV_PORT_MODE_40G:
- bandwidth = 40000;
+ case TLV_PORT_MODE_2x2_NA: /* mode 13 */
+ case TLV_PORT_MODE_NA_2x2: /* mode 14 */
+ bandwidth = 2 * dual_lane;
break;
- case TLV_PORT_MODE_40G_40G:
- bandwidth = 40000 * 2;
+ case TLV_PORT_MODE_1x4_2x1: /* mode 6 */
+ case TLV_PORT_MODE_2x1_1x4: /* mode 7 */
+ bandwidth = quad_lane + (2 * single_lane);
break;
- case TLV_PORT_MODE_40G_10G_10G:
- case TLV_PORT_MODE_10G_10G_40G:
- bandwidth = 40000 + (10000 * 2);
+ case TLV_PORT_MODE_1x4_1x2: /* mode 15 */
+ case TLV_PORT_MODE_1x2_1x4: /* mode 16 */
+ bandwidth = quad_lane + dual_lane;
+ break;
+ case TLV_PORT_MODE_1x4_1x4: /* mode 3 */
+ bandwidth = quad_lane + quad_lane;
break;
default:
rc = EINVAL;
- goto fail1;
+ goto fail2;
}
*bandwidth_mbpsp = bandwidth;
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -174,13 +233,12 @@ efx_mcdi_vadaptor_alloc(
__in uint32_t port_id)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
- MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
@@ -213,11 +271,10 @@ efx_mcdi_vadaptor_free(
__in uint32_t port_id)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
- MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VADAPTOR_FREE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
@@ -247,15 +304,14 @@ efx_mcdi_get_mac_address_pf(
__out_ecount_opt(6) uint8_t mac_addrp[6])
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD ||
enp->en_family == EFX_FAMILY_MEDFORD2);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
@@ -306,15 +362,14 @@ efx_mcdi_get_mac_address_vf(
__out_ecount_opt(6) uint8_t mac_addrp[6])
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD ||
enp->en_family == EFX_FAMILY_MEDFORD2);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
@@ -371,15 +426,14 @@ efx_mcdi_get_clock(
__out uint32_t *dpcpu_freqp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
- MC_CMD_GET_CLOCK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD ||
enp->en_family == EFX_FAMILY_MEDFORD2);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_CLOCK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
@@ -429,12 +483,11 @@ efx_mcdi_get_rxdp_config(
__out uint32_t *end_paddingp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
- MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
uint32_t end_padding;
efx_rc_t rc;
- memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
@@ -489,11 +542,10 @@ efx_mcdi_get_vector_cfg(
__out_opt uint32_t *vf_nvecp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
- MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
@@ -539,8 +591,8 @@ efx_mcdi_alloc_vis(
__out uint32_t *vi_shiftp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
- MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
efx_rc_t rc;
if (vi_countp == NULL) {
@@ -548,7 +600,6 @@ efx_mcdi_alloc_vis(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_ALLOC_VIS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
@@ -631,8 +682,8 @@ efx_mcdi_alloc_piobuf(
__out efx_piobuf_handle_t *handlep)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
- MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN);
efx_rc_t rc;
if (handlep == NULL) {
@@ -640,7 +691,6 @@ efx_mcdi_alloc_piobuf(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
@@ -679,11 +729,10 @@ efx_mcdi_free_piobuf(
__in efx_piobuf_handle_t handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
- MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FREE_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
@@ -714,11 +763,10 @@ efx_mcdi_link_piobuf(
__in efx_piobuf_handle_t handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LINK_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
@@ -749,11 +797,10 @@ efx_mcdi_unlink_piobuf(
__in uint32_t vi_index)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
@@ -806,7 +853,7 @@ fail1:
for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
- efx_mcdi_free_piobuf(enp, *handlep);
+ (void) efx_mcdi_free_piobuf(enp, *handlep);
*handlep = EFX_PIOBUF_HANDLE_INVALID;
}
enp->en_arch.ef10.ena_piobuf_count = 0;
@@ -823,7 +870,7 @@ ef10_nic_free_piobufs(
for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
- efx_mcdi_free_piobuf(enp, *handlep);
+ (void) efx_mcdi_free_piobuf(enp, *handlep);
*handlep = EFX_PIOBUF_HANDLE_INVALID;
}
enp->en_arch.ef10.ena_piobuf_count = 0;
@@ -951,11 +998,10 @@ ef10_mcdi_get_pf_count(
__out uint32_t *pf_countp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
- MC_CMD_GET_PF_COUNT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
+ MC_CMD_GET_PF_COUNT_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PF_COUNT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
@@ -995,15 +1041,14 @@ ef10_get_datapath_caps(
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
- MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V5_OUT_LEN);
efx_rc_t rc;
if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
goto fail1;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_CAPABILITIES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
@@ -1041,11 +1086,13 @@ ef10_get_datapath_caps(
}
encp->enc_rx_prefix_size = 14;
+#if EFSYS_OPT_RX_SCALE
/* Check if the firmware supports additional RSS modes */
if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
encp->enc_rx_scale_additional_modes_supported = B_TRUE;
else
encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+#endif /* EFSYS_OPT_RX_SCALE */
/* Check if the firmware supports TSO */
if (CAP_FLAGS1(req, TX_TSO))
@@ -1251,6 +1298,7 @@ ef10_get_datapath_caps(
else
encp->enc_hlb_counters = B_FALSE;
+#if EFSYS_OPT_RX_SCALE
if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
/* Only one exclusive RSS context is available per port. */
encp->enc_rx_scale_max_exclusive_contexts = 1;
@@ -1300,6 +1348,8 @@ ef10_get_datapath_caps(
*/
encp->enc_rx_scale_l4_hash_supported = B_TRUE;
}
+#endif /* EFSYS_OPT_RX_SCALE */
+
/* Check if the firmware supports "FLAG" and "MARK" filter actions */
if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
encp->enc_filter_action_flag_supported = B_TRUE;
@@ -1323,8 +1373,10 @@ ef10_get_datapath_caps(
return (0);
+#if EFSYS_OPT_RX_SCALE
fail5:
EFSYS_PROBE(fail5);
+#endif /* EFSYS_OPT_RX_SCALE */
fail4:
EFSYS_PROBE(fail4);
fail3:
@@ -1478,8 +1530,8 @@ static struct ef10_external_port_map_s {
*/
{
EFX_FAMILY_MEDFORD,
- (1U << TLV_PORT_MODE_10G) | /* mode 0 */
- (1U << TLV_PORT_MODE_10G_10G), /* mode 2 */
+ (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
+ (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */
1, /* ports per cage */
1 /* first cage */
},
@@ -1493,10 +1545,10 @@ static struct ef10_external_port_map_s {
*/
{
EFX_FAMILY_MEDFORD,
- (1U << TLV_PORT_MODE_40G) | /* mode 1 */
- (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
- (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
- (1U << TLV_PORT_MODE_10G_10G_40G) | /* mode 7 */
+ (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
+ (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
+ (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
+ (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
/* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
(1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
2, /* ports per cage */
@@ -1512,9 +1564,9 @@ static struct ef10_external_port_map_s {
*/
{
EFX_FAMILY_MEDFORD,
- (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) | /* mode 5 */
+ (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 5 */
/* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
- (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1), /* mode 4 */
+ (1U << TLV_PORT_MODE_4x1_NA), /* mode 4 */
4, /* ports per cage */
1 /* first cage */
},
@@ -1528,7 +1580,7 @@ static struct ef10_external_port_map_s {
*/
{
EFX_FAMILY_MEDFORD,
- (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2), /* mode 8 */
+ (1U << TLV_PORT_MODE_NA_4x1), /* mode 8 */
4, /* ports per cage */
2 /* first cage */
},
@@ -1635,13 +1687,14 @@ ef10_external_port_mapping(
int32_t count = 1; /* Default 1-1 mapping */
int32_t offset = 1; /* Default starting external port number */
- if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current,
+ NULL)) != 0) {
/*
* No current port mode information (i.e. Huntington)
* - infer mapping from available modes
*/
if ((rc = efx_mcdi_get_port_modes(enp,
- &port_modes, NULL)) != 0) {
+ &port_modes, NULL, NULL)) != 0) {
/*
* No port mode information available
* - use default mapping
@@ -1781,11 +1834,26 @@ ef10_nic_board_cfg(
if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
goto fail6;
+ /*
+ * Firmware with support for *_FEC capability bits does not
+ * report that the corresponding *_FEC_REQUESTED bits are supported.
+ * Add them here so that drivers understand that they are supported.
+ */
+ if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
+ epp->ep_phy_cap_mask |=
+ (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
+ if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
+ epp->ep_phy_cap_mask |=
+ (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
+ if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
+ epp->ep_phy_cap_mask |=
+ (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
+
/* Obtain the default PHY advertised capabilities */
if ((rc = ef10_phy_get_link(enp, &els)) != 0)
goto fail7;
- epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
- epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
/* Check capabilities of running datapath firmware */
if ((rc = ef10_get_datapath_caps(enp)) != 0)
@@ -2039,8 +2107,8 @@ ef10_nic_reset(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
- MC_CMD_ENTITY_RESET_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN);
efx_rc_t rc;
/* ef10_nic_reset() is called to recover from BADASSERT failures. */
@@ -2049,7 +2117,6 @@ ef10_nic_reset(
if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
goto fail2;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_ENTITY_RESET;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
@@ -2314,6 +2381,36 @@ fail1:
return (rc);
}
+ __checkReturn boolean_t
+ef10_nic_hw_unavailable(
+ __in efx_nic_t *enp)
+{
+ efx_dword_t dword;
+
+ if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
+ return (B_TRUE);
+
+ EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
+ if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
+ goto unavail;
+
+ return (B_FALSE);
+
+unavail:
+ ef10_nic_set_hw_unavailable(enp);
+
+ return (B_TRUE);
+}
+
+ void
+ef10_nic_set_hw_unavailable(
+ __in efx_nic_t *enp)
+{
+ EFSYS_PROBE(hw_unavail);
+ enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
+}
+
+
void
ef10_nic_fini(
__in efx_nic_t *enp)
@@ -2386,11 +2483,10 @@ efx_mcdi_get_nic_global(
__out uint32_t *valuep)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_NIC_GLOBAL_IN_LEN,
- MC_CMD_GET_NIC_GLOBAL_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
+ MC_CMD_GET_NIC_GLOBAL_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
@@ -2430,10 +2526,9 @@ efx_mcdi_set_nic_global(
__in uint32_t value)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_SET_NIC_GLOBAL_IN_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c
index 2883ec8f..8d1b64f2 100644
--- a/drivers/net/sfc/base/ef10_nvram.c
+++ b/drivers/net/sfc/base/ef10_nvram.c
@@ -203,14 +203,14 @@ tlv_validate_state(
if (tlv_tag(cursor) != TLV_TAG_END) {
/* Check current item has space for tag and length */
- if (cursor->current > (cursor->limit - 2)) {
+ if (cursor->current > (cursor->limit - 1)) {
cursor->current = NULL;
rc = EFAULT;
goto fail3;
}
- /* Check we have value data for current item and another tag */
- if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) {
+ /* Check we have value data for current item and an END tag */
+ if (tlv_next_item_ptr(cursor) > cursor->limit) {
cursor->current = NULL;
rc = EFAULT;
goto fail4;
@@ -635,7 +635,6 @@ fail1:
/* Validate buffer contents (before writing to flash) */
__checkReturn efx_rc_t
ef10_nvram_buffer_validate(
- __in efx_nic_t *enp,
__in uint32_t partn,
__in_bcount(partn_size) caddr_t partn_data,
__in size_t partn_size)
@@ -648,7 +647,6 @@ ef10_nvram_buffer_validate(
int pos;
efx_rc_t rc;
- _NOTE(ARGUNUSED(enp, partn))
EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
if ((partn_data == NULL) || (partn_size == 0)) {
@@ -675,26 +673,32 @@ ef10_nvram_buffer_validate(
goto fail4;
}
+ /* Check partition header matches partn */
+ if (__LE_TO_CPU_16(header->type_id) != partn) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
/* Check partition ends with PARTITION_TRAILER and END tags */
if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
rc = EINVAL;
- goto fail5;
+ goto fail6;
}
trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
if ((rc = tlv_advance(&cursor)) != 0) {
rc = EINVAL;
- goto fail6;
+ goto fail7;
}
if (tlv_tag(&cursor) != TLV_TAG_END) {
rc = EINVAL;
- goto fail7;
+ goto fail8;
}
/* Check generation counts are consistent */
if (trailer->generation != header->generation) {
rc = EINVAL;
- goto fail8;
+ goto fail9;
}
/* Verify partition checksum */
@@ -704,11 +708,13 @@ ef10_nvram_buffer_validate(
}
if (cksum != 0) {
rc = EINVAL;
- goto fail9;
+ goto fail10;
}
return (0);
+fail10:
+ EFSYS_PROBE(fail10);
fail9:
EFSYS_PROBE(fail9);
fail8:
@@ -731,13 +737,24 @@ fail1:
return (rc);
}
+ void
+ef10_nvram_buffer_init(
+ __out_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ uint32_t *buf = (uint32_t *)bufferp;
+
+ memset(buf, 0xff, buffer_size);
+ tlv_init_block(buf);
+}
__checkReturn efx_rc_t
ef10_nvram_buffer_create(
- __in efx_nic_t *enp,
- __in uint16_t partn_type,
- __in_bcount(partn_size) caddr_t partn_data,
+ __in uint32_t partn_type,
+ __out_bcount(partn_size)
+ caddr_t partn_data,
__in size_t partn_size)
{
uint32_t *buf = (uint32_t *)partn_data;
@@ -753,9 +770,8 @@ ef10_nvram_buffer_create(
goto fail1;
}
- memset(buf, 0xff, partn_size);
+ ef10_nvram_buffer_init(partn_data, partn_size);
- tlv_init_block(buf);
if ((rc = tlv_init_cursor(&cursor, buf,
(uint32_t *)((uint8_t *)buf + partn_size),
buf)) != 0) {
@@ -787,7 +803,7 @@ ef10_nvram_buffer_create(
goto fail6;
/* Check that the partition is valid. */
- if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
+ if ((rc = ef10_nvram_buffer_validate(partn_type,
partn_data, partn_size)) != 0)
goto fail7;
@@ -959,22 +975,65 @@ ef10_nvram_buffer_find_item(
}
__checkReturn efx_rc_t
+ef10_nvram_buffer_peek_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *tagp,
+ __out uint32_t *lengthp,
+ __out uint32_t *value_offsetp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ uint32_t tag;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ tag = tlv_tag(&cursor);
+ *tagp = tag;
+ if (tag == TLV_TAG_END) {
+ /*
+ * To allow stepping over the END tag, report the full tag
+ * length and a zero length value.
+ */
+ *lengthp = sizeof (tag);
+ *value_offsetp = sizeof (tag);
+ } else {
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+ *value_offsetp = byte_offset((uint32_t *)tlv_value(&cursor),
+ cursor.current);
+ }
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
ef10_nvram_buffer_get_item(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
__in uint32_t length,
- __out_bcount_part(item_max_size, *lengthp)
- caddr_t itemp,
- __in size_t item_max_size,
+ __out uint32_t *tagp,
+ __out_bcount_part(value_max_size, *lengthp)
+ caddr_t valuep,
+ __in size_t value_max_size,
__out uint32_t *lengthp)
{
efx_rc_t rc;
tlv_cursor_t cursor;
- uint32_t item_length;
+ uint32_t value_length;
- if (item_max_size < length) {
+ if (buffer_size < (offset + length)) {
rc = ENOSPC;
goto fail1;
}
@@ -984,14 +1043,15 @@ ef10_nvram_buffer_get_item(
goto fail2;
}
- item_length = tlv_length(&cursor);
- if (length < item_length) {
+ value_length = tlv_length(&cursor);
+ if (value_max_size < value_length) {
rc = ENOSPC;
goto fail3;
}
- memcpy(itemp, tlv_value(&cursor), item_length);
+ memcpy(valuep, tlv_value(&cursor), value_length);
- *lengthp = item_length;
+ *tagp = tlv_tag(&cursor);
+ *lengthp = value_length;
return (0);
@@ -1011,7 +1071,45 @@ ef10_nvram_buffer_insert_item(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __in_bcount(length) caddr_t keyp,
+ __in uint32_t tag,
+ __in_bcount(length) caddr_t valuep,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ rc = tlv_insert(&cursor, tag, (uint8_t *)valuep, length);
+
+ if (rc != 0)
+ goto fail2;
+
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_modify_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t tag,
+ __in_bcount(length) caddr_t valuep,
__in uint32_t length,
__out uint32_t *lengthp)
{
@@ -1023,7 +1121,7 @@ ef10_nvram_buffer_insert_item(
goto fail1;
}
- rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
+ rc = tlv_modify(&cursor, tag, (uint8_t *)valuep, length);
if (rc != 0) {
goto fail2;
@@ -1042,6 +1140,7 @@ fail1:
return (rc);
}
+
__checkReturn efx_rc_t
ef10_nvram_buffer_delete_item(
__in_bcount(buffer_size)
@@ -1808,7 +1907,7 @@ ef10_nvram_partn_write_segment_tlv(
goto fail7;
/* Unlock the partition */
- ef10_nvram_partn_unlock(enp, partn, NULL);
+ (void) ef10_nvram_partn_unlock(enp, partn, NULL);
EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
@@ -1823,7 +1922,7 @@ fail5:
fail4:
EFSYS_PROBE(fail4);
- ef10_nvram_partn_unlock(enp, partn, NULL);
+ (void) ef10_nvram_partn_unlock(enp, partn, NULL);
fail3:
EFSYS_PROBE(fail3);
@@ -2000,7 +2099,7 @@ ef10_nvram_partn_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size)
{
size_t chunk;
@@ -2168,6 +2267,8 @@ static ef10_parttbl_entry_t medford2_parttbl[] = {
PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM),
PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
+ PARTN_MAP_ENTRY(DYNCONFIG_DEFAULTS, ALL, DYNCONFIG_DEFAULTS),
+ PARTN_MAP_ENTRY(ROMCONFIG_DEFAULTS, ALL, ROMCONFIG_DEFAULTS),
};
static __checkReturn efx_rc_t
diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c
index 84acb70a..84ccdde5 100644
--- a/drivers/net/sfc/base/ef10_phy.c
+++ b/drivers/net/sfc/base/ef10_phy.c
@@ -98,8 +98,10 @@ mcdi_phy_decode_link_mode(
__in uint32_t link_flags,
__in unsigned int speed,
__in unsigned int fcntl,
+ __in uint32_t fec,
__out efx_link_mode_t *link_modep,
- __out unsigned int *fcntlp)
+ __out unsigned int *fcntlp,
+ __out efx_phy_fec_type_t *fecp)
{
boolean_t fd = !!(link_flags &
(1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
@@ -141,6 +143,22 @@ mcdi_phy_decode_link_mode(
EFSYS_PROBE1(mc_pcol_error, int, fcntl);
*fcntlp = 0;
}
+
+ switch (fec) {
+ case MC_CMD_FEC_NONE:
+ *fecp = EFX_PHY_FEC_NONE;
+ break;
+ case MC_CMD_FEC_BASER:
+ *fecp = EFX_PHY_FEC_BASER;
+ break;
+ case MC_CMD_FEC_RS:
+ *fecp = EFX_PHY_FEC_RS;
+ break;
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, fec);
+ *fecp = EFX_PHY_FEC_NONE;
+ break;
+ }
}
@@ -154,6 +172,7 @@ ef10_phy_link_ev(
unsigned int link_flags;
unsigned int speed;
unsigned int fcntl;
+ efx_phy_fec_type_t fec = MC_CMD_FEC_NONE;
efx_link_mode_t link_mode;
uint32_t lp_cap_mask;
@@ -191,7 +210,8 @@ ef10_phy_link_ev(
link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
mcdi_phy_decode_link_mode(enp, link_flags, speed,
MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
- &link_mode, &fcntl);
+ MC_CMD_FEC_NONE, &link_mode,
+ &fcntl, &fec);
mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
&lp_cap_mask);
@@ -242,16 +262,16 @@ ef10_phy_get_link(
__out ef10_link_state_t *elsp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
- MC_CMD_GET_LINK_OUT_LEN)];
+ uint32_t fec;
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_V2_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_V2_LEN;
efx_mcdi_execute(enp, &req);
@@ -266,14 +286,28 @@ ef10_phy_get_link(
}
mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
- &elsp->els_adv_cap_mask);
+ &elsp->epls.epls_adv_cap_mask);
mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
- &elsp->els_lp_cap_mask);
+ &elsp->epls.epls_lp_cap_mask);
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_V2_LEN)
+ fec = MC_CMD_FEC_NONE;
+ else
+ fec = MCDI_OUT_DWORD(req, GET_LINK_OUT_V2_FEC_TYPE);
mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
- &elsp->els_link_mode, &elsp->els_fcntl);
+ fec, &elsp->epls.epls_link_mode,
+ &elsp->epls.epls_fcntl, &elsp->epls.epls_fec);
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_V2_LEN) {
+ elsp->epls.epls_ld_cap_mask = 0;
+ } else {
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_V2_LD_CAP),
+ &elsp->epls.epls_ld_cap_mask);
+ }
+
#if EFSYS_OPT_LOOPBACK
/*
@@ -301,8 +335,8 @@ ef10_phy_reconfigure(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
- MC_CMD_SET_LINK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN);
uint32_t cap_mask;
#if EFSYS_OPT_PHY_LED_CONTROL
unsigned int led_mode;
@@ -316,7 +350,6 @@ ef10_phy_reconfigure(
if (supported == B_FALSE)
goto out;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
@@ -464,12 +497,11 @@ ef10_phy_verify(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
- MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN);
uint32_t state;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
@@ -518,6 +550,29 @@ ef10_phy_oui_get(
return (ENOTSUP);
}
+ __checkReturn efx_rc_t
+ef10_phy_link_state_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_link_state_t *eplsp)
+{
+ efx_rc_t rc;
+ ef10_link_state_t els;
+
+ /* Obtain the active link state */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ *eplsp = els.epls;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
#if EFSYS_OPT_PHY_STATS
__checkReturn efx_rc_t
@@ -583,22 +638,34 @@ ef10_bist_poll(
unsigned long *valuesp,
__in size_t count)
{
+ /*
+ * MCDI_CTL_SDU_LEN_MAX_V1 is large enough to cover all BIST results,
+ * whilst not wasting stack.
+ */
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX_V1);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
- MCDI_CTL_SDU_LEN_MAX)];
uint32_t value_mask = 0;
uint32_t result;
efx_rc_t rc;
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_SFT9001_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_MRSFP_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_MEM_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+
_NOTE(ARGUNUSED(type))
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_POLL_BIST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX_V1;
efx_mcdi_execute(enp, &req);
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index 313a3691..3c8f4f3b 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -29,8 +29,8 @@ efx_mcdi_init_rxq(
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN,
- MC_CMD_INIT_RXQ_V3_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V3_IN_LEN,
+ MC_CMD_INIT_RXQ_V3_OUT_LEN);
int npages = EFX_RXQ_NBUFS(ndescs);
int i;
efx_qword_t *dma_addr;
@@ -73,7 +73,6 @@ efx_mcdi_init_rxq(
want_outer_classes = B_FALSE;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_RXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN;
@@ -146,11 +145,10 @@ efx_mcdi_fini_rxq(
__in uint32_t instance)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
- MC_CMD_FINI_RXQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FINI_RXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
@@ -188,8 +186,8 @@ efx_mcdi_rss_context_alloc(
__out uint32_t *rss_contextp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
- MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
+ MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
uint32_t rss_context;
uint32_t context_type;
efx_rc_t rc;
@@ -211,7 +209,6 @@ efx_mcdi_rss_context_alloc(
goto fail2;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
@@ -274,8 +271,8 @@ efx_mcdi_rss_context_free(
__in uint32_t rss_context)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
- MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_FREE_OUT_LEN);
efx_rc_t rc;
if (rss_context == EF10_RSS_CONTEXT_INVALID) {
@@ -283,7 +280,6 @@ efx_mcdi_rss_context_free(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
@@ -318,14 +314,9 @@ efx_mcdi_rss_context_set_flags(
__in efx_rx_hash_type_t type)
{
efx_nic_cfg_t *encp = &enp->en_nic_cfg;
- efx_rx_hash_type_t type_ipv4;
- efx_rx_hash_type_t type_ipv4_tcp;
- efx_rx_hash_type_t type_ipv6;
- efx_rx_hash_type_t type_ipv6_tcp;
- efx_rx_hash_type_t modes;
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
- MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN);
efx_rc_t rc;
EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN ==
@@ -350,7 +341,6 @@ efx_mcdi_rss_context_set_flags(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
@@ -360,57 +350,38 @@ efx_mcdi_rss_context_set_flags(
MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
rss_context);
- type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
- EFX_RX_HASH(IPV4_UDP, 2TUPLE);
- type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
- type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) |
- EFX_RX_HASH(IPV6_UDP, 2TUPLE);
- type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
-
- /*
- * Create a copy of the original hash type.
- * The copy will be used to fill in RSS_MODE bits and
- * may be cleared beforehand. The original variable
- * and, thus, EN bits will remain unaffected.
- */
- modes = type;
-
/*
* If the firmware lacks support for additional modes, RSS_MODE
* fields must contain zeros, otherwise the operation will fail.
*/
if (encp->enc_rx_scale_additional_modes_supported == B_FALSE)
- modes = 0;
-
-#define EXTRACT_RSS_MODE(_type, _class) \
- (EFX_EXTRACT_NATIVE(_type, 0, 31, \
- EFX_LOW_BIT(EFX_RX_CLASS_##_class), \
- EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \
- EFX_MASK32(EFX_RX_CLASS_##_class))
+ type &= EFX_RX_HASH_LEGACY_MASK;
MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
- ((type & type_ipv4) == type_ipv4) ? 1 : 0,
+ (type & EFX_RX_HASH_IPV4) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
- ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0,
+ (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
- ((type & type_ipv6) == type_ipv6) ? 1 : 0,
+ (type & EFX_RX_HASH_IPV6) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
- ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0,
+ (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
- EXTRACT_RSS_MODE(modes, IPV4_TCP),
+ (type >> EFX_RX_CLASS_IPV4_TCP_LBN) &
+ EFX_MASK32(EFX_RX_CLASS_IPV4_TCP),
RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
- EXTRACT_RSS_MODE(modes, IPV4_UDP),
+ (type >> EFX_RX_CLASS_IPV4_UDP_LBN) &
+ EFX_MASK32(EFX_RX_CLASS_IPV4_UDP),
RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE,
- EXTRACT_RSS_MODE(modes, IPV4),
+ (type >> EFX_RX_CLASS_IPV4_LBN) & EFX_MASK32(EFX_RX_CLASS_IPV4),
RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE,
- EXTRACT_RSS_MODE(modes, IPV6_TCP),
+ (type >> EFX_RX_CLASS_IPV6_TCP_LBN) &
+ EFX_MASK32(EFX_RX_CLASS_IPV6_TCP),
RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE,
- EXTRACT_RSS_MODE(modes, IPV6_UDP),
+ (type >> EFX_RX_CLASS_IPV6_UDP_LBN) &
+ EFX_MASK32(EFX_RX_CLASS_IPV6_UDP),
RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE,
- EXTRACT_RSS_MODE(modes, IPV6));
-
-#undef EXTRACT_RSS_MODE
+ (type >> EFX_RX_CLASS_IPV6_LBN) & EFX_MASK32(EFX_RX_CLASS_IPV6));
efx_mcdi_execute(enp, &req);
@@ -439,8 +410,8 @@ efx_mcdi_rss_context_set_key(
__in size_t n)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
- MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN);
efx_rc_t rc;
if (rss_context == EF10_RSS_CONTEXT_INVALID) {
@@ -448,7 +419,6 @@ efx_mcdi_rss_context_set_key(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
@@ -496,8 +466,8 @@ efx_mcdi_rss_context_set_table(
__in size_t n)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
- MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN);
uint8_t *req_table;
int i, rc;
@@ -506,7 +476,6 @@ efx_mcdi_rss_context_set_table(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
diff --git a/drivers/net/sfc/base/ef10_signed_image_layout.h b/drivers/net/sfc/base/ef10_signed_image_layout.h
index a35d1601..c25ffe2f 100644
--- a/drivers/net/sfc/base/ef10_signed_image_layout.h
+++ b/drivers/net/sfc/base/ef10_signed_image_layout.h
@@ -4,6 +4,14 @@
* All rights reserved.
*/
+/*
+ * This is NOT the original source file. Do NOT edit it.
+ * To update the image layout headers, please edit the copy in
+ * the sfregistry repo and then, in that repo,
+ * "make layout_headers" or "make export" to
+ * regenerate and export all types of headers.
+ */
+
/* These structures define the layouts for the signed firmware image binary
* saved in NVRAM. The original image is in the Cryptographic message
* syntax (CMS) format which contains the bootable firmware binary plus the
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index 7d27f710..5f3df42b 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -31,8 +31,8 @@ efx_mcdi_init_txq(
__in efsys_mem_t *esmp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
- MC_CMD_INIT_TXQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+ MC_CMD_INIT_TXQ_OUT_LEN);
efx_qword_t *dma_addr;
uint64_t addr;
int npages;
@@ -53,7 +53,6 @@ efx_mcdi_init_txq(
goto fail2;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_TXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
@@ -120,11 +119,10 @@ efx_mcdi_fini_txq(
__in uint32_t instance)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
- MC_CMD_FINI_TXQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FINI_TXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
@@ -278,7 +276,7 @@ ef10_tx_qpio_enable(
fail3:
EFSYS_PROBE(fail3);
- ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
fail2:
EFSYS_PROBE(fail2);
etp->et_pio_size = 0;
@@ -296,10 +294,12 @@ ef10_tx_qpio_disable(
if (etp->et_pio_size != 0) {
/* Unlink the piobuf from this TXQ */
- ef10_nic_pio_unlink(enp, etp->et_index);
+ if (ef10_nic_pio_unlink(enp, etp->et_index) != 0)
+ return;
/* Free the sub-allocated PIO block */
- ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
+ etp->et_pio_blknum);
etp->et_pio_size = 0;
etp->et_pio_write_offset = 0;
}
@@ -539,12 +539,9 @@ ef10_tx_qdesc_post(
{
unsigned int added = *addedp;
unsigned int i;
- efx_rc_t rc;
- if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
- rc = ENOSPC;
- goto fail1;
- }
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ return (ENOSPC);
for (i = 0; i < ndescs; i++) {
efx_desc_t *edp = &ed[i];
@@ -564,11 +561,6 @@ ef10_tx_qdesc_post(
*addedp = added;
return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
}
void
@@ -637,22 +629,22 @@ ef10_tx_qdesc_tso2_create(
EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
- EFX_POPULATE_QWORD_6(edp[0].ed_eq,
+ EFX_POPULATE_QWORD_5(edp[0].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
ESF_DZ_TX_TSO_IP_ID, ipv4_id,
- ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
- EFX_POPULATE_QWORD_4(edp[1].ed_eq,
+ EFX_POPULATE_QWORD_5(edp[1].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
- ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
+ ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
+ ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
}
void
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
index 5108b9b1..8e10e893 100644
--- a/drivers/net/sfc/base/efx.h
+++ b/drivers/net/sfc/base/efx.h
@@ -7,6 +7,7 @@
#ifndef _SYS_EFX_H
#define _SYS_EFX_H
+#include "efx_annote.h"
#include "efsys.h"
#include "efx_check.h"
#include "efx_phy_ids.h"
@@ -154,6 +155,14 @@ extern __checkReturn efx_rc_t
efx_nic_reset(
__in efx_nic_t *enp);
+extern __checkReturn boolean_t
+efx_nic_hw_unavailable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_set_hw_unavailable(
+ __in efx_nic_t *enp);
+
#if EFSYS_OPT_DIAG
extern __checkReturn efx_rc_t
@@ -661,77 +670,74 @@ efx_mon_init(
#define EFX_MON_STATS_PAGE_SIZE 0x100
#define EFX_MON_MASK_ELEMENT_SIZE 32
-/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 400fdb0517af1fca */
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 78b65c8d5af9747b */
typedef enum efx_mon_stat_e {
- EFX_MON_STAT_2_5V,
- EFX_MON_STAT_VCCP1,
- EFX_MON_STAT_VCC,
- EFX_MON_STAT_5V,
- EFX_MON_STAT_12V,
- EFX_MON_STAT_VCCP2,
- EFX_MON_STAT_EXT_TEMP,
- EFX_MON_STAT_INT_TEMP,
- EFX_MON_STAT_AIN1,
- EFX_MON_STAT_AIN2,
- EFX_MON_STAT_INT_COOLING,
- EFX_MON_STAT_EXT_COOLING,
- EFX_MON_STAT_1V,
- EFX_MON_STAT_1_2V,
- EFX_MON_STAT_1_8V,
- EFX_MON_STAT_3_3V,
- EFX_MON_STAT_1_2VA,
- EFX_MON_STAT_VREF,
- EFX_MON_STAT_VAOE,
+ EFX_MON_STAT_CONTROLLER_TEMP,
+ EFX_MON_STAT_PHY_COMMON_TEMP,
+ EFX_MON_STAT_CONTROLLER_COOLING,
+ EFX_MON_STAT_PHY0_TEMP,
+ EFX_MON_STAT_PHY0_COOLING,
+ EFX_MON_STAT_PHY1_TEMP,
+ EFX_MON_STAT_PHY1_COOLING,
+ EFX_MON_STAT_IN_1V0,
+ EFX_MON_STAT_IN_1V2,
+ EFX_MON_STAT_IN_1V8,
+ EFX_MON_STAT_IN_2V5,
+ EFX_MON_STAT_IN_3V3,
+ EFX_MON_STAT_IN_12V0,
+ EFX_MON_STAT_IN_1V2A,
+ EFX_MON_STAT_IN_VREF,
+ EFX_MON_STAT_OUT_VAOE,
EFX_MON_STAT_AOE_TEMP,
EFX_MON_STAT_PSU_AOE_TEMP,
EFX_MON_STAT_PSU_TEMP,
- EFX_MON_STAT_FAN0,
- EFX_MON_STAT_FAN1,
- EFX_MON_STAT_FAN2,
- EFX_MON_STAT_FAN3,
- EFX_MON_STAT_FAN4,
- EFX_MON_STAT_VAOE_IN,
- EFX_MON_STAT_IAOE,
- EFX_MON_STAT_IAOE_IN,
+ EFX_MON_STAT_FAN_0,
+ EFX_MON_STAT_FAN_1,
+ EFX_MON_STAT_FAN_2,
+ EFX_MON_STAT_FAN_3,
+ EFX_MON_STAT_FAN_4,
+ EFX_MON_STAT_IN_VAOE,
+ EFX_MON_STAT_OUT_IAOE,
+ EFX_MON_STAT_IN_IAOE,
EFX_MON_STAT_NIC_POWER,
- EFX_MON_STAT_0_9V,
- EFX_MON_STAT_I0_9V,
- EFX_MON_STAT_I1_2V,
- EFX_MON_STAT_0_9V_ADC,
- EFX_MON_STAT_INT_TEMP2,
- EFX_MON_STAT_VREG_TEMP,
- EFX_MON_STAT_VREG_0_9V_TEMP,
- EFX_MON_STAT_VREG_1_2V_TEMP,
- EFX_MON_STAT_INT_VPTAT,
- EFX_MON_STAT_INT_ADC_TEMP,
- EFX_MON_STAT_EXT_VPTAT,
- EFX_MON_STAT_EXT_ADC_TEMP,
+ EFX_MON_STAT_IN_0V9,
+ EFX_MON_STAT_IN_I0V9,
+ EFX_MON_STAT_IN_I1V2,
+ EFX_MON_STAT_IN_0V9_ADC,
+ EFX_MON_STAT_CONTROLLER_2_TEMP,
+ EFX_MON_STAT_VREG_INTERNAL_TEMP,
+ EFX_MON_STAT_VREG_0V9_TEMP,
+ EFX_MON_STAT_VREG_1V2_TEMP,
+ EFX_MON_STAT_CONTROLLER_VPTAT,
+ EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC,
+ EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC,
EFX_MON_STAT_AMBIENT_TEMP,
EFX_MON_STAT_AIRFLOW,
EFX_MON_STAT_VDD08D_VSS08D_CSR,
EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC,
EFX_MON_STAT_HOTPOINT_TEMP,
- EFX_MON_STAT_PHY_POWER_SWITCH_PORT0,
- EFX_MON_STAT_PHY_POWER_SWITCH_PORT1,
+ EFX_MON_STAT_PHY_POWER_PORT0,
+ EFX_MON_STAT_PHY_POWER_PORT1,
EFX_MON_STAT_MUM_VCC,
- EFX_MON_STAT_0V9_A,
- EFX_MON_STAT_I0V9_A,
- EFX_MON_STAT_0V9_A_TEMP,
- EFX_MON_STAT_0V9_B,
- EFX_MON_STAT_I0V9_B,
- EFX_MON_STAT_0V9_B_TEMP,
+ EFX_MON_STAT_IN_0V9_A,
+ EFX_MON_STAT_IN_I0V9_A,
+ EFX_MON_STAT_VREG_0V9_A_TEMP,
+ EFX_MON_STAT_IN_0V9_B,
+ EFX_MON_STAT_IN_I0V9_B,
+ EFX_MON_STAT_VREG_0V9_B_TEMP,
EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY,
- EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC,
EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY,
- EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC,
EFX_MON_STAT_CONTROLLER_MASTER_VPTAT,
EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP,
- EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC,
- EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC,
EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT,
EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP,
- EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC,
- EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC,
EFX_MON_STAT_SODIMM_VOUT,
EFX_MON_STAT_SODIMM_0_TEMP,
EFX_MON_STAT_SODIMM_1_TEMP,
@@ -740,12 +746,12 @@ typedef enum efx_mon_stat_e {
EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
EFX_MON_STAT_BOARD_FRONT_TEMP,
EFX_MON_STAT_BOARD_BACK_TEMP,
- EFX_MON_STAT_I1V8,
- EFX_MON_STAT_I2V5,
- EFX_MON_STAT_I3V3,
- EFX_MON_STAT_I12V0,
- EFX_MON_STAT_1_3V,
- EFX_MON_STAT_I1V3,
+ EFX_MON_STAT_IN_I1V8,
+ EFX_MON_STAT_IN_I2V5,
+ EFX_MON_STAT_IN_I3V3,
+ EFX_MON_STAT_IN_I12V0,
+ EFX_MON_STAT_IN_1V3,
+ EFX_MON_STAT_IN_I1V3,
EFX_MON_NSTATS
} efx_mon_stat_t;
@@ -759,11 +765,40 @@ typedef enum efx_mon_stat_state_e {
EFX_MON_STAT_STATE_NO_READING = 4,
} efx_mon_stat_state_t;
+typedef enum efx_mon_stat_unit_e {
+ EFX_MON_STAT_UNIT_UNKNOWN = 0,
+ EFX_MON_STAT_UNIT_BOOL,
+ EFX_MON_STAT_UNIT_TEMP_C,
+ EFX_MON_STAT_UNIT_VOLTAGE_MV,
+ EFX_MON_STAT_UNIT_CURRENT_MA,
+ EFX_MON_STAT_UNIT_POWER_W,
+ EFX_MON_STAT_UNIT_RPM,
+ EFX_MON_NUNITS
+} efx_mon_stat_unit_t;
+
typedef struct efx_mon_stat_value_s {
- uint16_t emsv_value;
- uint16_t emsv_state;
+ uint16_t emsv_value;
+ efx_mon_stat_state_t emsv_state;
+ efx_mon_stat_unit_t emsv_unit;
} efx_mon_stat_value_t;
+typedef struct efx_mon_limit_value_s {
+ uint16_t emlv_warning_min;
+ uint16_t emlv_warning_max;
+ uint16_t emlv_fatal_min;
+ uint16_t emlv_fatal_max;
+} efx_mon_stat_limits_t;
+
+typedef enum efx_mon_stat_portmask_e {
+ EFX_MON_STAT_PORTMAP_NONE = 0,
+ EFX_MON_STAT_PORTMAP_PORT0 = 1,
+ EFX_MON_STAT_PORTMAP_PORT1 = 2,
+ EFX_MON_STAT_PORTMAP_PORT2 = 3,
+ EFX_MON_STAT_PORTMAP_PORT3 = 4,
+ EFX_MON_STAT_PORTMAP_ALL = (-1),
+ EFX_MON_STAT_PORTMAP_UNKNOWN = (-2)
+} efx_mon_stat_portmask_t;
+
#if EFSYS_OPT_NAMES
extern const char *
@@ -771,14 +806,39 @@ efx_mon_stat_name(
__in efx_nic_t *enp,
__in efx_mon_stat_t id);
+extern const char *
+efx_mon_stat_description(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id);
+
#endif /* EFSYS_OPT_NAMES */
+extern __checkReturn boolean_t
+efx_mon_mcdi_to_efx_stat(
+ __in int mcdi_index,
+ __out efx_mon_stat_t *statp);
+
+extern __checkReturn boolean_t
+efx_mon_get_stat_unit(
+ __in efx_mon_stat_t stat,
+ __out efx_mon_stat_unit_t *unitp);
+
+extern __checkReturn boolean_t
+efx_mon_get_stat_portmap(
+ __in efx_mon_stat_t stat,
+ __out efx_mon_stat_portmask_t *maskp);
+
extern __checkReturn efx_rc_t
efx_mon_stats_update(
__in efx_nic_t *enp,
__in efsys_mem_t *esmp,
__inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+extern __checkReturn efx_rc_t
+efx_mon_limits_update(
+ __in efx_nic_t *enp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values);
+
#endif /* EFSYS_OPT_MON_STATS */
extern void
@@ -970,12 +1030,39 @@ efx_phy_media_type_get(
__in efx_nic_t *enp,
__out efx_phy_media_type_t *typep);
+/*
+ * 2-wire device address of the base information in accordance with SFF-8472
+ * Diagnostic Monitoring Interface for Optical Transceivers section
+ * 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
+
+/*
+ * 2-wire device address of the digital diagnostics monitoring interface
+ * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
+ * Transceivers section 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
+
+/*
+ * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
+ * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
+ * Operation.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
+
+/*
+ * Maximum accessible data offset for PHY module information.
+ */
+#define EFX_PHY_MEDIA_INFO_MAX_OFFSET 0x100
+
+
extern __checkReturn efx_rc_t
efx_phy_module_get_info(
__in efx_nic_t *enp,
__in uint8_t dev_addr,
- __in uint8_t offset,
- __in uint8_t len,
+ __in size_t offset,
+ __in size_t len,
__out_bcount(len) uint8_t *data);
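As a usage sketch (assuming an initialised efx_nic_t and ignoring error handling), a caller could fetch the SFF-8472 base identification bytes of an SFP+ module as follows; the 128-octet buffer size is an arbitrary choice within EFX_PHY_MEDIA_INFO_MAX_OFFSET:

    uint8_t id_bytes[128];

    /* Read the first 128 octets of the A0h page of the plugged module. */
    if (efx_phy_module_get_info(enp, EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE,
        0, sizeof (id_bytes), id_bytes) != 0) {
            /* Module absent or the read failed. */
    }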
#if EFSYS_OPT_PHY_STATS
@@ -1194,6 +1281,7 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_rx_prefix_size;
uint32_t enc_rx_buf_align_start;
uint32_t enc_rx_buf_align_end;
+#if EFSYS_OPT_RX_SCALE
uint32_t enc_rx_scale_max_exclusive_contexts;
/*
* Mask of supported hash algorithms.
@@ -1206,6 +1294,7 @@ typedef struct efx_nic_cfg_s {
*/
boolean_t enc_rx_scale_l4_hash_supported;
boolean_t enc_rx_scale_additional_modes_supported;
+#endif /* EFSYS_OPT_RX_SCALE */
#if EFSYS_OPT_LOOPBACK
efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
#endif /* EFSYS_OPT_LOOPBACK */
@@ -1240,6 +1329,7 @@ typedef struct efx_nic_cfg_s {
boolean_t enc_bug35388_workaround;
boolean_t enc_bug41750_workaround;
boolean_t enc_bug61265_workaround;
+ boolean_t enc_bug61297_workaround;
boolean_t enc_rx_batching_enabled;
/* Maximum number of descriptors completed in an rx event. */
uint32_t enc_rx_batch_max;
@@ -1483,6 +1573,8 @@ typedef enum efx_nvram_type_e {
EFX_NVRAM_LICENSE,
EFX_NVRAM_UEFIROM,
EFX_NVRAM_MUM_FIRMWARE,
+ EFX_NVRAM_DYNCONFIG_DEFAULTS,
+ EFX_NVRAM_ROMCONFIG_DEFAULTS,
EFX_NVRAM_NTYPES,
} efx_nvram_type_t;
@@ -1608,6 +1700,87 @@ efx_bootcfg_write(
__in_bcount(size) uint8_t *data,
__in size_t size);
+
+/*
+ * Processing routines for buffers arranged in the DHCP/BOOTP option format
+ * (see https://tools.ietf.org/html/rfc1533)
+ *
+ * Summarising the format: the buffer is a sequence of options. All options
+ * begin with a tag octet, which uniquely identifies the option. Fixed-
+ * length options without data consist of only a tag octet. Only options PAD
+ * (0) and END (255) are fixed length. All other options are variable-length
+ * with a length octet following the tag octet. The value of the length
+ * octet does not include the two octets specifying the tag and length. The
+ * length octet is followed by "length" octets of data.
+ *
+ * Option data may be a sequence of sub-options in the same format. The data
+ * content of the encapsulating option is one or more encapsulated sub-options,
+ * with no terminating END tag required.
+ *
+ * To be valid, the top-level sequence of options should be terminated by an
+ * END tag. The buffer should be padded with the PAD byte.
+ *
+ * When stored to NVRAM, the DHCP option format buffer is preceded by a
+ * checksum octet. The full buffer (including after the END tag) contributes
+ * to the checksum, hence the need to fill the buffer to the end with PAD.
+ */
+
+#define EFX_DHCP_END ((uint8_t)0xff)
+#define EFX_DHCP_PAD ((uint8_t)0)
+
+#define EFX_DHCP_ENCAP_OPT(encapsulator, encapsulated) \
+ (uint16_t)(((encapsulator) << 8) | (encapsulated))
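To make the checksum rule above concrete, here is a small hypothetical example (the buffer contents and the fixup helper are invented for illustration; real sectors are produced and checked by efx_dhcp_csum()/efx_dhcp_verify()). The leading octet is chosen so that all bytes of the sector, including padding after the END tag, sum to zero modulo 256:

    /* Checksum octet, then one option (tag 0xd0, two value octets),
     * an END tag, and PAD fill to the end of the sector.
     */
    static uint8_t example_sector[8] = {
            0x00,                   /* checksum octet, fixed up below */
            0xd0, 0x02, 0x12, 0x34, /* tag, length, value[0], value[1] */
            0xff,                   /* EFX_DHCP_END */
            0x00, 0x00              /* EFX_DHCP_PAD fill */
    };

    static void
    example_sector_fixup(void)
    {
            uint8_t sum = 0;
            size_t pos;

            for (pos = 1; pos < sizeof (example_sector); pos++)
                    sum = (uint8_t)(sum + example_sector[pos]);
            example_sector[0] = (uint8_t)(0u - sum);
    }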
+
+extern __checkReturn uint8_t
+efx_dhcp_csum(
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_dhcp_verify(
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size,
+ __out_opt size_t *usedp);
+
+extern __checkReturn efx_rc_t
+efx_dhcp_find_tag(
+ __in_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt,
+ __deref_out uint8_t **valuepp,
+ __out size_t *value_lengthp);
+
+extern __checkReturn efx_rc_t
+efx_dhcp_find_end(
+ __in_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __deref_out uint8_t **endpp);
+
+
+extern __checkReturn efx_rc_t
+efx_dhcp_delete_tag(
+ __inout_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt);
+
+extern __checkReturn efx_rc_t
+efx_dhcp_add_tag(
+ __inout_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt,
+ __in_bcount_opt(value_length) uint8_t *valuep,
+ __in size_t value_length);
+
+extern __checkReturn efx_rc_t
+efx_dhcp_update_tag(
+ __inout_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt,
+ __in uint8_t *value_locationp,
+ __in_bcount_opt(value_length) uint8_t *valuep,
+ __in size_t value_length);
+
+
#endif /* EFSYS_OPT_BOOTCFG */
#if EFSYS_OPT_IMAGE_LAYOUT
@@ -1689,7 +1862,8 @@ efx_check_reflash_image(
extern __checkReturn efx_rc_t
efx_build_signed_image_write_buffer(
- __out uint8_t *bufferp,
+ __out_bcount(buffer_size)
+ uint8_t *bufferp,
__in uint32_t buffer_size,
__in efx_image_info_t *infop,
__out efx_image_header_t **headerpp);
@@ -2122,7 +2296,7 @@ typedef enum efx_rx_hash_alg_e {
* - a combination of legacy flags
* - a combination of EFX_RX_HASH() flags
*/
-typedef unsigned int efx_rx_hash_type_t;
+typedef uint32_t efx_rx_hash_type_t;
typedef enum efx_rx_hash_support_e {
EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */
@@ -2223,7 +2397,8 @@ extern __checkReturn efx_rc_t
efx_rx_scale_hash_flags_get(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t hash_alg,
- __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out_ecount_part(max_nflags, *nflagsp) unsigned int *flagsp,
+ __in unsigned int max_nflags,
__out unsigned int *nflagsp);
extern __checkReturn efx_rc_t
@@ -2813,9 +2988,23 @@ efx_filter_spec_set_encap_type(
__in efx_filter_inner_frame_match_t inner_frame_match);
extern __checkReturn efx_rc_t
-efx_filter_spec_set_vxlan_full(
+efx_filter_spec_set_vxlan(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vni,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_geneve(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vni,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_nvgre(
__inout efx_filter_spec_t *spec,
- __in const uint8_t *vxlan_id,
+ __in const uint8_t *vsid,
__in const uint8_t *inner_addr,
__in const uint8_t *outer_addr);
@@ -3057,6 +3246,32 @@ efx_nic_set_fw_subvariant(
#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+typedef enum efx_phy_fec_type_e {
+ EFX_PHY_FEC_NONE = 0,
+ EFX_PHY_FEC_BASER,
+ EFX_PHY_FEC_RS
+} efx_phy_fec_type_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_fec_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_fec_type_t *typep);
+
+typedef struct efx_phy_link_state_s {
+ uint32_t epls_adv_cap_mask;
+ uint32_t epls_lp_cap_mask;
+ uint32_t epls_ld_cap_mask;
+ unsigned int epls_fcntl;
+ efx_phy_fec_type_t epls_fec;
+ efx_link_mode_t epls_link_mode;
+} efx_phy_link_state_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_link_state_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_link_state_t *eplsp);
+
+
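A short usage sketch for the new link state query (assuming an initialised efx_nic_t; only fields declared above are used):

    efx_phy_link_state_t epls;

    if (efx_phy_link_state_get(enp, &epls) == 0) {
            /* epls.epls_link_mode, epls.epls_fec and the capability masks
             * describe the currently negotiated link.
             */
    }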
#ifdef __cplusplus
}
#endif
diff --git a/drivers/net/sfc/base/efx_annote.h b/drivers/net/sfc/base/efx_annote.h
new file mode 100644
index 00000000..607b43c7
--- /dev/null
+++ b/drivers/net/sfc/base/efx_annote.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_EFX_ANNOTE_H
+#define _SYS_EFX_ANNOTE_H
+
+#if defined(_WIN32) || defined(_WIN64)
+#define EFX_HAVE_WINDOWS_ANNOTATIONS 1
+#else
+#define EFX_HAVE_WINDOWS_ANNOTATIONS 0
+#endif /* defined(_WIN32) || defined(_WIN64) */
+
+#if defined(__sun)
+#define EFX_HAVE_SOLARIS_ANNOTATIONS 1
+#else
+#define EFX_HAVE_SOLARIS_ANNOTATIONS 0
+#endif /* defined(__sun) */
+
+#if !EFX_HAVE_WINDOWS_ANNOTATIONS
+
+/* Ignore Windows SAL annotations on other platforms */
+#define __in
+#define __in_opt
+#define __in_ecount(_n)
+#define __in_ecount_opt(_n)
+#define __in_bcount(_n)
+#define __in_bcount_opt(_n)
+
+#define __out
+#define __out_opt
+#define __out_ecount(_n)
+#define __out_ecount_opt(_n)
+#define __out_ecount_part(_n, _l)
+#define __out_bcount(_n)
+#define __out_bcount_opt(_n)
+#define __out_bcount_part(_n, _l)
+#define __out_bcount_part_opt(_n, _l)
+
+#define __deref_out
+#define __deref_inout
+
+#define __inout
+#define __inout_opt
+#define __inout_ecount(_n)
+#define __inout_ecount_opt(_n)
+#define __inout_bcount(_n)
+#define __inout_bcount_opt(_n)
+#define __inout_bcount_full_opt(_n)
+
+#define __deref_out_bcount_opt(n)
+
+#define __checkReturn
+#define __success(_x)
+
+#define __drv_when(_p, _c)
+
+#endif /* !EFX_HAVE_WINDOWS_ANNOTATIONS */
+
+#if !EFX_HAVE_SOLARIS_ANNOTATIONS
+
+#if EFX_HAVE_WINDOWS_ANNOTATIONS
+
+/*
+ * Support some SunOS/Solaris style _NOTE() annotations
+ *
+ * At present with the facilities provided in the WDK and the SAL we can only
+ * easily act upon _NOTE(ARGUNUSED(arglist)) annotations.
+ *
+ * Intermediate macros to expand individual _NOTE annotation types into
+ * something the WDK or SAL can understand. They shouldn't be used directly,
+ * for example EFX_NOTE_ARGUNUSED() is only used as an intermediate step in the
+ * transformation of _NOTE(ARGUNUSED(arg1, arg2)) into
+ * UNREFERENCED_PARAMETER((arg1, arg2));
+ */
+#define EFX_NOTE_ALIGNMENT(_fname, _n)
+#define EFX_NOTE_ARGUNUSED(...) UNREFERENCED_PARAMETER((__VA_ARGS__));
+#define EFX_NOTE_CONSTANTCONDITION
+#define EFX_NOTE_CONSTCOND
+#define EFX_NOTE_EMPTY
+#define EFX_NOTE_FALLTHROUGH
+#define EFX_NOTE_FALLTHRU
+#define EFX_NOTE_LINTED(_msg)
+#define EFX_NOTE_NOTREACHED
+#define EFX_NOTE_PRINTFLIKE(_n)
+#define EFX_NOTE_SCANFLIKE(_n)
+#define EFX_NOTE_VARARGS(_n)
+
+#define _NOTE(_annotation) EFX_NOTE_ ## _annotation
+
+#else
+
+/* Ignore Solaris annotations on other platforms */
+
+#define _NOTE(_annotation)
+
+#endif /* EFX_HAVE_WINDOWS_ANNOTATIONS */
+
+#endif /* !EFX_HAVE_SOLARIS_ANNOTATIONS */
+
+#endif /* _SYS_EFX_ANNOTE_H */
diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c
index 715e18e8..3b0401e8 100644
--- a/drivers/net/sfc/base/efx_bootcfg.c
+++ b/drivers/net/sfc/base/efx_bootcfg.c
@@ -19,8 +19,33 @@
#define BOOTCFG_PER_PF 0x800
#define BOOTCFG_PF_COUNT 16
-#define DHCP_END ((uint8_t)0xff)
-#define DHCP_PAD ((uint8_t)0)
+#define DHCP_OPT_HAS_VALUE(opt) \
+ (((opt) > EFX_DHCP_PAD) && ((opt) < EFX_DHCP_END))
+
+#define DHCP_MAX_VALUE 255
+
+#define DHCP_ENCAPSULATOR(encap_opt) ((encap_opt) >> 8)
+#define DHCP_ENCAPSULATED(encap_opt) ((encap_opt) & 0xff)
+#define DHCP_IS_ENCAP_OPT(opt) DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATOR(opt))
+
+typedef struct efx_dhcp_tag_hdr_s {
+ uint8_t tag;
+ uint8_t length;
+} efx_dhcp_tag_hdr_t;
+
+/*
+ * Length calculations for tags with value field. PAD and END
+ * have a fixed length of 1, with no length or value field.
+ */
+#define DHCP_FULL_TAG_LENGTH(hdr) \
+ (sizeof (efx_dhcp_tag_hdr_t) + (hdr)->length)
+
+#define DHCP_NEXT_TAG(hdr) \
+ ((efx_dhcp_tag_hdr_t *)(((uint8_t *)(hdr)) + \
+ DHCP_FULL_TAG_LENGTH((hdr))))
+
+#define DHCP_CALC_TAG_LENGTH(payload_len) \
+ ((payload_len) + sizeof (efx_dhcp_tag_hdr_t))
/* Report the layout of bootcfg sectors in NVRAM partition. */
@@ -110,14 +135,11 @@ fail1:
}
-static __checkReturn uint8_t
-efx_bootcfg_csum(
- __in efx_nic_t *enp,
+ __checkReturn uint8_t
+efx_dhcp_csum(
__in_bcount(size) uint8_t const *data,
__in size_t size)
{
- _NOTE(ARGUNUSED(enp))
-
unsigned int pos;
uint8_t checksum = 0;
@@ -126,9 +148,8 @@ efx_bootcfg_csum(
return (checksum);
}
-static __checkReturn efx_rc_t
-efx_bootcfg_verify(
- __in efx_nic_t *enp,
+ __checkReturn efx_rc_t
+efx_dhcp_verify(
__in_bcount(size) uint8_t const *data,
__in size_t size,
__out_opt size_t *usedp)
@@ -144,12 +165,12 @@ efx_bootcfg_verify(
/* Consume tag */
tag = data[offset];
- if (tag == DHCP_END) {
+ if (tag == EFX_DHCP_END) {
offset++;
used = offset;
break;
}
- if (tag == DHCP_PAD) {
+ if (tag == EFX_DHCP_PAD) {
offset++;
continue;
}
@@ -171,8 +192,8 @@ efx_bootcfg_verify(
used = offset;
}
- /* Checksum the entire sector, including bytes after any DHCP_END */
- if (efx_bootcfg_csum(enp, data, size) != 0) {
+ /* Checksum the entire sector, including bytes after any EFX_DHCP_END */
+ if (efx_dhcp_csum(data, size) != 0) {
rc = EINVAL;
goto fail3;
}
@@ -193,6 +214,516 @@ fail1:
}
/*
+ * Walk the entire tag set looking for option. The sought option may be
+ * encapsulated. ENOENT indicates the walk completed without finding the
+ * option. If we run out of buffer during the walk the function will return
+ * ENOSPC.
+ */
+static efx_rc_t
+efx_dhcp_walk_tags(
+ __deref_inout uint8_t **tagpp,
+ __inout size_t *buffer_sizep,
+ __in uint16_t opt)
+{
+ efx_rc_t rc = 0;
+ boolean_t is_encap = B_FALSE;
+
+ if (DHCP_IS_ENCAP_OPT(opt)) {
+ /*
+ * Look for the encapsulator and, if found, limit ourselves
+		 * to its payload. If it is not found then the requested tag
+		 * cannot be present either, so the encapsulated opt search
+		 * is skipped.
+ */
+ rc = efx_dhcp_walk_tags(tagpp, buffer_sizep,
+ DHCP_ENCAPSULATOR(opt));
+ if (rc == 0) {
+ *buffer_sizep = ((efx_dhcp_tag_hdr_t *)*tagpp)->length;
+ (*tagpp) += sizeof (efx_dhcp_tag_hdr_t);
+ }
+ opt = DHCP_ENCAPSULATED(opt);
+ is_encap = B_TRUE;
+ }
+
+ EFSYS_ASSERT(!DHCP_IS_ENCAP_OPT(opt));
+
+ while (rc == 0) {
+ size_t size;
+
+ if (*buffer_sizep == 0) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (DHCP_ENCAPSULATED(**tagpp) == opt)
+ break;
+
+ if ((**tagpp) == EFX_DHCP_END) {
+ rc = ENOENT;
+ break;
+ } else if ((**tagpp) == EFX_DHCP_PAD) {
+ size = 1;
+ } else {
+ if (*buffer_sizep < sizeof (efx_dhcp_tag_hdr_t)) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ size =
+ DHCP_FULL_TAG_LENGTH((efx_dhcp_tag_hdr_t *)*tagpp);
+ }
+
+ if (size > *buffer_sizep) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ (*tagpp) += size;
+ (*buffer_sizep) -= size;
+
+ if ((*buffer_sizep == 0) && is_encap) {
+			/* Search within encapsulator tag finished */
+ rc = ENOENT;
+ break;
+ }
+ }
+
+ /*
+	 * Returns 0 if found, otherwise ENOENT to indicate the search
+	 * finished correctly.
+ */
+ return (rc);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Locate the value buffer for the option in the given buffer.
+ * Returns 0 if found, ENOENT if the search finished correctly
+ * without finding it; otherwise the search failed before completion.
+ */
+ __checkReturn efx_rc_t
+efx_dhcp_find_tag(
+ __in_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt,
+ __deref_out uint8_t **valuepp,
+ __out size_t *value_lengthp)
+{
+ efx_rc_t rc;
+ uint8_t *tagp = bufferp;
+ size_t len = buffer_length;
+
+ rc = efx_dhcp_walk_tags(&tagp, &len, opt);
+ if (rc == 0) {
+ efx_dhcp_tag_hdr_t *hdrp;
+
+ hdrp = (efx_dhcp_tag_hdr_t *)tagp;
+ *valuepp = (uint8_t *)(&hdrp[1]);
+ *value_lengthp = hdrp->length;
+ } else if (rc != ENOENT) {
+ goto fail1;
+ }
+
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Locate the end tag in the given buffer.
+ * Returns 0 if found, ENOENT if the search finished correctly
+ * but the end tag was not found; otherwise the search failed
+ * before completion.
+ */
+ __checkReturn efx_rc_t
+efx_dhcp_find_end(
+ __in_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __deref_out uint8_t **endpp)
+{
+ efx_rc_t rc;
+ uint8_t *endp = bufferp;
+ size_t len = buffer_length;
+
+ rc = efx_dhcp_walk_tags(&endp, &len, EFX_DHCP_END);
+ if (rc == 0)
+ *endpp = endp;
+ else if (rc != ENOENT)
+ goto fail1;
+
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Delete the given tag from anywhere in the buffer. Copes with
+ * encapsulated tags, and updates or deletes the encapsulating opt as
+ * necessary.
+ */
+ __checkReturn efx_rc_t
+efx_dhcp_delete_tag(
+ __inout_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt)
+{
+ efx_rc_t rc;
+ efx_dhcp_tag_hdr_t *hdrp;
+ size_t len;
+ uint8_t *startp;
+ uint8_t *endp;
+
+ len = buffer_length;
+ startp = bufferp;
+
+ if (!DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATED(opt))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_dhcp_walk_tags(&startp, &len, opt);
+ if (rc != 0)
+ goto fail1;
+
+ hdrp = (efx_dhcp_tag_hdr_t *)startp;
+
+ if (DHCP_IS_ENCAP_OPT(opt)) {
+ uint8_t tag_length = DHCP_FULL_TAG_LENGTH(hdrp);
+ uint8_t *encapp = bufferp;
+ efx_dhcp_tag_hdr_t *encap_hdrp;
+
+ len = buffer_length;
+ rc = efx_dhcp_walk_tags(&encapp, &len,
+ DHCP_ENCAPSULATOR(opt));
+ if (rc != 0)
+ goto fail2;
+
+ encap_hdrp = (efx_dhcp_tag_hdr_t *)encapp;
+ if (encap_hdrp->length > tag_length) {
+ encap_hdrp->length = (uint8_t)(
+ (size_t)encap_hdrp->length - tag_length);
+ } else {
+ /* delete the encapsulating tag */
+ hdrp = encap_hdrp;
+ }
+ }
+
+ startp = (uint8_t *)hdrp;
+ endp = (uint8_t *)DHCP_NEXT_TAG(hdrp);
+
+ if (startp < bufferp) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (endp > &bufferp[buffer_length]) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ memmove(startp, endp,
+ buffer_length - (endp - bufferp));
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Writes the tag header into write_pointp and optionally copies the payload
+ * into the space following.
+ */
+static void
+efx_dhcp_write_tag(
+ __in uint8_t *write_pointp,
+ __in uint16_t opt,
+ __in_bcount_opt(value_length)
+ uint8_t *valuep,
+ __in size_t value_length)
+{
+ efx_dhcp_tag_hdr_t *hdrp = (efx_dhcp_tag_hdr_t *)write_pointp;
+ hdrp->tag = DHCP_ENCAPSULATED(opt);
+ hdrp->length = (uint8_t)value_length;
+ if ((value_length > 0) && (valuep != NULL))
+ memcpy(&hdrp[1], valuep, value_length);
+}
+
+/*
+ * Add the given tag to the end of the buffer. Copes with creating an
+ * encapsulated tag, and updates or creates the encapsulating opt as
+ * necessary.
+ */
+ __checkReturn efx_rc_t
+efx_dhcp_add_tag(
+ __inout_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt,
+ __in_bcount_opt(value_length) uint8_t *valuep,
+ __in size_t value_length)
+{
+ efx_rc_t rc;
+ efx_dhcp_tag_hdr_t *encap_hdrp = NULL;
+ uint8_t *insert_pointp = NULL;
+ uint8_t *endp;
+ size_t available_space;
+ size_t added_length;
+ size_t search_size;
+ uint8_t *searchp;
+
+ if (!DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATED(opt))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (value_length > DHCP_MAX_VALUE) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((value_length > 0) && (valuep == NULL)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ endp = bufferp;
+ available_space = buffer_length;
+ rc = efx_dhcp_walk_tags(&endp, &available_space, EFX_DHCP_END);
+ if (rc != 0)
+ goto fail4;
+
+ searchp = bufferp;
+ search_size = buffer_length;
+ if (DHCP_IS_ENCAP_OPT(opt)) {
+ rc = efx_dhcp_walk_tags(&searchp, &search_size,
+ DHCP_ENCAPSULATOR(opt));
+ if (rc == 0) {
+ encap_hdrp = (efx_dhcp_tag_hdr_t *)searchp;
+
+ /* Check encapsulated tag is not present */
+ search_size = encap_hdrp->length;
+ rc = efx_dhcp_walk_tags(&searchp, &search_size,
+ opt);
+ if (rc != ENOENT) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Check encapsulator will not overflow */
+ if (((size_t)encap_hdrp->length +
+ DHCP_CALC_TAG_LENGTH(value_length)) >
+ DHCP_MAX_VALUE) {
+ rc = E2BIG;
+ goto fail6;
+ }
+
+ /* Insert at start of existing encapsulator */
+ insert_pointp = (uint8_t *)&encap_hdrp[1];
+ opt = DHCP_ENCAPSULATED(opt);
+ } else if (rc == ENOENT) {
+ encap_hdrp = NULL;
+ } else {
+ goto fail7;
+ }
+ } else {
+ /* Check unencapsulated tag is not present */
+ rc = efx_dhcp_walk_tags(&searchp, &search_size,
+ opt);
+ if (rc != ENOENT) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ }
+
+ if (insert_pointp == NULL) {
+ /* Insert at end of existing tags */
+ insert_pointp = endp;
+ }
+
+ /* Includes the new encapsulator tag hdr if required */
+ added_length = DHCP_CALC_TAG_LENGTH(value_length) +
+ (DHCP_IS_ENCAP_OPT(opt) ? sizeof (efx_dhcp_tag_hdr_t) : 0);
+
+ if (available_space <= added_length) {
+ rc = ENOMEM;
+ goto fail9;
+ }
+
+ memmove(insert_pointp + added_length, insert_pointp,
+ available_space - added_length);
+
+ if (DHCP_IS_ENCAP_OPT(opt)) {
+ /* Create new encapsulator header */
+ added_length -= sizeof (efx_dhcp_tag_hdr_t);
+ efx_dhcp_write_tag(insert_pointp,
+ DHCP_ENCAPSULATOR(opt), NULL, added_length);
+ insert_pointp += sizeof (efx_dhcp_tag_hdr_t);
+ } else if (encap_hdrp)
+ /* Modify existing encapsulator header */
+ encap_hdrp->length +=
+ ((uint8_t)DHCP_CALC_TAG_LENGTH(value_length));
+
+ efx_dhcp_write_tag(insert_pointp, opt, valuep, value_length);
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Update an existing tag to the new value. Copes with encapsulated
+ * tags, and updates the encapsulating opt as necessary.
+ */
+ __checkReturn efx_rc_t
+efx_dhcp_update_tag(
+ __inout_bcount(buffer_length) uint8_t *bufferp,
+ __in size_t buffer_length,
+ __in uint16_t opt,
+ __in uint8_t *value_locationp,
+ __in_bcount_opt(value_length) uint8_t *valuep,
+ __in size_t value_length)
+{
+ efx_rc_t rc;
+ uint8_t *write_pointp = value_locationp - sizeof (efx_dhcp_tag_hdr_t);
+ efx_dhcp_tag_hdr_t *hdrp = (efx_dhcp_tag_hdr_t *)write_pointp;
+ efx_dhcp_tag_hdr_t *encap_hdrp = NULL;
+ size_t old_length;
+
+ if (!DHCP_OPT_HAS_VALUE(DHCP_ENCAPSULATED(opt))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (value_length > DHCP_MAX_VALUE) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((value_length > 0) && (valuep == NULL)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ old_length = hdrp->length;
+
+ if (old_length < value_length) {
+ uint8_t *endp = bufferp;
+ size_t available_space = buffer_length;
+
+ rc = efx_dhcp_walk_tags(&endp, &available_space,
+ EFX_DHCP_END);
+ if (rc != 0)
+ goto fail4;
+
+ if (available_space < (value_length - old_length)) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ }
+
+ if (DHCP_IS_ENCAP_OPT(opt)) {
+ uint8_t *encapp = bufferp;
+ size_t following_encap = buffer_length;
+ size_t new_length;
+
+ rc = efx_dhcp_walk_tags(&encapp, &following_encap,
+ DHCP_ENCAPSULATOR(opt));
+ if (rc != 0)
+ goto fail6;
+
+ encap_hdrp = (efx_dhcp_tag_hdr_t *)encapp;
+
+ new_length = ((size_t)encap_hdrp->length +
+ value_length - old_length);
+ /* Check encapsulator will not overflow */
+ if (new_length > DHCP_MAX_VALUE) {
+ rc = E2BIG;
+ goto fail7;
+ }
+
+ encap_hdrp->length = (uint8_t)new_length;
+ }
+
+ /*
+	 * Move the following data up/down to accommodate the new payload
+ * length.
+ */
+ if (old_length != value_length) {
+ uint8_t *destp = (uint8_t *)DHCP_NEXT_TAG(hdrp) +
+ value_length - old_length;
+ size_t count = &bufferp[buffer_length] -
+ (uint8_t *)DHCP_NEXT_TAG(hdrp);
+
+ memmove(destp, DHCP_NEXT_TAG(hdrp), count);
+ }
+
+ EFSYS_ASSERT(hdrp->tag == DHCP_ENCAPSULATED(opt));
+ efx_dhcp_write_tag(write_pointp, opt, valuep, value_length);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
* Copy bootcfg sector data to a target buffer which may differ in size.
* Optionally corrects format errors in source buffer.
*/
@@ -206,17 +737,19 @@ efx_bootcfg_copy_sector(
__in size_t data_size,
__in boolean_t handle_format_errors)
{
+ _NOTE(ARGUNUSED(enp))
+
size_t used_bytes;
efx_rc_t rc;
- /* Minimum buffer is checksum byte and DHCP_END terminator */
+ /* Minimum buffer is checksum byte and EFX_DHCP_END terminator */
if (data_size < 2) {
rc = ENOSPC;
goto fail1;
}
/* Verify that the area is correctly formatted and checksummed */
- rc = efx_bootcfg_verify(enp, sector, sector_length,
+ rc = efx_dhcp_verify(sector, sector_length,
&used_bytes);
if (!handle_format_errors) {
@@ -224,8 +757,8 @@ efx_bootcfg_copy_sector(
goto fail2;
if ((used_bytes < 2) ||
- (sector[used_bytes - 1] != DHCP_END)) {
- /* Block too short, or DHCP_END missing */
+ (sector[used_bytes - 1] != EFX_DHCP_END)) {
+ /* Block too short, or EFX_DHCP_END missing */
rc = ENOENT;
goto fail3;
}
@@ -234,24 +767,24 @@ efx_bootcfg_copy_sector(
/* Synthesize empty format on verification failure */
if (rc != 0 || used_bytes == 0) {
sector[0] = 0;
- sector[1] = DHCP_END;
+ sector[1] = EFX_DHCP_END;
used_bytes = 2;
}
- EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and EFX_DHCP_END */
EFSYS_ASSERT(used_bytes <= sector_length);
EFSYS_ASSERT(sector_length >= 2);
/*
- * Legacy bootcfg sectors don't terminate with a DHCP_END character.
- * Modify the returned payload so it does.
+ * Legacy bootcfg sectors don't terminate with an EFX_DHCP_END
+ * character. Modify the returned payload so it does.
* Reinitialise the sector if there isn't room for the character.
*/
- if (sector[used_bytes - 1] != DHCP_END) {
+ if (sector[used_bytes - 1] != EFX_DHCP_END) {
if (used_bytes >= sector_length) {
sector[0] = 0;
used_bytes = 1;
}
- sector[used_bytes] = DHCP_END;
+ sector[used_bytes] = EFX_DHCP_END;
++used_bytes;
}
@@ -274,10 +807,11 @@ efx_bootcfg_copy_sector(
(void) memset(data + used_bytes, 0, data_size - used_bytes);
/*
- * The checksum includes trailing data after any DHCP_END character,
- * which we've just modified (by truncation or appending DHCP_END).
+ * The checksum includes trailing data after any EFX_DHCP_END
+ * character, which we've just modified (by truncation or appending
+ * EFX_DHCP_END).
*/
- data[0] -= efx_bootcfg_csum(enp, data, data_size);
+ data[0] -= efx_dhcp_csum(data, data_size);
return (0);
@@ -307,7 +841,7 @@ efx_bootcfg_read(
efx_rc_t rc;
uint32_t sector_number;
- /* Minimum buffer is checksum byte and DHCP_END terminator */
+ /* Minimum buffer is checksum byte and EFX_DHCP_END terminator */
if (size < 2) {
rc = ENOSPC;
goto fail1;
@@ -343,10 +877,10 @@ efx_bootcfg_read(
}
/*
- * We need to read the entire BOOTCFG sector to ensure we read all the
- * tags, because legacy bootcfg sectors are not guaranteed to end with
- * a DHCP_END character. If the user hasn't supplied a sufficiently
- * large buffer then use our own buffer.
+ * We need to read the entire BOOTCFG sector to ensure we read all
+ * tags, because legacy bootcfg sectors are not guaranteed to end
+ * with an EFX_DHCP_END character. If the user hasn't supplied a
+ * sufficiently large buffer then use our own buffer.
*/
if (sector_length > size) {
EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
@@ -370,28 +904,29 @@ efx_bootcfg_read(
goto fail9;
/* Verify that the area is correctly formatted and checksummed */
- rc = efx_bootcfg_verify(enp, payload, sector_length,
+ rc = efx_dhcp_verify(payload, sector_length,
&used_bytes);
if (rc != 0 || used_bytes == 0) {
payload[0] = 0;
- payload[1] = DHCP_END;
+ payload[1] = EFX_DHCP_END;
used_bytes = 2;
}
- EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and EFX_DHCP_END */
EFSYS_ASSERT(used_bytes <= sector_length);
/*
- * Legacy bootcfg sectors don't terminate with a DHCP_END character.
- * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by
- * definition large enough for any valid (per-port) bootcfg sector,
- * so reinitialise the sector if there isn't room for the character.
+ * Legacy bootcfg sectors don't terminate with an EFX_DHCP_END
+ * character. Modify the returned payload so it does.
+ * BOOTCFG_MAX_SIZE is by definition large enough for any valid
+ * (per-port) bootcfg sector, so reinitialise the sector if there
+ * isn't room for the character.
*/
- if (payload[used_bytes - 1] != DHCP_END) {
+ if (payload[used_bytes - 1] != EFX_DHCP_END) {
if (used_bytes >= sector_length)
used_bytes = 1;
- payload[used_bytes] = DHCP_END;
+ payload[used_bytes] = EFX_DHCP_END;
++used_bytes;
}
@@ -417,10 +952,10 @@ efx_bootcfg_read(
(void) memset(data + used_bytes, 0, size - used_bytes);
/*
- * The checksum includes trailing data after any DHCP_END character,
- * which we've just modified (by truncation or appending DHCP_END).
+ * The checksum includes trailing data after any EFX_DHCP_END character,
+ * which we've just modified (by truncation or appending EFX_DHCP_END).
*/
- data[0] -= efx_bootcfg_csum(enp, data, size);
+ data[0] -= efx_dhcp_csum(data, size);
return (0);
@@ -490,12 +1025,16 @@ efx_bootcfg_write(
goto fail3;
}
- if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
+ if ((rc = efx_dhcp_verify(data, size, &used_bytes)) != 0)
goto fail4;
- /* The caller *must* terminate their block with a DHCP_END character */
- if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] != DHCP_END)) {
- /* Block too short or DHCP_END missing */
+ /*
+	 * The caller *must* terminate their block with an EFX_DHCP_END
+	 * character.
+ */
+ if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] !=
+ EFX_DHCP_END)) {
+ /* Block too short or EFX_DHCP_END missing */
rc = ENOENT;
goto fail5;
}
@@ -528,13 +1067,13 @@ efx_bootcfg_write(
goto fail9;
/*
- * Insert the BOOTCFG sector into the partition, Zero out all data after
- * the DHCP_END tag, and adjust the checksum.
+	 * Insert the BOOTCFG sector into the partition, zero out all data
+ * after the EFX_DHCP_END tag, and adjust the checksum.
*/
(void) memset(partn_data + sector_offset, 0x0, sector_length);
(void) memcpy(partn_data + sector_offset, data, used_bytes);
- checksum = efx_bootcfg_csum(enp, data, used_bytes);
+ checksum = efx_dhcp_csum(data, used_bytes);
partn_data[sector_offset] -= checksum;
if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
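A minimal usage sketch (not part of this patch) of the new efx_dhcp_* tag
helpers on a bootcfg sector. The buffer names and the option number (DHCP
option 60, vendor class identifier) are assumptions for illustration only;
an encapsulated sub-option would be addressed by packing the encapsulator
into the high byte of opt, e.g. (43 << 8) | 1. The sector checksum byte
still has to be adjusted afterwards, as the surrounding bootcfg code does
with efx_dhcp_csum().

static efx_rc_t
example_set_vendor_class(uint8_t *sector, size_t sector_length)
{
	const uint16_t opt = 60;	/* vendor class identifier */
	uint8_t value[] = "PXEClient";
	uint8_t *curp;
	size_t cur_length;
	efx_rc_t rc;

	/* Look for an existing instance of the option. */
	rc = efx_dhcp_find_tag(sector, sector_length, opt,
	    &curp, &cur_length);
	if (rc == 0) {
		/* Present: rewrite it in place, shuffling later tags. */
		rc = efx_dhcp_update_tag(sector, sector_length, opt,
		    curp, value, sizeof (value) - 1);
	} else if (rc == ENOENT) {
		/* Absent: append it before the EFX_DHCP_END terminator. */
		rc = efx_dhcp_add_tag(sector, sector_length, opt,
		    value, sizeof (value) - 1);
	}
	return (rc);
}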
diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c
index 412298ac..a7523b38 100644
--- a/drivers/net/sfc/base/efx_filter.c
+++ b/drivers/net/sfc/base/efx_filter.c
@@ -490,27 +490,42 @@ fail1:
}
/*
- * Specify inner and outer Ethernet address and VXLAN ID in filter
+ * Specify inner and outer Ethernet address and VNI or VSID in tunnel filter
* specification.
*/
- __checkReturn efx_rc_t
-efx_filter_spec_set_vxlan_full(
- __inout efx_filter_spec_t *spec,
- __in const uint8_t *vxlan_id,
+static __checkReturn efx_rc_t
+efx_filter_spec_set_tunnel(
+ __inout efx_filter_spec_t *spec,
+ __in efx_tunnel_protocol_t encap_type,
+ __in const uint8_t *vni_or_vsid,
__in const uint8_t *inner_addr,
__in const uint8_t *outer_addr)
{
+ efx_rc_t rc;
+
EFSYS_ASSERT3P(spec, !=, NULL);
- EFSYS_ASSERT3P(vxlan_id, !=, NULL);
+ EFSYS_ASSERT3P(vni_or_vsid, !=, NULL);
EFSYS_ASSERT3P(inner_addr, !=, NULL);
EFSYS_ASSERT3P(outer_addr, !=, NULL);
- if ((inner_addr == NULL) && (outer_addr == NULL))
- return (EINVAL);
+ switch (encap_type) {
+ case EFX_TUNNEL_PROTOCOL_VXLAN:
+ case EFX_TUNNEL_PROTOCOL_GENEVE:
+ case EFX_TUNNEL_PROTOCOL_NVGRE:
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((inner_addr == NULL) && (outer_addr == NULL)) {
+ rc = EINVAL;
+ goto fail2;
+ }
- if (vxlan_id != NULL) {
+ if (vni_or_vsid != NULL) {
spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
- memcpy(spec->efs_vni_or_vsid, vxlan_id, EFX_VNI_OR_VSID_LEN);
+ memcpy(spec->efs_vni_or_vsid, vni_or_vsid, EFX_VNI_OR_VSID_LEN);
}
if (outer_addr != NULL) {
spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
@@ -520,10 +535,63 @@ efx_filter_spec_set_vxlan_full(
spec->efs_match_flags |= EFX_FILTER_MATCH_IFRM_LOC_MAC;
memcpy(spec->efs_ifrm_loc_mac, inner_addr, EFX_MAC_ADDR_LEN);
}
+
spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
- spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+ spec->efs_encap_type = encap_type;
return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Specify inner and outer Ethernet address and VNI in VXLAN filter
+ * specification.
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_vxlan(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vni,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr)
+{
+ return efx_filter_spec_set_tunnel(spec, EFX_TUNNEL_PROTOCOL_VXLAN,
+ vni, inner_addr, outer_addr);
+}
+
+/*
+ * Specify inner and outer Ethernet address and VNI in Geneve filter
+ * specification.
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_geneve(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vni,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr)
+{
+ return efx_filter_spec_set_tunnel(spec, EFX_TUNNEL_PROTOCOL_GENEVE,
+ vni, inner_addr, outer_addr);
+}
+
+/*
+ * Specify inner and outer Ethernet address and VSID in NVGRE filter
+ * specification.
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_nvgre(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vsid,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr)
+{
+ return efx_filter_spec_set_tunnel(spec, EFX_TUNNEL_PROTOCOL_NVGRE,
+ vsid, inner_addr, outer_addr);
}
#if EFSYS_OPT_RX_SCALE
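A minimal sketch (not part of this patch) of building a Geneve filter
specification with one of the new per-tunnel wrappers. The VNI and MAC
values are placeholders, and specp is assumed to have been initialised by
the caller (e.g. via efx_filter_spec_init_rx()) before being passed on to
efx_filter_insert().

static efx_rc_t
example_build_geneve_spec(efx_filter_spec_t *specp)
{
	const uint8_t vni[EFX_VNI_OR_VSID_LEN] = { 0x00, 0x12, 0x34 };
	const uint8_t inner_mac[EFX_MAC_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t outer_mac[EFX_MAC_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };

	/* Match on the VNI plus inner and outer destination MACs. */
	return (efx_filter_spec_set_geneve(specp, vni,
	    inner_mac, outer_mac));
}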
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
index 548834f9..bad23f81 100644
--- a/drivers/net/sfc/base/efx_impl.h
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -59,6 +59,7 @@ extern "C" {
#define EFX_RESET_PHY 0x00000001
#define EFX_RESET_RXQ_ERR 0x00000002
#define EFX_RESET_TXQ_ERR 0x00000004
+#define EFX_RESET_HW_UNAVAIL 0x00000008
typedef enum efx_mac_type_e {
EFX_MAC_INVALID = 0,
@@ -223,6 +224,7 @@ typedef struct efx_phy_ops_s {
efx_rc_t (*epo_reconfigure)(efx_nic_t *);
efx_rc_t (*epo_verify)(efx_nic_t *);
efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *);
+ efx_rc_t (*epo_link_state_get)(efx_nic_t *, efx_phy_link_state_t *);
#if EFSYS_OPT_PHY_STATS
efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
uint32_t *);
@@ -317,6 +319,8 @@ typedef struct efx_mon_ops_s {
#if EFSYS_OPT_MON_STATS
efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
efx_mon_stat_value_t *);
+ efx_rc_t (*emo_limits_update)(efx_nic_t *,
+ efx_mon_stat_limits_t *);
#endif /* EFSYS_OPT_MON_STATS */
} efx_mon_ops_t;
@@ -354,6 +358,8 @@ typedef struct efx_nic_ops_s {
efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *);
efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t,
uint32_t *, size_t *);
+ boolean_t (*eno_hw_unavailable)(efx_nic_t *);
+ void (*eno_set_hw_unavailable)(efx_nic_t *);
#if EFSYS_OPT_DIAG
efx_rc_t (*eno_register_test)(efx_nic_t *);
#endif /* EFSYS_OPT_DIAG */
@@ -507,7 +513,7 @@ typedef struct efx_nvram_ops_s {
uint32_t *, uint16_t *);
efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
uint16_t *);
- efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t,
+ efx_rc_t (*envo_buffer_validate)(uint32_t,
caddr_t, size_t);
} efx_nvram_ops_t;
#endif /* EFSYS_OPT_NVRAM */
@@ -583,7 +589,7 @@ efx_mcdi_nvram_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in uint32_t offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size);
__checkReturn efx_rc_t
diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c
index 49c00347..4081aef1 100644
--- a/drivers/net/sfc/base/efx_lic.c
+++ b/drivers/net/sfc/base/efx_lic.c
@@ -301,12 +301,11 @@ efx_mcdi_fc_license_update_license(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FC_IN_LICENSE_LEN, 0);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
@@ -347,13 +346,12 @@ efx_mcdi_fc_license_get_key_stats(
__out efx_key_stats_t *eksp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
- MC_CMD_FC_OUT_LICENSE_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FC_IN_LICENSE_LEN,
+ MC_CMD_FC_OUT_LICENSE_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
@@ -663,8 +661,8 @@ efx_mcdi_licensed_app_state(
__out boolean_t *licensedp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
- MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
uint32_t app_state;
efx_rc_t rc;
@@ -676,7 +674,6 @@ efx_mcdi_licensed_app_state(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
@@ -722,12 +719,11 @@ efx_mcdi_licensing_update_licenses(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_LICENSING_IN_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_IN_LEN, 0);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
@@ -765,13 +761,12 @@ efx_mcdi_licensing_get_key_stats(
__out efx_key_stats_t *eksp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
- MC_CMD_LICENSING_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_IN_LEN,
+ MC_CMD_LICENSING_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
@@ -829,13 +824,12 @@ efx_mcdi_licensing_v3_update_licenses(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_V3_IN_LEN, 0);
efx_rc_t rc;
EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
(enp->en_family == EFX_FAMILY_MEDFORD2));
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
@@ -866,14 +860,13 @@ efx_mcdi_licensing_v3_report_license(
__out efx_key_stats_t *eksp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
- MC_CMD_LICENSING_V3_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_V3_IN_LEN,
+ MC_CMD_LICENSING_V3_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
(enp->en_family == EFX_FAMILY_MEDFORD2));
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
@@ -930,15 +923,14 @@ efx_mcdi_licensing_v3_app_state(
__out boolean_t *licensedp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
- MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN);
uint32_t app_state;
efx_rc_t rc;
EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
(enp->en_family == EFX_FAMILY_MEDFORD2));
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
@@ -990,28 +982,15 @@ efx_mcdi_licensing_v3_get_id(
uint8_t *bufferp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
- MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
efx_rc_t rc;
req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
-
- if (bufferp == NULL) {
- /* Request id type and length only */
- req.emr_in_buf = bufferp;
- req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
- req.emr_out_buf = bufferp;
- req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
- (void) memset(payload, 0, sizeof (payload));
- } else {
- /* Request full buffer */
- req.emr_in_buf = bufferp;
- req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
- req.emr_out_buf = bufferp;
- req.emr_out_length =
- MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
- (void) memset(bufferp, 0, req.emr_out_length);
- }
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX;
efx_mcdi_execute_quiet(enp, &req);
@@ -1029,19 +1008,10 @@ efx_mcdi_licensing_v3_get_id(
*lengthp =
MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
- if (bufferp == NULL) {
- /*
- * Modify length requirements to indicate to caller the extra
- * buffering needed to read the complete output.
- */
- *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
- } else {
- /* Shift ID down to start of buffer */
- memmove(bufferp,
- bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
- *lengthp);
- memset(bufferp + (*lengthp), 0,
- MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
+ if (bufferp != NULL) {
+ memcpy(bufferp,
+ payload + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
+ MIN(buffer_size, *lengthp));
}
return (0);
@@ -1158,10 +1128,12 @@ efx_lic_v3_read_key(
__in size_t key_max_size,
__out uint32_t *lengthp)
{
+ uint32_t tag;
+
_NOTE(ARGUNUSED(enp))
return ef10_nvram_buffer_get_item(bufferp, buffer_size,
- offset, length, keyp, key_max_size, lengthp);
+ offset, length, &tag, keyp, key_max_size, lengthp);
}
__checkReturn efx_rc_t
@@ -1179,7 +1151,7 @@ efx_lic_v3_write_key(
EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
- offset, keyp, length, lengthp);
+ offset, TLV_TAG_LICENSE, keyp, length, lengthp);
}
__checkReturn efx_rc_t
@@ -1221,8 +1193,10 @@ efx_lic_v3_create_partition(
{
efx_rc_t rc;
+ _NOTE(ARGUNUSED(enp))
+
/* Construct empty partition */
- if ((rc = ef10_nvram_buffer_create(enp,
+ if ((rc = ef10_nvram_buffer_create(
NVRAM_PARTITION_TYPE_LICENSE,
bufferp, buffer_size)) != 0) {
rc = EFAULT;
@@ -1246,13 +1220,16 @@ efx_lic_v3_finish_partition(
{
efx_rc_t rc;
+ _NOTE(ARGUNUSED(enp))
+
if ((rc = ef10_nvram_buffer_finish(bufferp,
buffer_size)) != 0) {
goto fail1;
}
/* Validate completed partition */
- if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
+ if ((rc = ef10_nvram_buffer_validate(
+ NVRAM_PARTITION_TYPE_LICENSE,
bufferp, buffer_size)) != 0) {
goto fail2;
}
diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c
index d4ebcf26..c896aa0b 100644
--- a/drivers/net/sfc/base/efx_mcdi.c
+++ b/drivers/net/sfc/base/efx_mcdi.c
@@ -496,6 +496,12 @@ efx_mcdi_request_poll(
EFSYS_ASSERT(!emip->emi_ev_cpl);
emrp = emip->emi_pending_req;
+ /* Check if hardware is unavailable */
+ if (efx_nic_hw_unavailable(enp)) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (B_FALSE);
+ }
+
/* Check for reboot atomically w.r.t efx_mcdi_request_start */
if (emip->emi_poll_cnt++ == 0) {
if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
@@ -900,10 +906,10 @@ efx_mcdi_version(
__out_opt efx_mcdi_boot_t *statusp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
- MC_CMD_GET_VERSION_OUT_LEN),
- MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
- MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MAX(MC_CMD_GET_VERSION_IN_LEN, MC_CMD_GET_BOOT_STATUS_IN_LEN),
+ MAX(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN));
efx_word_t *ver_words;
uint16_t version[4];
uint32_t build;
@@ -912,7 +918,6 @@ efx_mcdi_version(
EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_VERSION;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
@@ -1018,12 +1023,11 @@ efx_mcdi_get_capabilities(
__out_opt uint32_t *tso2ncp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
- MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
boolean_t v2_capable;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_CAPABILITIES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
@@ -1086,7 +1090,8 @@ efx_mcdi_do_reboot(
__in efx_nic_t *enp,
__in boolean_t after_assertion)
{
- uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN,
+ MC_CMD_REBOOT_OUT_LEN);
efx_mcdi_req_t req;
efx_rc_t rc;
@@ -1099,7 +1104,6 @@ efx_mcdi_do_reboot(
*/
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_REBOOT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
@@ -1150,8 +1154,8 @@ efx_mcdi_read_assertion(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
- MC_CMD_GET_ASSERTS_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN);
const char *reason;
unsigned int flags;
unsigned int index;
@@ -1252,11 +1256,10 @@ efx_mcdi_drv_attach(
__in boolean_t attach)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
- MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_LEN,
+ MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_DRV_ATTACH;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
@@ -1311,11 +1314,10 @@ efx_mcdi_get_board_cfg(
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
- MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMIN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_BOARD_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
@@ -1391,11 +1393,10 @@ efx_mcdi_get_resource_limits(
__out_opt uint32_t *ntxqp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
- MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
@@ -1438,8 +1439,8 @@ efx_mcdi_get_phy_cfg(
efx_port_t *epp = &(enp->en_port);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
- MC_CMD_GET_PHY_CFG_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN,
+ MC_CMD_GET_PHY_CFG_OUT_LEN);
#if EFSYS_OPT_NAMES
const char *namep;
size_t namelen;
@@ -1447,7 +1448,6 @@ efx_mcdi_get_phy_cfg(
uint32_t phy_media_type;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
@@ -1686,11 +1686,10 @@ efx_mcdi_bist_start(
__in efx_bist_type_t type)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
- MC_CMD_START_BIST_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN,
+ MC_CMD_START_BIST_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_START_BIST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
@@ -1749,11 +1748,10 @@ efx_mcdi_log_ctrl(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
- MC_CMD_LOG_CTRL_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN,
+ MC_CMD_LOG_CTRL_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LOG_CTRL;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
@@ -1798,8 +1796,8 @@ efx_mcdi_mac_stats(
__in uint16_t period_ms)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
- MC_CMD_MAC_STATS_V2_OUT_DMA_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN,
+ MC_CMD_MAC_STATS_V2_OUT_DMA_LEN);
int clear = (action == EFX_STATS_CLEAR);
int upload = (action == EFX_STATS_UPLOAD);
int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
@@ -1807,7 +1805,6 @@ efx_mcdi_mac_stats(
int disable = (action == EFX_STATS_DISABLE);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_MAC_STATS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
@@ -1979,11 +1976,10 @@ efx_mcdi_get_function_info(
__out_opt uint32_t *vfp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
- MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+ MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
@@ -2024,11 +2020,10 @@ efx_mcdi_privilege_mask(
__out uint32_t *maskp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
- MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN,
+ MC_CMD_PRIVILEGE_MASK_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
@@ -2073,11 +2068,10 @@ efx_mcdi_set_workaround(
__out_opt uint32_t *flagsp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
- MC_CMD_WORKAROUND_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN,
+ MC_CMD_WORKAROUND_EXT_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_WORKAROUND;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
@@ -2117,10 +2111,9 @@ efx_mcdi_get_workarounds(
__out_opt uint32_t *enabledp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
req.emr_in_buf = NULL;
req.emr_in_length = 0;
@@ -2157,6 +2150,14 @@ fail1:
*/
#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
+/*
+ * Transceiver identifiers from SFF-8024 Table 4-1.
+ */
+#define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */
+#define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */
+#define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */
+#define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */
+
static __checkReturn efx_rc_t
efx_mcdi_get_phy_media_info(
__in efx_nic_t *enp,
@@ -2166,14 +2167,13 @@ efx_mcdi_get_phy_media_info(
__out_bcount(len) uint8_t *data)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
- MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
- EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE));
efx_rc_t rc;
EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
@@ -2218,39 +2218,19 @@ fail1:
return (rc);
}
-/*
- * 2-wire device address of the base information in accordance with SFF-8472
- * Diagnostic Monitoring Interface for Optical Transceivers section
- * 4 Memory Organization.
- */
-#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
-
-/*
- * 2-wire device address of the digital diagnostics monitoring interface
- * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
- * Transceivers section 4 Memory Organization.
- */
-#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
-
-/*
- * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
- * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
- * Operation.
- */
-#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
-
__checkReturn efx_rc_t
efx_mcdi_phy_module_get_info(
__in efx_nic_t *enp,
__in uint8_t dev_addr,
- __in uint8_t offset,
- __in uint8_t len,
+ __in size_t offset,
+ __in size_t len,
__out_bcount(len) uint8_t *data)
{
efx_port_t *epp = &(enp->en_port);
efx_rc_t rc;
uint32_t mcdi_lower_page;
uint32_t mcdi_upper_page;
+ uint8_t id;
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
@@ -2264,6 +2244,26 @@ efx_mcdi_phy_module_get_info(
*/
switch (epp->ep_fixed_port_type) {
case EFX_PHY_MEDIA_SFP_PLUS:
+ case EFX_PHY_MEDIA_QSFP_PLUS:
+ /* Port type supports modules */
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ /*
+ * For all supported port types, MCDI page 0 offset 0 holds the
+ * transceiver identifier. Probe to determine the data layout.
+ * Definitions from SFF-8024 Table 4-1.
+ */
+ rc = efx_mcdi_get_phy_media_info(enp,
+ 0, 0, sizeof(id), &id);
+ if (rc != 0)
+ goto fail2;
+
+ switch (id) {
+ case EFX_SFF_TRANSCEIVER_ID_SFP:
/*
* In accordance with SFF-8472 Diagnostic Monitoring
* Interface for Optical Transceivers section 4 Memory
@@ -2298,10 +2298,12 @@ efx_mcdi_phy_module_get_info(
break;
default:
rc = ENOTSUP;
- goto fail1;
+ goto fail3;
}
break;
- case EFX_PHY_MEDIA_QSFP_PLUS:
+ case EFX_SFF_TRANSCEIVER_ID_QSFP:
+ case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS:
+ case EFX_SFF_TRANSCEIVER_ID_QSFP28:
switch (dev_addr) {
case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
/*
@@ -2317,22 +2319,24 @@ efx_mcdi_phy_module_get_info(
break;
default:
rc = ENOTSUP;
- goto fail1;
+ goto fail3;
}
break;
default:
rc = ENOTSUP;
- goto fail1;
+ goto fail3;
}
+ EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF);
+
if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
- uint8_t read_len =
+ size_t read_len =
MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
rc = efx_mcdi_get_phy_media_info(enp,
- mcdi_lower_page, offset, read_len, data);
+ mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data);
if (rc != 0)
- goto fail2;
+ goto fail4;
data += read_len;
len -= read_len;
@@ -2347,13 +2351,17 @@ efx_mcdi_phy_module_get_info(
EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
rc = efx_mcdi_get_phy_media_info(enp,
- mcdi_upper_page, offset, len, data);
+ mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data);
if (rc != 0)
- goto fail3;
+ goto fail5;
}
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
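A minimal sketch (not part of this patch) of reading a pluggable module's
lower ID page through the reworked helper, which now takes size_t offset and
length and probes the SFF-8024 identifier byte internally to choose the SFP
or QSFP page layout. The buffer name is an assumption; 0xA0 is the 2-wire
base address used by both SFF-8472 (SFP) and SFF-8436 (QSFP+) modules.

static efx_rc_t
example_read_module_base_page(
	efx_nic_t *enp,
	uint8_t *buf)		/* at least 0x80 bytes */
{
	/* Read the whole 128-byte lower page starting at offset 0. */
	return (efx_mcdi_phy_module_get_info(enp, 0xA0, 0, 0x80, buf));
}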
diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h
index 253a9e60..ddf91c11 100644
--- a/drivers/net/sfc/base/efx_mcdi.h
+++ b/drivers/net/sfc/base/efx_mcdi.h
@@ -10,6 +10,10 @@
#include "efx.h"
#include "efx_regs_mcdi.h"
+#if EFSYS_OPT_NAMES
+#include "mc_driver_pcol_strs.h"
+#endif /* EFSYS_OPT_NAMES */
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -215,8 +219,8 @@ extern __checkReturn efx_rc_t
efx_mcdi_phy_module_get_info(
__in efx_nic_t *enp,
__in uint8_t dev_addr,
- __in uint8_t offset,
- __in uint8_t len,
+ __in size_t offset,
+ __in size_t len,
__out_bcount(len) uint8_t *data);
#define MCDI_IN(_emr, _type, _ofst) \
@@ -380,6 +384,17 @@ efx_mcdi_phy_module_get_info(
(((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \
(MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv))
+/*
+ * The buffer size must be a multiple of the dword size to ensure that MCDI
+ * works properly with Siena-based boards (which use an on-chip buffer).
+ * Also, it should be at least two dwords in size to allow space for
+ * extended error responses if the request/response buffer sizes are smaller.
+ */
+#define EFX_MCDI_DECLARE_BUF(_name, _in_len, _out_len) \
+ uint8_t _name[P2ROUNDUP(MAX(MAX(_in_len, _out_len), \
+ (2 * sizeof (efx_dword_t))), \
+ sizeof (efx_dword_t))] = {0}
+
typedef enum efx_mcdi_feature_id_e {
EFX_MCDI_FEATURE_FW_UPDATE = 0,
EFX_MCDI_FEATURE_LINK_CONTROL,
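A worked illustration (not part of this patch) of what the new declaration
helper produces. For a command with a 4-byte request and a 12-byte response,

	EFX_MCDI_DECLARE_BUF(payload, 4, 12);

is equivalent to

	uint8_t payload[12] = {0};

since MAX(4, 12) already exceeds the two-dword minimum and 12 is a whole
number of dwords; this replaces the earlier pattern of a MAX(...)-sized
array followed by an explicit memset seen throughout efx_mcdi.c and
efx_lic.c.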
diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c
index 9fc268ec..f28775d0 100644
--- a/drivers/net/sfc/base/efx_mon.c
+++ b/drivers/net/sfc/base/efx_mon.c
@@ -38,7 +38,8 @@ efx_mon_name(
#if EFSYS_OPT_MON_MCDI
static const efx_mon_ops_t __efx_mon_mcdi_ops = {
#if EFSYS_OPT_MON_STATS
- mcdi_mon_stats_update /* emo_stats_update */
+ mcdi_mon_stats_update, /* emo_stats_update */
+ mcdi_mon_limits_update, /* emo_limits_update */
#endif /* EFSYS_OPT_MON_STATS */
};
#endif
@@ -99,77 +100,74 @@ fail1:
#if EFSYS_OPT_NAMES
-/* START MKCONFIG GENERATED MonitorStatNamesBlock 8150a068198c0f96 */
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 277c17eda1a6d1a4 */
static const char * const __mon_stat_name[] = {
- "value_2_5v",
- "value_vccp1",
- "value_vcc",
- "value_5v",
- "value_12v",
- "value_vccp2",
- "value_ext_temp",
- "value_int_temp",
- "value_ain1",
- "value_ain2",
+ "controller_temp",
+ "phy_common_temp",
"controller_cooling",
- "ext_cooling",
- "1v",
- "1_2v",
- "1_8v",
- "3_3v",
- "1_2va",
- "vref",
- "vaoe",
- "aoe_temperature",
- "psu_aoe_temperature",
- "psu_temperature",
- "fan0",
- "fan1",
- "fan2",
- "fan3",
- "fan4",
- "vaoe_in",
- "iaoe",
- "iaoe_in",
+ "phy0_temp",
+ "phy0_cooling",
+ "phy1_temp",
+ "phy1_cooling",
+ "in_1v0",
+ "in_1v2",
+ "in_1v8",
+ "in_2v5",
+ "in_3v3",
+ "in_12v0",
+ "in_1v2a",
+ "in_vref",
+ "out_vaoe",
+ "aoe_temp",
+ "psu_aoe_temp",
+ "psu_temp",
+ "fan_0",
+ "fan_1",
+ "fan_2",
+ "fan_3",
+ "fan_4",
+ "in_vaoe",
+ "out_iaoe",
+ "in_iaoe",
"nic_power",
- "0_9v",
- "i0_9v",
- "i1_2v",
- "0_9v_adc",
- "controller_temperature2",
- "vreg_temperature",
- "vreg_0_9v_temperature",
- "vreg_1_2v_temperature",
- "int_vptat",
- "controller_internal_adc_temperature",
- "ext_vptat",
- "controller_external_adc_temperature",
- "ambient_temperature",
+ "in_0v9",
+ "in_i0v9",
+ "in_i1v2",
+ "in_0v9_adc",
+ "controller_2_temp",
+ "vreg_internal_temp",
+ "vreg_0v9_temp",
+ "vreg_1v2_temp",
+ "controller_vptat",
+ "controller_internal_temp",
+ "controller_vptat_extadc",
+ "controller_internal_temp_extadc",
+ "ambient_temp",
"airflow",
"vdd08d_vss08d_csr",
"vdd08d_vss08d_csr_extadc",
- "hotpoint_temperature",
- "phy_power_switch_port0",
- "phy_power_switch_port1",
+ "hotpoint_temp",
+ "phy_power_port0",
+ "phy_power_port1",
"mum_vcc",
- "0v9_a",
- "i0v9_a",
- "0v9_a_temp",
- "0v9_b",
- "i0v9_b",
- "0v9_b_temp",
+ "in_0v9_a",
+ "in_i0v9_a",
+ "vreg_0v9_a_temp",
+ "in_0v9_b",
+ "in_i0v9_b",
+ "vreg_0v9_b_temp",
"ccom_avreg_1v2_supply",
- "ccom_avreg_1v2_supply_ext_adc",
+ "ccom_avreg_1v2_supply_extadc",
"ccom_avreg_1v8_supply",
- "ccom_avreg_1v8_supply_ext_adc",
+ "ccom_avreg_1v8_supply_extadc",
"controller_master_vptat",
"controller_master_internal_temp",
- "controller_master_vptat_ext_adc",
- "controller_master_internal_temp_ext_adc",
+ "controller_master_vptat_extadc",
+ "controller_master_internal_temp_extadc",
"controller_slave_vptat",
"controller_slave_internal_temp",
- "controller_slave_vptat_ext_adc",
- "controller_slave_internal_temp_ext_adc",
+ "controller_slave_vptat_extadc",
+ "controller_slave_internal_temp_extadc",
"sodimm_vout",
"sodimm_0_temp",
"sodimm_1_temp",
@@ -178,17 +176,17 @@ static const char * const __mon_stat_name[] = {
"controller_tdiode_temp",
"board_front_temp",
"board_back_temp",
- "i1v8",
- "i2v5",
- "i3v3",
- "i12v0",
- "1v3",
- "i1v3",
+ "in_i1v8",
+ "in_i2v5",
+ "in_i3v3",
+ "in_i12v0",
+ "in_1v3",
+ "in_i1v3",
};
/* END MKCONFIG GENERATED MonitorStatNamesBlock */
-extern const char *
+ const char *
efx_mon_stat_name(
__in efx_nic_t *enp,
__in efx_mon_stat_t id)
@@ -200,8 +198,609 @@ efx_mon_stat_name(
return (__mon_stat_name[id]);
}
+typedef struct _stat_description_t {
+ efx_mon_stat_t stat;
+ const char *desc;
+} stat_description_t;
+
+/* START MKCONFIG GENERATED MonitorStatDescriptionsBlock f072138f16d2e1f8 */
+static const char *__mon_stat_description[] = {
+ MC_CMD_SENSOR_CONTROLLER_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_PHY_COMMON_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_COOLING_ENUM_STR,
+ MC_CMD_SENSOR_PHY0_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_PHY0_COOLING_ENUM_STR,
+ MC_CMD_SENSOR_PHY1_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_PHY1_COOLING_ENUM_STR,
+ MC_CMD_SENSOR_IN_1V0_ENUM_STR,
+ MC_CMD_SENSOR_IN_1V2_ENUM_STR,
+ MC_CMD_SENSOR_IN_1V8_ENUM_STR,
+ MC_CMD_SENSOR_IN_2V5_ENUM_STR,
+ MC_CMD_SENSOR_IN_3V3_ENUM_STR,
+ MC_CMD_SENSOR_IN_12V0_ENUM_STR,
+ MC_CMD_SENSOR_IN_1V2A_ENUM_STR,
+ MC_CMD_SENSOR_IN_VREF_ENUM_STR,
+ MC_CMD_SENSOR_OUT_VAOE_ENUM_STR,
+ MC_CMD_SENSOR_AOE_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_PSU_AOE_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_PSU_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_FAN_0_ENUM_STR,
+ MC_CMD_SENSOR_FAN_1_ENUM_STR,
+ MC_CMD_SENSOR_FAN_2_ENUM_STR,
+ MC_CMD_SENSOR_FAN_3_ENUM_STR,
+ MC_CMD_SENSOR_FAN_4_ENUM_STR,
+ MC_CMD_SENSOR_IN_VAOE_ENUM_STR,
+ MC_CMD_SENSOR_OUT_IAOE_ENUM_STR,
+ MC_CMD_SENSOR_IN_IAOE_ENUM_STR,
+ MC_CMD_SENSOR_NIC_POWER_ENUM_STR,
+ MC_CMD_SENSOR_IN_0V9_ENUM_STR,
+ MC_CMD_SENSOR_IN_I0V9_ENUM_STR,
+ MC_CMD_SENSOR_IN_I1V2_ENUM_STR,
+ MC_CMD_SENSOR_IN_0V9_ADC_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_2_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_VREG_INTERNAL_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_VREG_0V9_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_VREG_1V2_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_VPTAT_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_AMBIENT_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_AIRFLOW_ENUM_STR,
+ MC_CMD_SENSOR_VDD08D_VSS08D_CSR_ENUM_STR,
+ MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_HOTPOINT_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_PHY_POWER_PORT0_ENUM_STR,
+ MC_CMD_SENSOR_PHY_POWER_PORT1_ENUM_STR,
+ MC_CMD_SENSOR_MUM_VCC_ENUM_STR,
+ MC_CMD_SENSOR_IN_0V9_A_ENUM_STR,
+ MC_CMD_SENSOR_IN_I0V9_A_ENUM_STR,
+ MC_CMD_SENSOR_VREG_0V9_A_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_IN_0V9_B_ENUM_STR,
+ MC_CMD_SENSOR_IN_I0V9_B_ENUM_STR,
+ MC_CMD_SENSOR_VREG_0V9_B_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_ENUM_STR,
+ MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_ENUM_STR,
+ MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC_ENUM_STR,
+ MC_CMD_SENSOR_SODIMM_VOUT_ENUM_STR,
+ MC_CMD_SENSOR_SODIMM_0_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_SODIMM_1_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_PHY0_VCC_ENUM_STR,
+ MC_CMD_SENSOR_PHY1_VCC_ENUM_STR,
+ MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_BOARD_FRONT_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_BOARD_BACK_TEMP_ENUM_STR,
+ MC_CMD_SENSOR_IN_I1V8_ENUM_STR,
+ MC_CMD_SENSOR_IN_I2V5_ENUM_STR,
+ MC_CMD_SENSOR_IN_I3V3_ENUM_STR,
+ MC_CMD_SENSOR_IN_I12V0_ENUM_STR,
+ MC_CMD_SENSOR_IN_1V3_ENUM_STR,
+ MC_CMD_SENSOR_IN_I1V3_ENUM_STR,
+};
+
+/* END MKCONFIG GENERATED MonitorStatDescriptionsBlock */
+
+ const char *
+efx_mon_stat_description(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
+ return (__mon_stat_description[id]);
+}
+
#endif /* EFSYS_OPT_NAMES */
+/* START MKCONFIG GENERATED MonitorMcdiMappingBlock 173eee0a5599996a */
+ __checkReturn boolean_t
+efx_mon_mcdi_to_efx_stat(
+ __in int mcdi_index,
+ __out efx_mon_stat_t *statp)
+{
+
+ if ((mcdi_index % (MC_CMD_SENSOR_PAGE0_NEXT + 1)) ==
+ MC_CMD_SENSOR_PAGE0_NEXT) {
+ *statp = EFX_MON_NSTATS;
+ return (B_FALSE);
+ }
+
+ switch (mcdi_index) {
+ case MC_CMD_SENSOR_IN_I0V9:
+ *statp = EFX_MON_STAT_IN_I0V9;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC:
+ *statp = EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT:
+ *statp = EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT;
+ break;
+ case MC_CMD_SENSOR_PSU_TEMP:
+ *statp = EFX_MON_STAT_PSU_TEMP;
+ break;
+ case MC_CMD_SENSOR_FAN_2:
+ *statp = EFX_MON_STAT_FAN_2;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC:
+ *statp = EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC;
+ break;
+ case MC_CMD_SENSOR_BOARD_BACK_TEMP:
+ *statp = EFX_MON_STAT_BOARD_BACK_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_1V3:
+ *statp = EFX_MON_STAT_IN_1V3;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP:
+ *statp = EFX_MON_STAT_CONTROLLER_TDIODE_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_2V5:
+ *statp = EFX_MON_STAT_IN_2V5;
+ break;
+ case MC_CMD_SENSOR_PHY_COMMON_TEMP:
+ *statp = EFX_MON_STAT_PHY_COMMON_TEMP;
+ break;
+ case MC_CMD_SENSOR_PHY1_TEMP:
+ *statp = EFX_MON_STAT_PHY1_TEMP;
+ break;
+ case MC_CMD_SENSOR_VREG_INTERNAL_TEMP:
+ *statp = EFX_MON_STAT_VREG_INTERNAL_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_1V0:
+ *statp = EFX_MON_STAT_IN_1V0;
+ break;
+ case MC_CMD_SENSOR_FAN_1:
+ *statp = EFX_MON_STAT_FAN_1;
+ break;
+ case MC_CMD_SENSOR_IN_1V2:
+ *statp = EFX_MON_STAT_IN_1V2;
+ break;
+ case MC_CMD_SENSOR_FAN_3:
+ *statp = EFX_MON_STAT_FAN_3;
+ break;
+ case MC_CMD_SENSOR_IN_1V2A:
+ *statp = EFX_MON_STAT_IN_1V2A;
+ break;
+ case MC_CMD_SENSOR_SODIMM_0_TEMP:
+ *statp = EFX_MON_STAT_SODIMM_0_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_1V8:
+ *statp = EFX_MON_STAT_IN_1V8;
+ break;
+ case MC_CMD_SENSOR_IN_VREF:
+ *statp = EFX_MON_STAT_IN_VREF;
+ break;
+ case MC_CMD_SENSOR_SODIMM_VOUT:
+ *statp = EFX_MON_STAT_SODIMM_VOUT;
+ break;
+ case MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY:
+ *statp = EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY;
+ break;
+ case MC_CMD_SENSOR_IN_I1V2:
+ *statp = EFX_MON_STAT_IN_I1V2;
+ break;
+ case MC_CMD_SENSOR_IN_I1V3:
+ *statp = EFX_MON_STAT_IN_I1V3;
+ break;
+ case MC_CMD_SENSOR_AIRFLOW:
+ *statp = EFX_MON_STAT_AIRFLOW;
+ break;
+ case MC_CMD_SENSOR_HOTPOINT_TEMP:
+ *statp = EFX_MON_STAT_HOTPOINT_TEMP;
+ break;
+ case MC_CMD_SENSOR_VDD08D_VSS08D_CSR:
+ *statp = EFX_MON_STAT_VDD08D_VSS08D_CSR;
+ break;
+ case MC_CMD_SENSOR_AOE_TEMP:
+ *statp = EFX_MON_STAT_AOE_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_I1V8:
+ *statp = EFX_MON_STAT_IN_I1V8;
+ break;
+ case MC_CMD_SENSOR_IN_I2V5:
+ *statp = EFX_MON_STAT_IN_I2V5;
+ break;
+ case MC_CMD_SENSOR_PHY1_COOLING:
+ *statp = EFX_MON_STAT_PHY1_COOLING;
+ break;
+ case MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC:
+ *statp = EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC;
+ break;
+ case MC_CMD_SENSOR_IN_0V9_ADC:
+ *statp = EFX_MON_STAT_IN_0V9_ADC;
+ break;
+ case MC_CMD_SENSOR_VREG_0V9_A_TEMP:
+ *statp = EFX_MON_STAT_VREG_0V9_A_TEMP;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT:
+ *statp = EFX_MON_STAT_CONTROLLER_MASTER_VPTAT;
+ break;
+ case MC_CMD_SENSOR_PHY0_VCC:
+ *statp = EFX_MON_STAT_PHY0_VCC;
+ break;
+ case MC_CMD_SENSOR_PHY0_COOLING:
+ *statp = EFX_MON_STAT_PHY0_COOLING;
+ break;
+ case MC_CMD_SENSOR_PSU_AOE_TEMP:
+ *statp = EFX_MON_STAT_PSU_AOE_TEMP;
+ break;
+ case MC_CMD_SENSOR_VREG_0V9_TEMP:
+ *statp = EFX_MON_STAT_VREG_0V9_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_I0V9_A:
+ *statp = EFX_MON_STAT_IN_I0V9_A;
+ break;
+ case MC_CMD_SENSOR_IN_I3V3:
+ *statp = EFX_MON_STAT_IN_I3V3;
+ break;
+ case MC_CMD_SENSOR_BOARD_FRONT_TEMP:
+ *statp = EFX_MON_STAT_BOARD_FRONT_TEMP;
+ break;
+ case MC_CMD_SENSOR_OUT_VAOE:
+ *statp = EFX_MON_STAT_OUT_VAOE;
+ break;
+ case MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC:
+ *statp = EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC;
+ break;
+ case MC_CMD_SENSOR_IN_I12V0:
+ *statp = EFX_MON_STAT_IN_I12V0;
+ break;
+ case MC_CMD_SENSOR_PHY_POWER_PORT1:
+ *statp = EFX_MON_STAT_PHY_POWER_PORT1;
+ break;
+ case MC_CMD_SENSOR_PHY_POWER_PORT0:
+ *statp = EFX_MON_STAT_PHY_POWER_PORT0;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC:
+ *statp = EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP:
+ *statp = EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_TEMP:
+ *statp = EFX_MON_STAT_CONTROLLER_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_IAOE:
+ *statp = EFX_MON_STAT_IN_IAOE;
+ break;
+ case MC_CMD_SENSOR_IN_VAOE:
+ *statp = EFX_MON_STAT_IN_VAOE;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC:
+ *statp = EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC;
+ break;
+ case MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY:
+ *statp = EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY;
+ break;
+ case MC_CMD_SENSOR_PHY1_VCC:
+ *statp = EFX_MON_STAT_PHY1_VCC;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_COOLING:
+ *statp = EFX_MON_STAT_CONTROLLER_COOLING;
+ break;
+ case MC_CMD_SENSOR_AMBIENT_TEMP:
+ *statp = EFX_MON_STAT_AMBIENT_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_3V3:
+ *statp = EFX_MON_STAT_IN_3V3;
+ break;
+ case MC_CMD_SENSOR_PHY0_TEMP:
+ *statp = EFX_MON_STAT_PHY0_TEMP;
+ break;
+ case MC_CMD_SENSOR_SODIMM_1_TEMP:
+ *statp = EFX_MON_STAT_SODIMM_1_TEMP;
+ break;
+ case MC_CMD_SENSOR_MUM_VCC:
+ *statp = EFX_MON_STAT_MUM_VCC;
+ break;
+ case MC_CMD_SENSOR_VREG_0V9_B_TEMP:
+ *statp = EFX_MON_STAT_VREG_0V9_B_TEMP;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP:
+ *statp = EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP;
+ break;
+ case MC_CMD_SENSOR_FAN_4:
+ *statp = EFX_MON_STAT_FAN_4;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_2_TEMP:
+ *statp = EFX_MON_STAT_CONTROLLER_2_TEMP;
+ break;
+ case MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC:
+ *statp = EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC;
+ break;
+ case MC_CMD_SENSOR_IN_0V9_A:
+ *statp = EFX_MON_STAT_IN_0V9_A;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC:
+ *statp = EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC;
+ break;
+ case MC_CMD_SENSOR_IN_0V9:
+ *statp = EFX_MON_STAT_IN_0V9;
+ break;
+ case MC_CMD_SENSOR_IN_I0V9_B:
+ *statp = EFX_MON_STAT_IN_I0V9_B;
+ break;
+ case MC_CMD_SENSOR_NIC_POWER:
+ *statp = EFX_MON_STAT_NIC_POWER;
+ break;
+ case MC_CMD_SENSOR_IN_12V0:
+ *statp = EFX_MON_STAT_IN_12V0;
+ break;
+ case MC_CMD_SENSOR_OUT_IAOE:
+ *statp = EFX_MON_STAT_OUT_IAOE;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_VPTAT:
+ *statp = EFX_MON_STAT_CONTROLLER_VPTAT;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC:
+ *statp = EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC;
+ break;
+ case MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP:
+ *statp = EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP;
+ break;
+ case MC_CMD_SENSOR_FAN_0:
+ *statp = EFX_MON_STAT_FAN_0;
+ break;
+ case MC_CMD_SENSOR_VREG_1V2_TEMP:
+ *statp = EFX_MON_STAT_VREG_1V2_TEMP;
+ break;
+ case MC_CMD_SENSOR_IN_0V9_B:
+ *statp = EFX_MON_STAT_IN_0V9_B;
+ break;
+ default:
+ *statp = EFX_MON_NSTATS;
+ break;
+ };
+
+ if (*statp == EFX_MON_NSTATS)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, boolean_t, B_TRUE);
+ return (B_FALSE);
+};
+
+/* END MKCONFIG GENERATED MonitorMcdiMappingBlock */
+
+/* START MKCONFIG GENERATED MonitorStatisticUnitsBlock 2d447c656cc2d01d */
+ __checkReturn boolean_t
+efx_mon_get_stat_unit(
+ __in efx_mon_stat_t stat,
+ __out efx_mon_stat_unit_t *unitp)
+{
+ switch (stat) {
+ case EFX_MON_STAT_IN_1V0:
+ case EFX_MON_STAT_IN_1V2:
+ case EFX_MON_STAT_IN_1V8:
+ case EFX_MON_STAT_IN_2V5:
+ case EFX_MON_STAT_IN_3V3:
+ case EFX_MON_STAT_IN_12V0:
+ case EFX_MON_STAT_IN_1V2A:
+ case EFX_MON_STAT_IN_VREF:
+ case EFX_MON_STAT_OUT_VAOE:
+ case EFX_MON_STAT_IN_VAOE:
+ case EFX_MON_STAT_IN_0V9:
+ case EFX_MON_STAT_IN_0V9_ADC:
+ case EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC:
+ case EFX_MON_STAT_VDD08D_VSS08D_CSR:
+ case EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC:
+ case EFX_MON_STAT_MUM_VCC:
+ case EFX_MON_STAT_IN_0V9_A:
+ case EFX_MON_STAT_IN_0V9_B:
+ case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY:
+ case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC:
+ case EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY:
+ case EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT:
+ case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC:
+ case EFX_MON_STAT_SODIMM_VOUT:
+ case EFX_MON_STAT_PHY0_VCC:
+ case EFX_MON_STAT_PHY1_VCC:
+ case EFX_MON_STAT_IN_1V3:
+ *unitp = EFX_MON_STAT_UNIT_VOLTAGE_MV;
+ break;
+ case EFX_MON_STAT_CONTROLLER_TEMP:
+ case EFX_MON_STAT_PHY_COMMON_TEMP:
+ case EFX_MON_STAT_PHY0_TEMP:
+ case EFX_MON_STAT_PHY1_TEMP:
+ case EFX_MON_STAT_AOE_TEMP:
+ case EFX_MON_STAT_PSU_AOE_TEMP:
+ case EFX_MON_STAT_PSU_TEMP:
+ case EFX_MON_STAT_CONTROLLER_2_TEMP:
+ case EFX_MON_STAT_VREG_INTERNAL_TEMP:
+ case EFX_MON_STAT_VREG_0V9_TEMP:
+ case EFX_MON_STAT_VREG_1V2_TEMP:
+ case EFX_MON_STAT_CONTROLLER_VPTAT:
+ case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP:
+ case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC:
+ case EFX_MON_STAT_AMBIENT_TEMP:
+ case EFX_MON_STAT_HOTPOINT_TEMP:
+ case EFX_MON_STAT_VREG_0V9_A_TEMP:
+ case EFX_MON_STAT_VREG_0V9_B_TEMP:
+ case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP:
+ case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC:
+ case EFX_MON_STAT_SODIMM_0_TEMP:
+ case EFX_MON_STAT_SODIMM_1_TEMP:
+ case EFX_MON_STAT_CONTROLLER_TDIODE_TEMP:
+ case EFX_MON_STAT_BOARD_FRONT_TEMP:
+ case EFX_MON_STAT_BOARD_BACK_TEMP:
+ *unitp = EFX_MON_STAT_UNIT_TEMP_C;
+ break;
+ case EFX_MON_STAT_CONTROLLER_COOLING:
+ case EFX_MON_STAT_PHY0_COOLING:
+ case EFX_MON_STAT_PHY1_COOLING:
+ case EFX_MON_STAT_AIRFLOW:
+ case EFX_MON_STAT_PHY_POWER_PORT0:
+ case EFX_MON_STAT_PHY_POWER_PORT1:
+ *unitp = EFX_MON_STAT_UNIT_BOOL;
+ break;
+ case EFX_MON_STAT_NIC_POWER:
+ *unitp = EFX_MON_STAT_UNIT_POWER_W;
+ break;
+ case EFX_MON_STAT_OUT_IAOE:
+ case EFX_MON_STAT_IN_IAOE:
+ case EFX_MON_STAT_IN_I0V9:
+ case EFX_MON_STAT_IN_I1V2:
+ case EFX_MON_STAT_IN_I0V9_A:
+ case EFX_MON_STAT_IN_I0V9_B:
+ case EFX_MON_STAT_IN_I1V8:
+ case EFX_MON_STAT_IN_I2V5:
+ case EFX_MON_STAT_IN_I3V3:
+ case EFX_MON_STAT_IN_I12V0:
+ case EFX_MON_STAT_IN_I1V3:
+ *unitp = EFX_MON_STAT_UNIT_CURRENT_MA;
+ break;
+ case EFX_MON_STAT_FAN_0:
+ case EFX_MON_STAT_FAN_1:
+ case EFX_MON_STAT_FAN_2:
+ case EFX_MON_STAT_FAN_3:
+ case EFX_MON_STAT_FAN_4:
+ *unitp = EFX_MON_STAT_UNIT_RPM;
+ break;
+ default:
+ *unitp = EFX_MON_STAT_UNIT_UNKNOWN;
+ break;
+ };
+
+ if (*unitp == EFX_MON_STAT_UNIT_UNKNOWN)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, boolean_t, B_TRUE);
+ return (B_FALSE);
+};
+
+/* END MKCONFIG GENERATED MonitorStatisticUnitsBlock */
+
+/* START MKCONFIG GENERATED MonitorStatisticPortsBlock 1719b751d842534f */
+ __checkReturn boolean_t
+efx_mon_get_stat_portmap(
+ __in efx_mon_stat_t stat,
+ __out efx_mon_stat_portmask_t *maskp)
+{
+
+ switch (stat) {
+ case EFX_MON_STAT_PHY1_TEMP:
+ case EFX_MON_STAT_PHY1_COOLING:
+ case EFX_MON_STAT_PHY_POWER_PORT1:
+ *maskp = EFX_MON_STAT_PORTMAP_PORT1;
+ break;
+ case EFX_MON_STAT_CONTROLLER_TEMP:
+ case EFX_MON_STAT_PHY_COMMON_TEMP:
+ case EFX_MON_STAT_CONTROLLER_COOLING:
+ case EFX_MON_STAT_IN_1V0:
+ case EFX_MON_STAT_IN_1V2:
+ case EFX_MON_STAT_IN_1V8:
+ case EFX_MON_STAT_IN_2V5:
+ case EFX_MON_STAT_IN_3V3:
+ case EFX_MON_STAT_IN_12V0:
+ case EFX_MON_STAT_IN_1V2A:
+ case EFX_MON_STAT_IN_VREF:
+ case EFX_MON_STAT_OUT_VAOE:
+ case EFX_MON_STAT_AOE_TEMP:
+ case EFX_MON_STAT_PSU_AOE_TEMP:
+ case EFX_MON_STAT_PSU_TEMP:
+ case EFX_MON_STAT_FAN_0:
+ case EFX_MON_STAT_FAN_1:
+ case EFX_MON_STAT_FAN_2:
+ case EFX_MON_STAT_FAN_3:
+ case EFX_MON_STAT_FAN_4:
+ case EFX_MON_STAT_IN_VAOE:
+ case EFX_MON_STAT_OUT_IAOE:
+ case EFX_MON_STAT_IN_IAOE:
+ case EFX_MON_STAT_NIC_POWER:
+ case EFX_MON_STAT_IN_0V9:
+ case EFX_MON_STAT_IN_I0V9:
+ case EFX_MON_STAT_IN_I1V2:
+ case EFX_MON_STAT_IN_0V9_ADC:
+ case EFX_MON_STAT_CONTROLLER_2_TEMP:
+ case EFX_MON_STAT_VREG_INTERNAL_TEMP:
+ case EFX_MON_STAT_VREG_0V9_TEMP:
+ case EFX_MON_STAT_VREG_1V2_TEMP:
+ case EFX_MON_STAT_CONTROLLER_VPTAT:
+ case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP:
+ case EFX_MON_STAT_CONTROLLER_VPTAT_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_INTERNAL_TEMP_EXTADC:
+ case EFX_MON_STAT_AMBIENT_TEMP:
+ case EFX_MON_STAT_AIRFLOW:
+ case EFX_MON_STAT_VDD08D_VSS08D_CSR:
+ case EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC:
+ case EFX_MON_STAT_HOTPOINT_TEMP:
+ case EFX_MON_STAT_MUM_VCC:
+ case EFX_MON_STAT_IN_0V9_A:
+ case EFX_MON_STAT_IN_I0V9_A:
+ case EFX_MON_STAT_VREG_0V9_A_TEMP:
+ case EFX_MON_STAT_IN_0V9_B:
+ case EFX_MON_STAT_IN_I0V9_B:
+ case EFX_MON_STAT_VREG_0V9_B_TEMP:
+ case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY:
+ case EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXTADC:
+ case EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY:
+ case EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT:
+ case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP:
+ case EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXTADC:
+ case EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC:
+ case EFX_MON_STAT_SODIMM_VOUT:
+ case EFX_MON_STAT_SODIMM_0_TEMP:
+ case EFX_MON_STAT_SODIMM_1_TEMP:
+ case EFX_MON_STAT_PHY0_VCC:
+ case EFX_MON_STAT_PHY1_VCC:
+ case EFX_MON_STAT_CONTROLLER_TDIODE_TEMP:
+ case EFX_MON_STAT_BOARD_FRONT_TEMP:
+ case EFX_MON_STAT_BOARD_BACK_TEMP:
+ case EFX_MON_STAT_IN_I1V8:
+ case EFX_MON_STAT_IN_I2V5:
+ case EFX_MON_STAT_IN_I3V3:
+ case EFX_MON_STAT_IN_I12V0:
+ case EFX_MON_STAT_IN_1V3:
+ case EFX_MON_STAT_IN_I1V3:
+ *maskp = EFX_MON_STAT_PORTMAP_ALL;
+ break;
+ case EFX_MON_STAT_PHY0_TEMP:
+ case EFX_MON_STAT_PHY0_COOLING:
+ case EFX_MON_STAT_PHY_POWER_PORT0:
+ *maskp = EFX_MON_STAT_PORTMAP_PORT0;
+ break;
+ default:
+ *maskp = EFX_MON_STAT_PORTMAP_UNKNOWN;
+ break;
+ };
+
+ if (*maskp == EFX_MON_STAT_PORTMAP_UNKNOWN)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, boolean_t, B_TRUE);
+ return (B_FALSE);
+};
+
+/* END MKCONFIG GENERATED MonitorStatisticPortsBlock */
+
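The three generated lookup helpers above (efx_mon_mcdi_to_efx_stat(), efx_mon_get_stat_unit() and efx_mon_get_stat_portmap()) are designed to be chained: an MCDI sensor ordinal is first translated to an efx_mon_stat_t id, and the unit and port mask are then derived from that id. A minimal sketch of that chain, assuming only the prototypes this patch adds to the libefx headers; the example_ function name and my_port_mask parameter are illustrative:

    #include "efx.h"	/* libefx public API; prototypes added by this patch */

    /* Sketch: translate one MCDI sensor ordinal for the current port. */
    static boolean_t
    example_lookup_sensor(unsigned int mcdi_sensor,
        efx_mon_stat_portmask_t my_port_mask,
        efx_mon_stat_t *idp, efx_mon_stat_unit_t *unitp)
    {
        efx_mon_stat_portmask_t ports;

        /* Unknown sensors (e.g. newer firmware) simply fail the lookup. */
        if (!efx_mon_mcdi_to_efx_stat(mcdi_sensor, idp))
            return (B_FALSE);

        /* Skip sensors that do not apply to this port. */
        if (!efx_mon_get_stat_portmap(*idp, &ports) ||
            (ports & my_port_mask) == 0)
            return (B_FALSE);

        /* The unit lookup can also fail for stats the driver does not know. */
        if (!efx_mon_get_stat_unit(*idp, unitp))
            *unitp = EFX_MON_STAT_UNIT_UNKNOWN;

        return (B_TRUE);
    }

mcdi_mon_decode_stats() later in this patch follows essentially the same sequence.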
__checkReturn efx_rc_t
efx_mon_stats_update(
__in efx_nic_t *enp,
@@ -217,6 +816,20 @@ efx_mon_stats_update(
return (emop->emo_stats_update(enp, esmp, values));
}
+ __checkReturn efx_rc_t
+efx_mon_limits_update(
+ __in efx_nic_t *enp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop = emp->em_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ return (emop->emo_limits_update(enp, values));
+}
+
#endif /* EFSYS_OPT_MON_STATS */
void
diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c
index 6c162e03..cea32b79 100644
--- a/drivers/net/sfc/base/efx_nic.c
+++ b/drivers/net/sfc/base/efx_nic.c
@@ -100,6 +100,8 @@ static const efx_nic_ops_t __efx_nic_siena_ops = {
siena_nic_init, /* eno_init */
NULL, /* eno_get_vi_pool */
NULL, /* eno_get_bar_region */
+ NULL, /* eno_hw_unavailable */
+ NULL, /* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
siena_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
@@ -119,6 +121,8 @@ static const efx_nic_ops_t __efx_nic_hunt_ops = {
ef10_nic_init, /* eno_init */
ef10_nic_get_vi_pool, /* eno_get_vi_pool */
ef10_nic_get_bar_region, /* eno_get_bar_region */
+ ef10_nic_hw_unavailable, /* eno_hw_unavailable */
+ ef10_nic_set_hw_unavailable, /* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
ef10_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
@@ -138,6 +142,8 @@ static const efx_nic_ops_t __efx_nic_medford_ops = {
ef10_nic_init, /* eno_init */
ef10_nic_get_vi_pool, /* eno_get_vi_pool */
ef10_nic_get_bar_region, /* eno_get_bar_region */
+ ef10_nic_hw_unavailable, /* eno_hw_unavailable */
+ ef10_nic_set_hw_unavailable, /* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
ef10_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
@@ -157,6 +163,8 @@ static const efx_nic_ops_t __efx_nic_medford2_ops = {
ef10_nic_init, /* eno_init */
ef10_nic_get_vi_pool, /* eno_get_vi_pool */
ef10_nic_get_bar_region, /* eno_get_bar_region */
+ ef10_nic_hw_unavailable, /* eno_hw_unavailable */
+ ef10_nic_set_hw_unavailable, /* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
ef10_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
@@ -549,7 +557,7 @@ efx_nic_reset(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
/*
- * All modules except the MCDI, PROBE, NVRAM, VPD, MON
+ * All modules except the MCDI, PROBE, NVRAM, VPD, MON, TUNNEL
* (which we do not reset here) must have been shut down or never
* initialized.
*
@@ -559,7 +567,10 @@ efx_nic_reset(
*/
mod_flags = enp->en_mod_flags;
mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
- EFX_MOD_VPD | EFX_MOD_MON);
+ EFX_MOD_VPD | EFX_MOD_MON);
+#if EFSYS_OPT_TUNNEL
+ mod_flags &= ~EFX_MOD_TUNNEL;
+#endif /* EFSYS_OPT_TUNNEL */
EFSYS_ASSERT3U(mod_flags, ==, 0);
if (mod_flags != 0) {
rc = EINVAL;
@@ -584,6 +595,7 @@ efx_nic_cfg_get(
__in efx_nic_t *enp)
{
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
return (&(enp->en_nic_cfg));
}
@@ -649,6 +661,39 @@ fail1:
return (rc);
}
+ __checkReturn boolean_t
+efx_nic_hw_unavailable(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ /* NOTE: can be used by MCDI before NIC probe */
+
+ if (enop->eno_hw_unavailable != NULL) {
+ if ((enop->eno_hw_unavailable)(enp) != B_FALSE)
+ goto unavail;
+ }
+
+ return (B_FALSE);
+
+unavail:
+ return (B_TRUE);
+}
+
+ void
+efx_nic_set_hw_unavailable(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ if (enop->eno_set_hw_unavailable != NULL)
+ enop->eno_set_hw_unavailable(enp);
+}
+
+
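efx_nic_hw_unavailable() and efx_nic_set_hw_unavailable() are optional-op dispatchers: families that leave the hook NULL (Siena, per the ops-table hunks earlier in this file's diff) always report the hardware as available. A hedged sketch of a caller, assuming only the two entry points added here; the polling context and the all-ones parameter are illustrative:

    #include "efx.h"

    /* Sketch: gate a polling path on device availability. */
    static void
    example_poll(efx_nic_t *enp, boolean_t saw_all_ones_read)
    {
        if (efx_nic_hw_unavailable(enp))
            return;			/* device already marked dead */

        /* ... normal register polling would go here ... */

        if (saw_all_ones_read)		/* e.g. a BAR read returned ~0 */
            efx_nic_set_hw_unavailable(enp);
    }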
#if EFSYS_OPT_DIAG
__checkReturn efx_rc_t
@@ -786,13 +831,12 @@ efx_mcdi_get_loopback_modes(
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
- MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN);
efx_qword_t mask;
efx_qword_t modes;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
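This is the first of many hunks in the patch that replace an open-coded uint8_t payload[MAX(in, out)] array plus memset() with EFX_MCDI_DECLARE_BUF(). The macro itself is presumably defined in the libefx MCDI headers and is not visible in this section; a self-contained sketch of an equivalent definition, on the assumption that all it needs to do is declare a zero-initialised buffer large enough for both the request and the response:

    /* Sketch only: the real EFX_MCDI_DECLARE_BUF may differ (e.g. padding);
     * the point is that declaration and zero-fill happen in one step, so the
     * per-request memset() can no longer be forgotten. */
    #ifndef MAX
    #define MAX(_a, _b)	((_a) > (_b) ? (_a) : (_b))
    #endif

    #define EXAMPLE_MCDI_DECLARE_BUF(_name, _in_len, _out_len)	\
        uint8_t _name[MAX(_in_len, _out_len)] = { 0 }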
diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c
index be409c3a..5296c59b 100644
--- a/drivers/net/sfc/base/efx_nvram.c
+++ b/drivers/net/sfc/base/efx_nvram.c
@@ -468,7 +468,7 @@ efx_nvram_validate(
goto fail1;
if (envop->envo_buffer_validate != NULL) {
- if ((rc = envop->envo_buffer_validate(enp, partn,
+ if ((rc = envop->envo_buffer_validate(partn,
partn_data, partn_size)) != 0)
goto fail2;
}
@@ -514,12 +514,11 @@ efx_mcdi_nvram_partitions(
__out unsigned int *npartnp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN,
- MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_PARTITIONS_IN_LEN,
+ MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
unsigned int npartn;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_PARTITIONS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN;
@@ -577,11 +576,10 @@ efx_mcdi_nvram_metadata(
__in size_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN,
- MC_CMD_NVRAM_METADATA_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_METADATA_IN_LEN,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_METADATA;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN;
@@ -667,12 +665,11 @@ efx_mcdi_nvram_info(
__out_opt uint32_t *erase_sizep,
__out_opt uint32_t *write_sizep)
{
- uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
- MC_CMD_NVRAM_INFO_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN);
efx_mcdi_req_t req;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
@@ -728,12 +725,11 @@ efx_mcdi_nvram_update_start(
__in efx_nic_t *enp,
__in uint32_t partn)
{
- uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
- MC_CMD_NVRAM_UPDATE_START_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_START_OUT_LEN);
efx_mcdi_req_t req;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN;
@@ -770,8 +766,8 @@ efx_mcdi_nvram_read(
__in uint32_t mode)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_V2_LEN,
- MC_CMD_NVRAM_READ_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_READ_IN_V2_LEN,
+ MC_CMD_NVRAM_READ_OUT_LENMAX);
efx_rc_t rc;
if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) {
@@ -779,7 +775,6 @@ efx_mcdi_nvram_read(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_READ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN;
@@ -825,11 +820,10 @@ efx_mcdi_nvram_erase(
__in size_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN,
- MC_CMD_NVRAM_ERASE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_ERASE_IN_LEN,
+ MC_CMD_NVRAM_ERASE_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_ERASE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
@@ -865,27 +859,31 @@ efx_mcdi_nvram_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in uint32_t offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MCDI_CTL_SDU_LEN_MAX_V1,
- MCDI_CTL_SDU_LEN_MAX_V2)];
+ uint8_t *payload;
efx_rc_t rc;
size_t max_data_size;
+ size_t payload_len = enp->en_nic_cfg.enc_mcdi_max_payload_length;
- max_data_size = enp->en_nic_cfg.enc_mcdi_max_payload_length
- - MC_CMD_NVRAM_WRITE_IN_LEN(0);
- EFSYS_ASSERT3U(enp->en_nic_cfg.enc_mcdi_max_payload_length, >, 0);
- EFSYS_ASSERT3U(max_data_size, <,
- enp->en_nic_cfg.enc_mcdi_max_payload_length);
+ max_data_size = payload_len - MC_CMD_NVRAM_WRITE_IN_LEN(0);
+ EFSYS_ASSERT3U(payload_len, >, 0);
+ EFSYS_ASSERT3U(max_data_size, <, payload_len);
if (size > max_data_size) {
rc = EINVAL;
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
+ EFSYS_KMEM_ALLOC(enp->en_esip, payload_len, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, payload_len);
req.emr_cmd = MC_CMD_NVRAM_WRITE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size);
@@ -903,11 +901,16 @@ efx_mcdi_nvram_write(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail2;
+ goto fail3;
}
+ EFSYS_KMEM_FREE(enp->en_esip, payload_len, payload);
+
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
+ EFSYS_KMEM_FREE(enp->en_esip, payload_len, payload);
fail2:
EFSYS_PROBE(fail2);
fail1:
@@ -930,12 +933,11 @@ efx_mcdi_nvram_update_finish(
{
const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
- MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
uint32_t verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN;
@@ -1001,12 +1003,11 @@ efx_mcdi_nvram_test(
__in uint32_t partn)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
- MC_CMD_NVRAM_TEST_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN);
int result;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_TEST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c
index ba2f51c1..36a7bbd3 100644
--- a/drivers/net/sfc/base/efx_phy.c
+++ b/drivers/net/sfc/base/efx_phy.c
@@ -15,6 +15,7 @@ static const efx_phy_ops_t __efx_phy_siena_ops = {
siena_phy_reconfigure, /* epo_reconfigure */
siena_phy_verify, /* epo_verify */
siena_phy_oui_get, /* epo_oui_get */
+ NULL, /* epo_link_state_get */
#if EFSYS_OPT_PHY_STATS
siena_phy_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
@@ -34,6 +35,7 @@ static const efx_phy_ops_t __efx_phy_ef10_ops = {
ef10_phy_reconfigure, /* epo_reconfigure */
ef10_phy_verify, /* epo_verify */
ef10_phy_oui_get, /* epo_oui_get */
+ ef10_phy_link_state_get, /* epo_link_state_get */
#if EFSYS_OPT_PHY_STATS
ef10_phy_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
@@ -286,8 +288,8 @@ efx_phy_media_type_get(
efx_phy_module_get_info(
__in efx_nic_t *enp,
__in uint8_t dev_addr,
- __in uint8_t offset,
- __in uint8_t len,
+ __in size_t offset,
+ __in size_t len,
__out_bcount(len) uint8_t *data)
{
efx_rc_t rc;
@@ -295,7 +297,8 @@ efx_phy_module_get_info(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT(data != NULL);
- if ((uint32_t)offset + len > 0xff) {
+ if ((offset > EFX_PHY_MEDIA_INFO_MAX_OFFSET) ||
+ ((offset + len) > EFX_PHY_MEDIA_INFO_MAX_OFFSET)) {
rc = EINVAL;
goto fail1;
}
@@ -314,6 +317,57 @@ fail1:
return (rc);
}
+ __checkReturn efx_rc_t
+efx_phy_fec_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_fec_type_t *typep)
+{
+ efx_rc_t rc;
+ efx_phy_link_state_t epls;
+
+ if ((rc = efx_phy_link_state_get(enp, &epls)) != 0)
+ goto fail1;
+
+ *typep = epls.epls_fec;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_phy_link_state_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_link_state_t *eplsp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (epop->epo_link_state_get == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_link_state_get(enp, eplsp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
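efx_phy_fec_type_get() is a thin convenience wrapper: it fetches the whole link state and returns only the FEC field, and efx_phy_link_state_get() fails with ENOTSUP when the PHY ops table has no epo_link_state_get hook (Siena, per the table change above). A short usage sketch, assuming only the declarations added by this patch; the example_ helper name is illustrative:

    #include "efx.h"

    /* Sketch: fetch the full PHY link state and the FEC mode in use. */
    static efx_rc_t
    example_link_snapshot(efx_nic_t *enp, efx_phy_link_state_t *eplsp,
        efx_phy_fec_type_t *fecp)
    {
        efx_rc_t rc;

        /* ENOTSUP on families without an epo_link_state_get hook. */
        if ((rc = efx_phy_link_state_get(enp, eplsp)) != 0)
            return (rc);

        /* Same field that efx_phy_fec_type_get() would return. */
        *fecp = eplsp->epls_fec;
        return (0);
    }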
#if EFSYS_OPT_PHY_STATS
#if EFSYS_OPT_NAMES
diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c
index 33a1a084..5fff932b 100644
--- a/drivers/net/sfc/base/efx_port.c
+++ b/drivers/net/sfc/base/efx_port.c
@@ -37,7 +37,7 @@ efx_port_init(
epp->ep_emop->emo_reconfigure(enp);
 	/* Pick up current phy capabilities */
- efx_port_poll(enp, NULL);
+ (void) efx_port_poll(enp, NULL);
/*
* Turn on the PHY if available, otherwise reset it, and
diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c
index 4fd73bab..04bc7aed 100644
--- a/drivers/net/sfc/base/efx_rx.c
+++ b/drivers/net/sfc/base/efx_rx.c
@@ -298,84 +298,104 @@ fail1:
efx_rx_scale_hash_flags_get(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t hash_alg,
- __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out_ecount_part(max_nflags, *nflagsp) unsigned int *flagsp,
+ __in unsigned int max_nflags,
__out unsigned int *nflagsp)
{
efx_nic_cfg_t *encp = &enp->en_nic_cfg;
- boolean_t l4;
- boolean_t additional_modes;
- unsigned int *entryp = flags;
+ unsigned int nflags = 0;
efx_rc_t rc;
- if (flags == NULL || nflagsp == NULL) {
+ if (flagsp == NULL || nflagsp == NULL) {
rc = EINVAL;
goto fail1;
}
- l4 = encp->enc_rx_scale_l4_hash_supported;
- additional_modes = encp->enc_rx_scale_additional_modes_supported;
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) {
+ nflags = 0;
+ goto done;
+ }
-#define LIST_FLAGS(_entryp, _class, _l4_hashing, _additional_modes) \
- do { \
- if (_l4_hashing) { \
- *(_entryp++) = EFX_RX_HASH(_class, 4TUPLE); \
- \
- if (_additional_modes) { \
- *(_entryp++) = \
- EFX_RX_HASH(_class, 2TUPLE_DST); \
- *(_entryp++) = \
- EFX_RX_HASH(_class, 2TUPLE_SRC); \
- } \
- } \
- \
- *(_entryp++) = EFX_RX_HASH(_class, 2TUPLE); \
- \
- if (_additional_modes) { \
- *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_DST); \
- *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_SRC); \
- } \
- \
- *(_entryp++) = EFX_RX_HASH(_class, DISABLE); \
- \
- _NOTE(CONSTANTCONDITION) \
+ /* Helper to add flags word to flags array without buffer overflow */
+#define INSERT_FLAGS(_flags) \
+ do { \
+ if (nflags >= max_nflags) { \
+ rc = E2BIG; \
+ goto fail2; \
+ } \
+ *(flagsp + nflags) = (_flags); \
+ nflags++; \
+ \
+ _NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
- switch (hash_alg) {
- case EFX_RX_HASHALG_PACKED_STREAM:
- if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
- break;
- /* FALLTHRU */
- case EFX_RX_HASHALG_TOEPLITZ:
- if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
- break;
+ if (encp->enc_rx_scale_l4_hash_supported != B_FALSE) {
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 4TUPLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 4TUPLE));
+ }
- LIST_FLAGS(entryp, IPV4_TCP, l4, additional_modes);
- LIST_FLAGS(entryp, IPV6_TCP, l4, additional_modes);
+ if ((encp->enc_rx_scale_l4_hash_supported != B_FALSE) &&
+ (encp->enc_rx_scale_additional_modes_supported != B_FALSE)) {
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_SRC));
- if (additional_modes) {
- LIST_FLAGS(entryp, IPV4_UDP, l4, additional_modes);
- LIST_FLAGS(entryp, IPV6_UDP, l4, additional_modes);
- }
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_SRC));
- LIST_FLAGS(entryp, IPV4, B_FALSE, additional_modes);
- LIST_FLAGS(entryp, IPV6, B_FALSE, additional_modes);
- break;
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 4TUPLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_SRC));
- default:
- rc = EINVAL;
- goto fail2;
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 4TUPLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_SRC));
+ }
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE));
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV4, 2TUPLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6, 2TUPLE));
+
+ if (encp->enc_rx_scale_additional_modes_supported != B_FALSE) {
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_SRC));
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_SRC));
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_SRC));
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_SRC));
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_SRC));
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_DST));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_SRC));
}
-#undef LIST_FLAGS
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, DISABLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, DISABLE));
+
+ INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, DISABLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, DISABLE));
- *nflagsp = (unsigned int)(entryp - flags);
- EFSYS_ASSERT3U(*nflagsp, <=, EFX_RX_HASH_NFLAGS);
+ INSERT_FLAGS(EFX_RX_HASH(IPV4, DISABLE));
+ INSERT_FLAGS(EFX_RX_HASH(IPV6, DISABLE));
+#undef INSERT_FLAGS
+
+done:
+ *nflagsp = nflags;
return (0);
fail2:
EFSYS_PROBE(fail2);
-
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -512,9 +532,8 @@ efx_rx_scale_mode_set(
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
const efx_rx_ops_t *erxop = enp->en_erxop;
- unsigned int type_flags[EFX_RX_HASH_NFLAGS];
- unsigned int type_nflags;
efx_rx_hash_type_t type_check;
unsigned int i;
efx_rc_t rc;
@@ -533,46 +552,60 @@ efx_rx_scale_mode_set(
}
/*
- * Translate legacy flags to the new representation
- * so that chip-specific handlers will consider the
- * new flags only.
+ * If RSS hash type is represented by additional bits
+ * in the value, the latter need to be verified since
+ * not all bit combinations are valid RSS modes. Also,
+ * depending on the firmware, some valid combinations
+ * may be unsupported. Discern additional bits in the
+ * type value and try to recognise valid combinations.
+ * If some bits remain unrecognised, report the error.
*/
- if (type & EFX_RX_HASH_IPV4) {
- type |= EFX_RX_HASH(IPV4, 2TUPLE);
- type |= EFX_RX_HASH(IPV4_TCP, 2TUPLE);
- type |= EFX_RX_HASH(IPV4_UDP, 2TUPLE);
- }
-
- if (type & EFX_RX_HASH_TCPIPV4)
- type |= EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ type_check = type & ~EFX_RX_HASH_LEGACY_MASK;
+ if (type_check != 0) {
+ unsigned int type_flags[EFX_RX_HASH_NFLAGS];
+ unsigned int type_nflags;
- if (type & EFX_RX_HASH_IPV6) {
- type |= EFX_RX_HASH(IPV6, 2TUPLE);
- type |= EFX_RX_HASH(IPV6_TCP, 2TUPLE);
- type |= EFX_RX_HASH(IPV6_UDP, 2TUPLE);
- }
+ rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags,
+ EFX_ARRAY_SIZE(type_flags), &type_nflags);
+ if (rc != 0)
+ goto fail2;
- if (type & EFX_RX_HASH_TCPIPV6)
- type |= EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+ for (i = 0; i < type_nflags; ++i) {
+ if ((type_check & type_flags[i]) == type_flags[i])
+ type_check &= ~(type_flags[i]);
+ }
- type &= ~EFX_RX_HASH_LEGACY_MASK;
- type_check = type;
+ if (type_check != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ }
/*
- * Get the list of supported hash flags and sanitise the input.
+ * Translate EFX_RX_HASH() flags to their legacy counterparts
+ * provided that the FW claims no support for additional modes.
*/
- rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags, &type_nflags);
- if (rc != 0)
- goto fail2;
-
- for (i = 0; i < type_nflags; ++i) {
- if ((type_check & type_flags[i]) == type_flags[i])
- type_check &= ~(type_flags[i]);
- }
+ if (encp->enc_rx_scale_additional_modes_supported == B_FALSE) {
+ efx_rx_hash_type_t t_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) |
+ EFX_RX_HASH(IPV4_TCP, 2TUPLE);
+ efx_rx_hash_type_t t_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) |
+ EFX_RX_HASH(IPV6_TCP, 2TUPLE);
+ efx_rx_hash_type_t t_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ efx_rx_hash_type_t t_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+
+ if ((type & t_ipv4) == t_ipv4)
+ type |= EFX_RX_HASH_IPV4;
+ if ((type & t_ipv6) == t_ipv6)
+ type |= EFX_RX_HASH_IPV6;
+
+ if (encp->enc_rx_scale_l4_hash_supported == B_TRUE) {
+ if ((type & t_ipv4_tcp) == t_ipv4_tcp)
+ type |= EFX_RX_HASH_TCPIPV4;
+ if ((type & t_ipv6_tcp) == t_ipv6_tcp)
+ type |= EFX_RX_HASH_TCPIPV6;
+ }
- if (type_check != 0) {
- rc = EINVAL;
- goto fail3;
+ type &= EFX_RX_HASH_LEGACY_MASK;
}
if (erxop->erxo_scale_mode_set != NULL) {
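The reworked efx_rx_scale_mode_set() above does two things with the incoming hash type: it first checks every non-legacy bit against the flag combinations reported by efx_rx_scale_hash_flags_get(), clearing each supported combination that is fully present and rejecting the request if anything is left over; only then, and only when the firmware lacks additional-mode support, does it fold recognised combinations back into the legacy IPV4/IPV6/TCPIPV4/TCPIPV6 bits. A standalone worked example of the verification loop, with made-up bitmask values standing in for the EFX_RX_HASH() flags:

    #include <stdio.h>

    int
    main(void)
    {
        /* Pretend the firmware supports three flag combinations. */
        unsigned int supported[] = { 0x003, 0x00c, 0x030 };
        unsigned int request = 0x00f;	/* 0x003 | 0x00c: should pass */
        unsigned int check = request;
        unsigned int i;

        for (i = 0; i < sizeof (supported) / sizeof (supported[0]); ++i) {
            if ((check & supported[i]) == supported[i])
                check &= ~supported[i];
        }

        printf("%s\n", check == 0 ? "accepted" : "rejected");
        return (0);
    }

Requesting 0x010 instead would leave check non-zero (0x010 is only half of the supported 0x030 combination), which is exactly the EINVAL case in the code above.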
@@ -831,7 +864,7 @@ efx_rx_qcreate_packed_stream(
{
efx_rxq_type_data_t type_data;
- memset(&type_data, 0, sizeof(type_data));
+ memset(&type_data, 0, sizeof (type_data));
type_data.ertd_packed_stream.eps_buf_size = ps_buf_size;
@@ -867,7 +900,7 @@ efx_rx_qcreate_es_super_buffer(
goto fail1;
}
- memset(&type_data, 0, sizeof(type_data));
+ memset(&type_data, 0, sizeof (type_data));
type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc;
type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len;
@@ -1090,10 +1123,6 @@ siena_rx_scale_mode_set(
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
- efx_rx_hash_type_t type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE);
- efx_rx_hash_type_t type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
- efx_rx_hash_type_t type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE);
- efx_rx_hash_type_t type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
efx_rc_t rc;
if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
@@ -1108,12 +1137,12 @@ siena_rx_scale_mode_set(
case EFX_RX_HASHALG_TOEPLITZ:
EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
- (type & type_ipv4) == type_ipv4,
- (type & type_ipv4_tcp) == type_ipv4_tcp);
+ (type & EFX_RX_HASH_IPV4) ? B_TRUE : B_FALSE,
+ (type & EFX_RX_HASH_TCPIPV4) ? B_TRUE : B_FALSE);
EFX_RX_TOEPLITZ_IPV6_HASH(enp,
- (type & type_ipv6) == type_ipv6,
- (type & type_ipv6_tcp) == type_ipv6_tcp,
+ (type & EFX_RX_HASH_IPV6) ? B_TRUE : B_FALSE,
+ (type & EFX_RX_HASH_TCPIPV6) ? B_TRUE : B_FALSE,
rc);
if (rc != 0)
goto fail2;
diff --git a/drivers/net/sfc/base/efx_tunnel.c b/drivers/net/sfc/base/efx_tunnel.c
index 399fd540..edb6be02 100644
--- a/drivers/net/sfc/base/efx_tunnel.c
+++ b/drivers/net/sfc/base/efx_tunnel.c
@@ -40,8 +40,9 @@ efx_mcdi_set_tunnel_encap_udp_ports(
__out boolean_t *resetting)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX,
- MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX,
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
efx_word_t flags;
efx_rc_t rc;
unsigned int i;
@@ -52,7 +53,6 @@ efx_mcdi_set_tunnel_encap_udp_ports(
else
entries_num = etcp->etc_udp_entries_num;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS;
req.emr_in_buf = payload;
req.emr_in_length =
diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c
index da37580a..bf1180a1 100644
--- a/drivers/net/sfc/base/efx_tx.c
+++ b/drivers/net/sfc/base/efx_tx.c
@@ -572,19 +572,10 @@ efx_tx_qdesc_post(
{
efx_nic_t *enp = etp->et_enp;
const efx_tx_ops_t *etxop = enp->en_etxop;
- efx_rc_t rc;
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
- if ((rc = etxop->etxo_qdesc_post(etp, ed,
- ndescs, completed, addedp)) != 0)
- goto fail1;
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
- return (rc);
+ return (etxop->etxo_qdesc_post(etp, ed, ndescs, completed, addedp));
}
void
@@ -763,10 +754,9 @@ siena_tx_qpost(
{
unsigned int added = *addedp;
unsigned int i;
- int rc = ENOSPC;
if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
- goto fail1;
+ return (ENOSPC);
for (i = 0; i < ndescs; i++) {
efx_buffer_t *ebp = &eb[i];
@@ -788,11 +778,6 @@ siena_tx_qpost(
*addedp = added;
return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
}
static void
diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c
index 16ea81d2..ca30e90f 100644
--- a/drivers/net/sfc/base/hunt_nic.c
+++ b/drivers/net/sfc/base/hunt_nic.c
@@ -20,7 +20,6 @@ hunt_nic_get_required_pcie_bandwidth(
__out uint32_t *bandwidth_mbpsp)
{
uint32_t port_modes;
- uint32_t max_port_mode;
uint32_t bandwidth;
efx_rc_t rc;
@@ -30,7 +29,8 @@ hunt_nic_get_required_pcie_bandwidth(
* capable mode is in use.
*/
- if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ NULL, NULL)) != 0) {
/* No port mode info available */
bandwidth = 0;
goto out;
@@ -46,17 +46,13 @@ hunt_nic_get_required_pcie_bandwidth(
goto fail1;
} else {
if (port_modes & (1U << TLV_PORT_MODE_40G)) {
- max_port_mode = TLV_PORT_MODE_40G;
+ bandwidth = 40000;
} else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) {
- max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
+ bandwidth = 4 * 10000;
} else {
/* Assume two 10G ports */
- max_port_mode = TLV_PORT_MODE_10G_10G;
+ bandwidth = 2 * 10000;
}
-
- if ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,
- &bandwidth)) != 0)
- goto fail2;
}
out:
@@ -64,8 +60,6 @@ out:
return (0);
-fail2:
- EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -189,6 +183,9 @@ hunt_board_cfg(
encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
+ /* Checksums for TSO sends can be incorrect on Huntington. */
+ encp->enc_bug61297_workaround = B_TRUE;
+
/* Alignment for receive packet DMA buffers */
encp->enc_rx_buf_align_start = 1;
encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
diff --git a/drivers/net/sfc/base/mc_driver_pcol_strs.h b/drivers/net/sfc/base/mc_driver_pcol_strs.h
new file mode 100644
index 00000000..73d633cb
--- /dev/null
+++ b/drivers/net/sfc/base/mc_driver_pcol_strs.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/*
+ * This file is automatically generated. DO NOT EDIT IT.
+ * To make changes, edit the .yml files under firmwaresrc doc/mcdi/ and
+ * rebuild this file with "make -C doc mcdiheaders".
+ *
+ * This version of the file contains only the MCDI strings actually used by libefx.
+ */
+
+#ifndef _MC_DRIVER_PCOL_STRS_H
+#define _MC_DRIVER_PCOL_STRS_H
+
+#define MC_CMD_SENSOR_CONTROLLER_TEMP_ENUM_STR "Controller temperature: degC"
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP_ENUM_STR "Phy common temperature: degC"
+#define MC_CMD_SENSOR_CONTROLLER_COOLING_ENUM_STR "Controller cooling: bool"
+#define MC_CMD_SENSOR_PHY0_TEMP_ENUM_STR "Phy 0 temperature: degC"
+#define MC_CMD_SENSOR_PHY0_COOLING_ENUM_STR "Phy 0 cooling: bool"
+#define MC_CMD_SENSOR_PHY1_TEMP_ENUM_STR "Phy 1 temperature: degC"
+#define MC_CMD_SENSOR_PHY1_COOLING_ENUM_STR "Phy 1 cooling: bool"
+#define MC_CMD_SENSOR_IN_1V0_ENUM_STR "1.0v power: mV"
+#define MC_CMD_SENSOR_IN_1V2_ENUM_STR "1.2v power: mV"
+#define MC_CMD_SENSOR_IN_1V8_ENUM_STR "1.8v power: mV"
+#define MC_CMD_SENSOR_IN_2V5_ENUM_STR "2.5v power: mV"
+#define MC_CMD_SENSOR_IN_3V3_ENUM_STR "3.3v power: mV"
+#define MC_CMD_SENSOR_IN_12V0_ENUM_STR "12v power: mV"
+#define MC_CMD_SENSOR_IN_1V2A_ENUM_STR "1.2v analogue power: mV"
+#define MC_CMD_SENSOR_IN_VREF_ENUM_STR "reference voltage: mV"
+#define MC_CMD_SENSOR_OUT_VAOE_ENUM_STR "AOE FPGA power: mV"
+#define MC_CMD_SENSOR_AOE_TEMP_ENUM_STR "AOE FPGA temperature: degC"
+#define MC_CMD_SENSOR_PSU_AOE_TEMP_ENUM_STR "AOE FPGA PSU temperature: degC"
+#define MC_CMD_SENSOR_PSU_TEMP_ENUM_STR "AOE PSU temperature: degC"
+#define MC_CMD_SENSOR_FAN_0_ENUM_STR "Fan 0 speed: RPM"
+#define MC_CMD_SENSOR_FAN_1_ENUM_STR "Fan 1 speed: RPM"
+#define MC_CMD_SENSOR_FAN_2_ENUM_STR "Fan 2 speed: RPM"
+#define MC_CMD_SENSOR_FAN_3_ENUM_STR "Fan 3 speed: RPM"
+#define MC_CMD_SENSOR_FAN_4_ENUM_STR "Fan 4 speed: RPM"
+#define MC_CMD_SENSOR_IN_VAOE_ENUM_STR "AOE FPGA input power: mV"
+#define MC_CMD_SENSOR_OUT_IAOE_ENUM_STR "AOE FPGA current: mA"
+#define MC_CMD_SENSOR_IN_IAOE_ENUM_STR "AOE FPGA input current: mA"
+#define MC_CMD_SENSOR_NIC_POWER_ENUM_STR "NIC power consumption: W"
+#define MC_CMD_SENSOR_IN_0V9_ENUM_STR "0.9v power voltage: mV"
+#define MC_CMD_SENSOR_IN_I0V9_ENUM_STR "0.9v power current: mA"
+#define MC_CMD_SENSOR_IN_I1V2_ENUM_STR "1.2v power current: mA"
+#define MC_CMD_SENSOR_PAGE0_NEXT_ENUM_STR "Not a sensor: reserved for the next page flag"
+#define MC_CMD_SENSOR_IN_0V9_ADC_ENUM_STR "0.9v power voltage (at ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP_ENUM_STR "Controller temperature 2: degC"
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP_ENUM_STR "Voltage regulator internal temperature: degC"
+#define MC_CMD_SENSOR_VREG_0V9_TEMP_ENUM_STR "0.9V voltage regulator temperature: degC"
+#define MC_CMD_SENSOR_VREG_1V2_TEMP_ENUM_STR "1.2V voltage regulator temperature: degC"
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_ENUM_STR "controller internal temperature sensor voltage (internal ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_ENUM_STR "controller internal temperature (internal ADC): degC"
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC_ENUM_STR "controller internal temperature sensor voltage (external ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC_ENUM_STR "controller internal temperature (external ADC): degC"
+#define MC_CMD_SENSOR_AMBIENT_TEMP_ENUM_STR "ambient temperature: degC"
+#define MC_CMD_SENSOR_AIRFLOW_ENUM_STR "air flow: bool"
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_ENUM_STR "voltage between VSS08D and VSS08D at CSR: mV"
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC_ENUM_STR "voltage between VSS08D and VSS08D at CSR (external ADC): mV"
+#define MC_CMD_SENSOR_HOTPOINT_TEMP_ENUM_STR "Hotpoint temperature: degC"
+#define MC_CMD_SENSOR_PHY_POWER_PORT0_ENUM_STR "Port 0 PHY power switch over-current: bool"
+#define MC_CMD_SENSOR_PHY_POWER_PORT1_ENUM_STR "Port 1 PHY power switch over-current: bool"
+#define MC_CMD_SENSOR_MUM_VCC_ENUM_STR "Mop-up microcontroller reference voltage: mV"
+#define MC_CMD_SENSOR_IN_0V9_A_ENUM_STR "0.9v power phase A voltage: mV"
+#define MC_CMD_SENSOR_IN_I0V9_A_ENUM_STR "0.9v power phase A current: mA"
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP_ENUM_STR "0.9V voltage regulator phase A temperature: degC"
+#define MC_CMD_SENSOR_IN_0V9_B_ENUM_STR "0.9v power phase B voltage: mV"
+#define MC_CMD_SENSOR_IN_I0V9_B_ENUM_STR "0.9v power phase B current: mA"
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP_ENUM_STR "0.9V voltage regulator phase B temperature: degC"
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_ENUM_STR "CCOM AVREG 1v2 supply (interval ADC): mV"
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC_ENUM_STR "CCOM AVREG 1v2 supply (external ADC): mV"
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_ENUM_STR "CCOM AVREG 1v8 supply (interval ADC): mV"
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC_ENUM_STR "CCOM AVREG 1v8 supply (external ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_RTS_ENUM_STR "CCOM RTS temperature: degC"
+#define MC_CMD_SENSOR_PAGE1_NEXT_ENUM_STR "Not a sensor: reserved for the next page flag"
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_ENUM_STR "controller internal temperature sensor voltage on master core (internal ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_ENUM_STR "controller internal temperature on master core (internal ADC): degC"
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC_ENUM_STR "controller internal temperature sensor voltage on master core (external ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC_ENUM_STR "controller internal temperature on master core (external ADC): degC"
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_ENUM_STR "controller internal temperature on slave core sensor voltage (internal ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_ENUM_STR "controller internal temperature on slave core (internal ADC): degC"
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC_ENUM_STR "controller internal temperature on slave core sensor voltage (external ADC): mV"
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC_ENUM_STR "controller internal temperature on slave core (external ADC): degC"
+#define MC_CMD_SENSOR_SODIMM_VOUT_ENUM_STR "Voltage supplied to the SODIMMs from their power supply: mV"
+#define MC_CMD_SENSOR_SODIMM_0_TEMP_ENUM_STR "Temperature of SODIMM 0 (if installed): degC"
+#define MC_CMD_SENSOR_SODIMM_1_TEMP_ENUM_STR "Temperature of SODIMM 1 (if installed): degC"
+#define MC_CMD_SENSOR_PHY0_VCC_ENUM_STR "Voltage supplied to the QSFP #0 from their power supply: mV"
+#define MC_CMD_SENSOR_PHY1_VCC_ENUM_STR "Voltage supplied to the QSFP #1 from their power supply: mV"
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP_ENUM_STR "Controller die temperature (TDIODE): degC"
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP_ENUM_STR "Board temperature (front): degC"
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP_ENUM_STR "Board temperature (back): degC"
+#define MC_CMD_SENSOR_IN_I1V8_ENUM_STR "1.8v power current: mA"
+#define MC_CMD_SENSOR_IN_I2V5_ENUM_STR "2.5v power current: mA"
+#define MC_CMD_SENSOR_IN_I3V3_ENUM_STR "3.3v power current: mA"
+#define MC_CMD_SENSOR_IN_I12V0_ENUM_STR "12v power current: mA"
+#define MC_CMD_SENSOR_IN_1V3_ENUM_STR "1.3v power: mV"
+#define MC_CMD_SENSOR_IN_I1V3_ENUM_STR "1.3v power current: mA"
+
+#endif /* _MC_DRIVER_PCOL_STRS_H */
diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c
index 940bd026..b53de0d6 100644
--- a/drivers/net/sfc/base/mcdi_mon.c
+++ b/drivers/net/sfc/base/mcdi_mon.c
@@ -6,141 +6,15 @@
#include "efx.h"
#include "efx_impl.h"
+#include "mcdi_mon.h"
#if EFSYS_OPT_MON_MCDI
#if EFSYS_OPT_MON_STATS
-#define MCDI_MON_NEXT_PAGE ((uint16_t)0xfffe)
-#define MCDI_MON_INVALID_SENSOR ((uint16_t)0xfffd)
-#define MCDI_MON_PAGE_SIZE 0x20
-
-/* Bitmasks of valid port(s) for each sensor */
-#define MCDI_MON_PORT_NONE (0x00)
-#define MCDI_MON_PORT_P1 (0x01)
-#define MCDI_MON_PORT_P2 (0x02)
-#define MCDI_MON_PORT_P3 (0x04)
-#define MCDI_MON_PORT_P4 (0x08)
-#define MCDI_MON_PORT_Px (0xFFFF)
-
/* Get port mask from one-based MCDI port number */
#define MCDI_MON_PORT_MASK(_emip) (1U << ((_emip)->emi_port - 1))
-/* Entry for MCDI sensor in sensor map */
-#define STAT(portmask, stat) \
- { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) }
-
-/* Entry for sensor next page flag in sensor map */
-#define STAT_NEXT_PAGE() \
- { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE }
-
-/* Placeholder for gaps in the array */
-#define STAT_NO_SENSOR() \
- { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR }
-
-/* Map from MC sensors to monitor statistics */
-static const struct mcdi_sensor_map_s {
- uint16_t msm_port_mask;
- uint16_t msm_stat;
-} mcdi_sensor_map[] = {
- /* Sensor page 0 MC_CMD_SENSOR_xxx */
- STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */
- STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */
- STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */
- STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */
- STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */
- STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */
- STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */
- STAT(Px, 1V), /* 0x07 IN_1V0 */
- STAT(Px, 1_2V), /* 0x08 IN_1V2 */
- STAT(Px, 1_8V), /* 0x09 IN_1V8 */
- STAT(Px, 2_5V), /* 0x0a IN_2V5 */
- STAT(Px, 3_3V), /* 0x0b IN_3V3 */
- STAT(Px, 12V), /* 0x0c IN_12V0 */
- STAT(Px, 1_2VA), /* 0x0d IN_1V2A */
- STAT(Px, VREF), /* 0x0e IN_VREF */
- STAT(Px, VAOE), /* 0x0f OUT_VAOE */
- STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */
- STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */
- STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */
- STAT(Px, FAN0), /* 0x13 FAN_0 */
- STAT(Px, FAN1), /* 0x14 FAN_1 */
- STAT(Px, FAN2), /* 0x15 FAN_2 */
- STAT(Px, FAN3), /* 0x16 FAN_3 */
- STAT(Px, FAN4), /* 0x17 FAN_4 */
- STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */
- STAT(Px, IAOE), /* 0x19 OUT_IAOE */
- STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */
- STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */
- STAT(Px, 0_9V), /* 0x1c IN_0V9 */
- STAT(Px, I0_9V), /* 0x1d IN_I0V9 */
- STAT(Px, I1_2V), /* 0x1e IN_I1V2 */
- STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */
-
- /* Sensor page 1 MC_CMD_SENSOR_xxx */
- STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */
- STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */
- STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */
- STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */
- STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */
- STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */
- STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. INTERNAL_TEMP */
- STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */
- STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. INTERNAL_TEMP_EXTADC */
- STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */
- STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */
- STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */
- STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */
- STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */
- STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */
- STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */
- STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */
- STAT(Px, 0V9_A), /* 0x31 0V9_A */
- STAT(Px, I0V9_A), /* 0x32 I0V9_A */
- STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */
- STAT(Px, 0V9_B), /* 0x34 0V9_B */
- STAT(Px, I0V9_B), /* 0x35 I0V9_B */
- STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */
- STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */
- STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC),
- /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */
- STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */
- STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC),
- /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */
- STAT_NO_SENSOR(), /* 0x3b (no sensor) */
- STAT_NO_SENSOR(), /* 0x3c (no sensor) */
- STAT_NO_SENSOR(), /* 0x3d (no sensor) */
- STAT_NO_SENSOR(), /* 0x3e (no sensor) */
- STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */
-
- /* Sensor page 2 MC_CMD_SENSOR_xxx */
- STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */
- STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */
- STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */
- STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC),
- /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */
- STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */
- STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */
- STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */
- STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC),
- /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */
- STAT_NO_SENSOR(), /* 0x48 (no sensor) */
- STAT(Px, SODIMM_VOUT), /* 0x49 SODIMM_VOUT */
- STAT(Px, SODIMM_0_TEMP), /* 0x4a SODIMM_0_TEMP */
- STAT(Px, SODIMM_1_TEMP), /* 0x4b SODIMM_1_TEMP */
- STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */
- STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */
- STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
- STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
- STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
- STAT(Px, I1V8), /* 0x51 IN_I1V8 */
- STAT(Px, I2V5), /* 0x52 IN_I2V5 */
- STAT(Px, I3V3), /* 0x53 IN_I3V3 */
- STAT(Px, I12V0), /* 0x54 IN_I12V0 */
- STAT(Px, 1_3V), /* 0x55 IN_1V3 */
- STAT(Px, I1V3), /* 0x56 IN_I1V3 */
-};
-
#define MCDI_STATIC_SENSOR_ASSERT(_field) \
EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \
== EFX_MON_STAT_STATE_ ## _field)
@@ -155,10 +29,10 @@ mcdi_mon_decode_stats(
__inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat)
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
- uint16_t port_mask;
+ efx_mon_stat_portmask_t port_mask;
uint16_t sensor;
size_t sensor_max;
- uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32];
+ uint32_t stat_mask[(EFX_MON_NSTATS + 31) / 32];
uint32_t idx = 0;
uint32_t page = 0;
@@ -169,13 +43,10 @@ mcdi_mon_decode_stats(
MCDI_STATIC_SENSOR_ASSERT(BROKEN);
MCDI_STATIC_SENSOR_ASSERT(NO_READING);
- EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 ==
- EFX_MON_MASK_ELEMENT_SIZE);
- sensor_max =
- MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map));
+ sensor_max = 8 * sensor_mask_size;
EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
- port_mask = MCDI_MON_PORT_MASK(emip);
+ port_mask = (efx_mon_stat_portmask_t)MCDI_MON_PORT_MASK(emip);
memset(stat_mask, 0, sizeof (stat_mask));
@@ -190,19 +61,36 @@ mcdi_mon_decode_stats(
* does not understand.
*/
for (sensor = 0; sensor < sensor_max; ++sensor) {
- efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat;
+ efx_mon_stat_t id;
+ efx_mon_stat_portmask_t stat_portmask = 0;
+ boolean_t decode_ok;
+ efx_mon_stat_unit_t stat_unit;
- if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) {
- EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE);
+ if ((sensor % (MC_CMD_SENSOR_PAGE0_NEXT + 1)) ==
+ MC_CMD_SENSOR_PAGE0_NEXT) {
page++;
continue;
+ /* This sensor is one of the page boundary bits. */
}
+
if (~(sensor_mask[page]) & (1U << sensor))
continue;
+		/* This sensor is not in the DMA buffer */
+
idx++;
+ /*
+		 * A valid stat is present in the DMA buffer, so idx must be
+		 * advanced past it even if we cannot look up the id.
+ */
+
+ decode_ok = efx_mon_mcdi_to_efx_stat(sensor, &id);
+ decode_ok =
+ decode_ok && efx_mon_get_stat_portmap(id, &stat_portmask);
- if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ if (!(decode_ok && (stat_portmask & port_mask)))
continue;
+ /* Either bad decode, or don't know what port stat is on */
+
EFSYS_ASSERT(id < EFX_MON_NSTATS);
/*
@@ -228,6 +116,10 @@ mcdi_mon_decode_stats(
stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword,
MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+
+ stat[id].emsv_unit =
+ efx_mon_get_stat_unit(id, &stat_unit) ?
+ stat_unit : EFX_MON_STAT_UNIT_UNKNOWN;
}
}
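The decode loop above treats the MCDI sensor space as 32-entry pages: ordinals 0x1f, 0x3f, 0x5f, ... are next-page flags rather than sensors, which is why the loop advances its page counter whenever sensor % (MC_CMD_SENSOR_PAGE0_NEXT + 1) hits the flag value. A standalone sketch of that arithmetic; the flag value is restated here as 0x1f, per the 0x20 page-size constant and the "0x1f Next page flag" comments in the code this patch removes:

    #include <stdio.h>

    #define EXAMPLE_SENSOR_PAGE0_NEXT	0x1f	/* restated for the sketch */
    #define EXAMPLE_PAGE_SIZE		(EXAMPLE_SENSOR_PAGE0_NEXT + 1)

    int
    main(void)
    {
        unsigned int sensor;

        for (sensor = 0x1d; sensor <= 0x22; sensor++) {
            unsigned int page = sensor / EXAMPLE_PAGE_SIZE;
            int is_flag = (sensor % EXAMPLE_PAGE_SIZE) ==
                EXAMPLE_SENSOR_PAGE0_NEXT;

            printf("sensor 0x%02x: page %u%s\n", sensor, page,
                is_flag ? " (next-page flag, not a sensor)" : "");
        }
        return (0);
    }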
@@ -244,7 +136,7 @@ mcdi_mon_ev(
__out efx_mon_stat_value_t *valuep)
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
- uint16_t port_mask;
+ efx_mon_stat_portmask_t port_mask, sensor_port_mask;
uint16_t sensor;
uint16_t state;
uint16_t value;
@@ -261,20 +153,22 @@ mcdi_mon_ev(
/* Hardware must support this MCDI sensor */
EFSYS_ASSERT3U(sensor, <,
(8 * enp->en_nic_cfg.enc_mcdi_sensor_mask_size));
- EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT);
+ EFSYS_ASSERT((sensor % (MC_CMD_SENSOR_PAGE0_NEXT + 1)) !=
+ MC_CMD_SENSOR_PAGE0_NEXT);
EFSYS_ASSERT(enp->en_nic_cfg.enc_mcdi_sensor_maskp != NULL);
- EFSYS_ASSERT(
- (enp->en_nic_cfg.enc_mcdi_sensor_maskp[sensor/MCDI_MON_PAGE_SIZE] &
- (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
+ EFSYS_ASSERT((enp->en_nic_cfg.enc_mcdi_sensor_maskp[
+ sensor / (MC_CMD_SENSOR_PAGE0_NEXT + 1)] &
+ (1U << (sensor % (MC_CMD_SENSOR_PAGE0_NEXT + 1)))) != 0);
- /* But we don't have to understand it */
- if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) {
+ /* And we need to understand it, to get port-map */
+ if (!efx_mon_mcdi_to_efx_stat(sensor, &id)) {
rc = ENOTSUP;
goto fail1;
}
- id = mcdi_sensor_map[sensor].msm_stat;
- if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ if (!(efx_mon_get_stat_portmap(id, &sensor_port_mask) &&
+	    (port_mask & sensor_port_mask))) {
return (ENODEV);
+ }
EFSYS_ASSERT(id < EFX_MON_NSTATS);
*idp = id;
@@ -297,9 +191,15 @@ efx_mcdi_read_sensors(
__in uint32_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN,
- MC_CMD_READ_SENSORS_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_READ_SENSORS_EXT_IN_LEN,
+ MC_CMD_READ_SENSORS_EXT_OUT_LEN);
uint32_t addr_lo, addr_hi;
+ efx_rc_t rc;
+
+ if (EFSYS_MEM_SIZE(esmp) < size) {
+ rc = EINVAL;
+ goto fail1;
+ }
req.emr_cmd = MC_CMD_READ_SENSORS;
req.emr_in_buf = payload;
@@ -317,6 +217,11 @@ efx_mcdi_read_sensors(
efx_mcdi_execute(enp, &req);
return (req.emr_rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
static __checkReturn efx_rc_t
@@ -325,8 +230,8 @@ efx_mcdi_sensor_info_npages(
__out uint32_t *npagesp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
- MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX);
int page;
efx_rc_t rc;
@@ -369,8 +274,8 @@ efx_mcdi_sensor_info(
__in size_t npages)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
- MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX);
uint32_t page;
efx_rc_t rc;
@@ -429,6 +334,86 @@ fail1:
return (rc);
}
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info_page(
+ __in efx_nic_t *enp,
+ __in uint32_t page,
+ __out uint32_t *mask_part,
+ __out_ecount((sizeof (*mask_part) * 8) - 1)
+ efx_mon_stat_limits_t *limits)
+{
+ efx_mcdi_req_t req;
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ efx_rc_t rc;
+ uint32_t mask_copy;
+ efx_dword_t *maskp;
+ efx_qword_t *limit_info;
+
+ EFSYS_ASSERT(mask_part != NULL);
+ EFSYS_ASSERT(limits != NULL);
+
+ memset(limits, 0,
+ ((sizeof (*mask_part) * 8) - 1) * sizeof (efx_mon_stat_limits_t));
+
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ efx_mcdi_execute(enp, &req);
+
+ rc = req.emr_rc;
+
+ if (rc != 0)
+ goto fail1;
+
+ EFSYS_ASSERT(sizeof (*limit_info) ==
+ MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN);
+ maskp = MCDI_OUT2(req, efx_dword_t, SENSOR_INFO_OUT_MASK);
+ limit_info = (efx_qword_t *)(maskp + 1);
+
+ *mask_part = maskp->ed_u32[0];
+ mask_copy = *mask_part;
+
+ /* Copy an entry for all but the highest bit set. */
+ while (mask_copy) {
+
+ if (mask_copy == (1U << MC_CMD_SENSOR_PAGE0_NEXT)) {
+ /* Only next page bit set. */
+ mask_copy = 0;
+ } else {
+ /* Clear lowest bit */
+ mask_copy = mask_copy & ~(mask_copy ^ (mask_copy - 1));
+ /* And copy out limit entry into buffer */
+ limits->emlv_warning_min = EFX_QWORD_FIELD(*limit_info,
+ MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1);
+
+ limits->emlv_warning_max = EFX_QWORD_FIELD(*limit_info,
+ MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1);
+
+ limits->emlv_fatal_min = EFX_QWORD_FIELD(*limit_info,
+ MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2);
+
+ limits->emlv_fatal_max = EFX_QWORD_FIELD(*limit_info,
+ MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2);
+
+ limits++;
+ limit_info++;
+ }
+ }
+
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
mcdi_mon_stats_update(
__in efx_nic_t *enp,
@@ -457,6 +442,96 @@ fail1:
return (rc);
}
+static void
+lowest_set_bit(
+ __in uint32_t input_mask,
+ __out uint32_t *lowest_bit_mask,
+ __out uint32_t *lowest_bit_num
+)
+{
+ uint32_t x;
+ uint32_t set_bit, bit_index;
+
+ x = (input_mask ^ (input_mask - 1));
+ set_bit = (x + 1) >> 1;
+ if (!set_bit)
+ set_bit = (1U << 31U);
+
+ bit_index = 0;
+ if (set_bit & 0xFFFF0000)
+ bit_index += 16;
+ if (set_bit & 0xFF00FF00)
+ bit_index += 8;
+ if (set_bit & 0xF0F0F0F0)
+ bit_index += 4;
+ if (set_bit & 0xCCCCCCCC)
+ bit_index += 2;
+ if (set_bit & 0xAAAAAAAA)
+ bit_index += 1;
+
+ *lowest_bit_mask = set_bit;
+ *lowest_bit_num = bit_index;
+}
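lowest_set_bit() combines two classic bit tricks: x ^ (x - 1) yields the lowest set bit of x together with every bit below it, so ((x ^ (x - 1)) + 1) >> 1 isolates just that bit (with a wrap-around special case when bit 31 is the lowest one), and the cascade of wide constant masks then binary-searches the bit's index. A standalone check of the same computation on one sample value:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t m = 0x28;		/* 0b101000: lowest set bit is bit 3 */
        uint32_t x = m ^ (m - 1);	/* 0b001111 */
        uint32_t set_bit = (x + 1) >> 1;
        unsigned int idx = 0;

        if (set_bit == 0)		/* x + 1 wrapped: bit 31 was lowest */
            set_bit = 1U << 31;

        if (set_bit & 0xFFFF0000) idx += 16;
        if (set_bit & 0xFF00FF00) idx += 8;
        if (set_bit & 0xF0F0F0F0) idx += 4;
        if (set_bit & 0xCCCCCCCC) idx += 2;
        if (set_bit & 0xAAAAAAAA) idx += 1;

        /* Prints: mask 0x00000028 -> bit 0x00000008, index 3 */
        printf("mask 0x%08x -> bit 0x%08x, index %u\n", m, set_bit, idx);
        return (0);
    }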
+
+ __checkReturn efx_rc_t
+mcdi_mon_limits_update(
+ __in efx_nic_t *enp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values)
+{
+ efx_rc_t rc;
+ uint32_t page;
+ uint32_t page_mask;
+ uint32_t limit_index;
+ efx_mon_stat_limits_t limits[sizeof (page_mask) * 8];
+ efx_mon_stat_t stat;
+
+ page = 0;
+ page--;
+ do {
+ page++;
+
+ rc = efx_mcdi_sensor_info_page(enp, page, &page_mask, limits);
+ if (rc != 0)
+ goto fail1;
+
+ limit_index = 0;
+ while (page_mask) {
+ uint32_t set_bit;
+ uint32_t page_index;
+ uint32_t mcdi_index;
+
+ if (page_mask == (1U << MC_CMD_SENSOR_PAGE0_NEXT))
+ break;
+
+ lowest_set_bit(page_mask, &set_bit, &page_index);
+ page_mask = page_mask & ~set_bit;
+
+ mcdi_index =
+ page_index + (sizeof (page_mask) * 8 * page);
+
+ /*
+ * This can fail if MCDI reports newer stats than the
+ * drivers understand, or the bit is the next page bit.
+ *
+ * Driver needs to be tolerant of this.
+ */
+ if (!efx_mon_mcdi_to_efx_stat(mcdi_index, &stat))
+ continue;
+
+ values[stat] = limits[limit_index];
+ limit_index++;
+ }
+
+ } while (page_mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
mcdi_mon_cfg_build(
__in efx_nic_t *enp)
diff --git a/drivers/net/sfc/base/mcdi_mon.h b/drivers/net/sfc/base/mcdi_mon.h
index 5aa6a6a2..5eba0901 100644
--- a/drivers/net/sfc/base/mcdi_mon.h
+++ b/drivers/net/sfc/base/mcdi_mon.h
@@ -39,6 +39,11 @@ mcdi_mon_stats_update(
__in efsys_mem_t *esmp,
__inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+extern __checkReturn efx_rc_t
+mcdi_mon_limits_update(
+ __in efx_nic_t *enp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_limits_t *values);
+
#endif /* EFSYS_OPT_MON_STATS */
#endif /* EFSYS_OPT_MON_MCDI */
diff --git a/drivers/net/sfc/base/medford2_nic.c b/drivers/net/sfc/base/medford2_nic.c
index 7f5ad175..6bc1e87c 100644
--- a/drivers/net/sfc/base/medford2_nic.c
+++ b/drivers/net/sfc/base/medford2_nic.c
@@ -15,25 +15,15 @@ medford2_nic_get_required_pcie_bandwidth(
__in efx_nic_t *enp,
__out uint32_t *bandwidth_mbpsp)
{
- uint32_t port_modes;
- uint32_t current_mode;
uint32_t bandwidth;
efx_rc_t rc;
/* FIXME: support new Medford2 dynamic port modes */
- if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
- &current_mode)) != 0) {
- /* No port mode info available. */
- bandwidth = 0;
- goto out;
- }
-
- if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ if ((rc = ef10_nic_get_port_mode_bandwidth(enp,
&bandwidth)) != 0)
goto fail1;
-out:
*bandwidth_mbpsp = bandwidth;
return (0);
@@ -96,6 +86,9 @@ medford2_board_cfg(
else
goto fail1;
+ /* Checksums for TSO sends should always be correct on Medford2. */
+ encp->enc_bug61297_workaround = B_FALSE;
+
/* Get clock frequencies (in MHz). */
if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
goto fail2;
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
index 6dc895f5..bfe01ca9 100644
--- a/drivers/net/sfc/base/medford_nic.c
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -15,23 +15,13 @@ medford_nic_get_required_pcie_bandwidth(
__in efx_nic_t *enp,
__out uint32_t *bandwidth_mbpsp)
{
- uint32_t port_modes;
- uint32_t current_mode;
uint32_t bandwidth;
efx_rc_t rc;
- if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
- &current_mode)) != 0) {
- /* No port mode info available. */
- bandwidth = 0;
- goto out;
- }
-
- if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ if ((rc = ef10_nic_get_port_mode_bandwidth(enp,
&bandwidth)) != 0)
goto fail1;
-out:
*bandwidth_mbpsp = bandwidth;
return (0);
@@ -94,6 +84,9 @@ medford_board_cfg(
else
goto fail1;
+ /* Checksums for TSO sends can be incorrect on Medford. */
+ encp->enc_bug61297_workaround = B_TRUE;
+
/* Get clock frequencies (in MHz). */
if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
goto fail2;
diff --git a/drivers/net/sfc/base/meson.build b/drivers/net/sfc/base/meson.build
index da2bf44d..ab66f32f 100644
--- a/drivers/net/sfc/base/meson.build
+++ b/drivers/net/sfc/base/meson.build
@@ -58,6 +58,9 @@ extra_flags = [
]
c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
foreach flag: extra_flags
if cc.has_argument(flag)
c_args += flag
diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c
index f8857cdd..928dfc34 100644
--- a/drivers/net/sfc/base/siena_mac.c
+++ b/drivers/net/sfc/base/siena_mac.c
@@ -68,14 +68,13 @@ siena_mac_reconfigure(
efx_port_t *epp = &(enp->en_port);
efx_oword_t multicast_hash[2];
efx_mcdi_req_t req;
- uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN,
- MC_CMD_SET_MAC_OUT_LEN),
- MAX(MC_CMD_SET_MCAST_HASH_IN_LEN,
- MC_CMD_SET_MCAST_HASH_OUT_LEN))];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MAX(MC_CMD_SET_MAC_IN_LEN, MC_CMD_SET_MCAST_HASH_IN_LEN),
+ MAX(MC_CMD_SET_MAC_OUT_LEN, MC_CMD_SET_MCAST_HASH_OUT_LEN));
+
unsigned int fcntl;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
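
The hunk above and the following siena hunks replace an explicit uint8_t payload[MAX(in, out)] plus memset() with a single EFX_MCDI_DECLARE_BUF() declaration. The real macro lives in the base-library headers touched elsewhere by this patch; a sketch of the general shape such a macro can take, under the assumption that it only needs to size and zero the buffer:

    #include <stdint.h>
    #include <string.h>

    #ifndef MAX
    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #endif

    /* Hypothetical declare-buffer macro: one zero-initialized byte array
     * large enough for both the request and the response.
     */
    #define DECLARE_MCDI_BUF(_name, _in_len, _out_len) \
            uint8_t _name[MAX(_in_len, _out_len)] = { 0 }

    int main(void)
    {
            DECLARE_MCDI_BUF(payload, 12, 252);

            memset(payload, 0xab, 12);  /* request body filled before issuing */
            return payload[0] == 0xab ? 0 : 1;
    }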
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
index 31eef80b..fca17171 100644
--- a/drivers/net/sfc/base/siena_nic.c
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -18,11 +18,10 @@ siena_nic_get_partn_mask(
__out unsigned int *maskp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN,
- MC_CMD_NVRAM_TYPES_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_TYPES_IN_LEN,
+ MC_CMD_NVRAM_TYPES_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_TYPES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN;
@@ -115,6 +114,7 @@ siena_board_cfg(
/* Alignment for WPTR updates */
encp->enc_rx_push_align = 1;
+#if EFSYS_OPT_RX_SCALE
/* There is one RSS context per function */
encp->enc_rx_scale_max_exclusive_contexts = 1;
@@ -129,6 +129,7 @@ siena_board_cfg(
/* There is no support for additional RSS modes */
encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+#endif /* EFSYS_OPT_RX_SCALE */
encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
/* Fragments must not span 4k boundaries. */
diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c
index 8cdd2df7..b8ea8a75 100644
--- a/drivers/net/sfc/base/siena_nvram.c
+++ b/drivers/net/sfc/base/siena_nvram.c
@@ -418,12 +418,11 @@ siena_nvram_get_subtype(
__out uint32_t *subtypep)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
- MC_CMD_GET_BOARD_CFG_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
efx_word_t *fw_list;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_BOARD_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c
index 4b2190d3..7eec9c74 100644
--- a/drivers/net/sfc/base/siena_phy.c
+++ b/drivers/net/sfc/base/siena_phy.c
@@ -169,11 +169,10 @@ siena_phy_get_link(
__out siena_link_state_t *slsp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
- MC_CMD_GET_LINK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
@@ -244,10 +243,9 @@ siena_phy_reconfigure(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN,
- MC_CMD_SET_ID_LED_OUT_LEN),
- MAX(MC_CMD_SET_LINK_IN_LEN,
- MC_CMD_SET_LINK_OUT_LEN))];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MAX(MC_CMD_SET_ID_LED_IN_LEN, MC_CMD_SET_LINK_IN_LEN),
+ MAX(MC_CMD_SET_ID_LED_OUT_LEN, MC_CMD_SET_LINK_OUT_LEN));
uint32_t cap_mask;
#if EFSYS_OPT_PHY_LED_CONTROL
unsigned int led_mode;
@@ -255,7 +253,6 @@ siena_phy_reconfigure(
unsigned int speed;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
@@ -361,12 +358,11 @@ siena_phy_verify(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
- MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN);
uint32_t state;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
@@ -530,8 +526,8 @@ siena_phy_stats_update(
uint32_t vmask = encp->enc_mcdi_phy_stat_mask;
uint64_t smask;
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN,
- MC_CMD_PHY_STATS_OUT_DMA_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PHY_STATS_IN_LEN,
+ MC_CMD_PHY_STATS_OUT_DMA_LEN);
efx_rc_t rc;
if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_PHY_STATS_SIZE)) {
@@ -539,7 +535,6 @@ siena_phy_stats_update(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_PHY_STATS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN;
@@ -626,14 +621,13 @@ siena_phy_bist_poll(
__in size_t count)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
- MCDI_CTL_SDU_LEN_MAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX);
uint32_t value_mask = 0;
efx_mcdi_req_t req;
uint32_t result;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_POLL_BIST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h
index b9d2df58..0b4795da 100644
--- a/drivers/net/sfc/efsys.h
+++ b/drivers/net/sfc/efsys.h
@@ -48,10 +48,6 @@ extern "C" {
#include "efx_types.h"
-#ifndef _NOTE
-#define _NOTE(s)
-#endif
-
typedef bool boolean_t;
#ifndef B_FALSE
@@ -106,40 +102,6 @@ prefetch_read_once(const volatile void *addr)
rte_prefetch_non_temporal(addr);
}
-/* Modifiers used for Windows builds */
-#define __in
-#define __in_opt
-#define __in_ecount(_n)
-#define __in_ecount_opt(_n)
-#define __in_bcount(_n)
-#define __in_bcount_opt(_n)
-
-#define __out
-#define __out_opt
-#define __out_ecount(_n)
-#define __out_ecount_opt(_n)
-#define __out_bcount(_n)
-#define __out_bcount_opt(_n)
-#define __out_bcount_part(_n, _l)
-#define __out_bcount_part_opt(_n, _l)
-
-#define __deref_out
-
-#define __inout
-#define __inout_opt
-#define __inout_ecount(_n)
-#define __inout_ecount_opt(_n)
-#define __inout_bcount(_n)
-#define __inout_bcount_opt(_n)
-#define __inout_bcount_full_opt(_n)
-
-#define __deref_out_bcount_opt(n)
-
-#define __checkReturn
-#define __success(_x)
-
-#define __drv_when(_p, _c)
-
/* Code inclusion options */
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index eda9676c..c246871c 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -57,6 +57,11 @@ struct sfc_dp_tx_qcreate_info {
volatile void *mem_bar;
/** VI window size shift */
unsigned int vi_window_shift;
+ /**
+ * Maximum number of bytes into the packet the TCP header can start for
+ * the hardware to apply TSO packet edits.
+ */
+ uint16_t tso_tcp_header_offset_limit;
};
/**
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 81c8f7fb..a24f54e7 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -123,14 +123,22 @@ static struct rte_mbuf *
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
struct rte_mbuf *mbuf)
{
- return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+ struct rte_mbuf *m;
+
+ m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+ MBUF_RAW_ALLOC_CHECK(m);
+ return m;
}
static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
struct rte_mbuf *mbuf, unsigned int idx)
{
- return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+ struct rte_mbuf *m;
+
+ m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+ MBUF_RAW_ALLOC_CHECK(m);
+ return m;
}
static struct rte_mbuf *
@@ -324,7 +332,7 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
/* Buffers to be discarded have 0 in packet type */
if (unlikely(m->packet_type == 0)) {
- rte_mempool_put(rxq->refill_mb_pool, m);
+ rte_mbuf_raw_free(m);
goto next_buf;
}
@@ -687,7 +695,7 @@ sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
rxq->block_size - rxq->left_in_completed);
while (rxq->left_in_completed > 0) {
- rte_mempool_put(rxq->refill_mb_pool, m);
+ rte_mbuf_raw_free(m);
m = sfc_ef10_essb_next_mbuf(rxq, m);
rxq->left_in_completed--;
}
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 6a5052b9..77ca580b 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -57,12 +57,13 @@ struct sfc_ef10_rxq {
#define SFC_EF10_RXQ_EXCEPTION 0x4
#define SFC_EF10_RXQ_RSS_HASH 0x8
unsigned int ptr_mask;
- unsigned int prepared;
+ unsigned int pending;
unsigned int completed;
unsigned int evq_read_ptr;
efx_qword_t *evq_hw_ring;
struct sfc_ef10_rx_sw_desc *sw_ring;
uint64_t rearm_data;
+ struct rte_mbuf *scatter_pkt;
uint16_t prefix_size;
/* Used on refill */
@@ -133,6 +134,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
struct sfc_ef10_rx_sw_desc *rxd;
rte_iova_t phys_addr;
+ MBUF_RAW_ALLOC_CHECK(m);
+
SFC_ASSERT((id & ~ptr_mask) == 0);
rxd = &rxq->sw_ring[id];
rxd->mbuf = m;
@@ -184,21 +187,26 @@ sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
}
}
-static uint16_t
-sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static struct rte_mbuf **
+sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
- uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->prepared);
- unsigned int completed = rxq->completed;
- unsigned int i;
+ uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);
- rxq->prepared -= n_rx_pkts;
- rxq->completed = completed + n_rx_pkts;
+ SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL);
- for (i = 0; i < n_rx_pkts; ++i, ++completed)
- rx_pkts[i] = rxq->sw_ring[completed & rxq->ptr_mask].mbuf;
+ if (n_rx_pkts != 0) {
+ unsigned int completed = rxq->completed;
+
+ rxq->completed = completed + n_rx_pkts;
+
+ do {
+ *rx_pkts++ =
+ rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf;
+ } while (completed != rxq->completed);
+ }
- return n_rx_pkts;
+ return rx_pkts;
}
static uint16_t
@@ -213,47 +221,80 @@ sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
}
-static uint16_t
+static struct rte_mbuf **
sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
- struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+ struct rte_mbuf **rx_pkts,
+ struct rte_mbuf ** const rx_pkts_end)
{
const unsigned int ptr_mask = rxq->ptr_mask;
- unsigned int completed = rxq->completed;
+ unsigned int pending = rxq->pending;
unsigned int ready;
struct sfc_ef10_rx_sw_desc *rxd;
struct rte_mbuf *m;
struct rte_mbuf *m0;
- uint16_t n_rx_pkts;
const uint8_t *pseudo_hdr;
- uint16_t pkt_len;
+ uint16_t seg_len;
- ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - completed) &
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
- SFC_ASSERT(ready > 0);
+
+ if (ready == 0) {
+ /* Rx abort - there were not enough descriptors for the Rx packet */
+ rte_pktmbuf_free(rxq->scatter_pkt);
+ rxq->scatter_pkt = NULL;
+ return rx_pkts;
+ }
+
+ rxq->pending = pending + ready;
if (rx_ev.eq_u64[0] &
rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
(1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
- SFC_ASSERT(rxq->prepared == 0);
- rxq->completed += ready;
- while (ready-- > 0) {
- rxd = &rxq->sw_ring[completed++ & ptr_mask];
- rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
- }
- return 0;
+ SFC_ASSERT(rxq->completed == pending);
+ do {
+ rxd = &rxq->sw_ring[pending++ & ptr_mask];
+ rte_mbuf_raw_free(rxd->mbuf);
+ } while (pending != rxq->pending);
+ rxq->completed = pending;
+ return rx_pkts;
}
- n_rx_pkts = RTE_MIN(ready, nb_pkts);
- rxq->prepared = ready - n_rx_pkts;
- rxq->completed += n_rx_pkts;
+ /* If a scattered packet is in progress */
+ if (rxq->scatter_pkt != NULL) {
+ /* Events for scattered packet frags are not merged */
+ SFC_ASSERT(ready == 1);
+ SFC_ASSERT(rxq->completed == pending);
- rxd = &rxq->sw_ring[completed++ & ptr_mask];
+ /* There is no pseudo-header in scatter segments. */
+ seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES);
- sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+ rxd = &rxq->sw_ring[pending++ & ptr_mask];
+ m = rxd->mbuf;
- m = rxd->mbuf;
+ MBUF_RAW_ALLOC_CHECK(m);
+
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_pktmbuf_data_len(m) = seg_len;
+ rte_pktmbuf_pkt_len(m) = seg_len;
- *rx_pkts++ = m;
+ rxq->scatter_pkt->nb_segs++;
+ rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len;
+ rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m;
+
+ if (~rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
+ *rx_pkts++ = rxq->scatter_pkt;
+ rxq->scatter_pkt = NULL;
+ }
+ rxq->completed = pending;
+ return rx_pkts;
+ }
+
+ rxd = &rxq->sw_ring[pending++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
+
+ m = rxd->mbuf;
RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
m->rearm_data[0] = rxq->rearm_data;
@@ -275,27 +316,40 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
if (ready == 1)
- pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
+ seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
rxq->prefix_size;
else
- pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
- SFC_ASSERT(pkt_len > 0);
- rte_pktmbuf_data_len(m) = pkt_len;
- rte_pktmbuf_pkt_len(m) = pkt_len;
+ seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(seg_len > 0);
+ rte_pktmbuf_data_len(m) = seg_len;
+ rte_pktmbuf_pkt_len(m) = seg_len;
SFC_ASSERT(m->next == NULL);
+ if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
+ *rx_pkts++ = m;
+ rxq->completed = pending;
+ } else {
+ /* Events with CONT bit are not merged */
+ SFC_ASSERT(ready == 1);
+ rxq->scatter_pkt = m;
+ rxq->completed = pending;
+ return rx_pkts;
+ }
+
/* Remember mbuf to copy offload flags and packet type from */
m0 = m;
- for (--ready; ready > 0; --ready) {
- rxd = &rxq->sw_ring[completed++ & ptr_mask];
+ while (pending != rxq->pending) {
+ rxd = &rxq->sw_ring[pending++ & ptr_mask];
- sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+ sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
m = rxd->mbuf;
- if (ready > rxq->prepared)
+ if (rx_pkts != rx_pkts_end) {
*rx_pkts++ = m;
+ rxq->completed = pending;
+ }
RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
sizeof(rxq->rearm_data));
@@ -315,15 +369,15 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
*/
m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
- pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
- SFC_ASSERT(pkt_len > 0);
- rte_pktmbuf_data_len(m) = pkt_len;
- rte_pktmbuf_pkt_len(m) = pkt_len;
+ seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(seg_len > 0);
+ rte_pktmbuf_data_len(m) = seg_len;
+ rte_pktmbuf_pkt_len(m) = seg_len;
SFC_ASSERT(m->next == NULL);
}
- return n_rx_pkts;
+ return rx_pkts;
}
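
For scattered receive, the event handler above appends each new buffer to the in-progress scatter_pkt chain and only returns the packet to the caller once the continuation bit is clear. A self-contained sketch of the bookkeeping done on the head segment when one more buffer is chained (simplified struct, not the real rte_mbuf layout):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Only the fields the scatter path maintains on the head segment. */
    struct seg {
            struct seg *next;
            uint16_t data_len;  /* bytes in this segment */
            uint32_t pkt_len;   /* total bytes, meaningful in the head only */
            uint16_t nb_segs;   /* segment count, meaningful in the head only */
    };

    /* Append one more received buffer to an in-progress scattered packet. */
    static void scatter_append(struct seg *head, struct seg *tail,
                               struct seg *m, uint16_t seg_len)
    {
            m->next = NULL;
            m->data_len = seg_len;

            head->nb_segs++;
            head->pkt_len += seg_len;
            tail->next = m;
    }

    int main(void)
    {
            struct seg first = { NULL, 100, 100, 1 };
            struct seg second = { 0 };

            scatter_append(&first, &first, &second, 60);
            assert(first.nb_segs == 2 && first.pkt_len == 160);
            assert(first.next == &second && second.data_len == 60);
            return 0;
    }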
static bool
@@ -355,26 +409,25 @@ static uint16_t
sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
+ struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
unsigned int evq_old_read_ptr;
- uint16_t n_rx_pkts;
efx_qword_t rx_ev;
+ rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts);
+
if (unlikely(rxq->flags &
(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
- return 0;
-
- n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+ goto done;
evq_old_read_ptr = rxq->evq_read_ptr;
- while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+ while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
/*
* DROP_EVENT is internal to the NIC; software should
* never see it and, therefore, may ignore it.
*/
- n_rx_pkts += sfc_ef10_rx_process_event(rxq, rx_ev,
- rx_pkts + n_rx_pkts,
- nb_pkts - n_rx_pkts);
+ rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev,
+ rx_pkts, rx_pkts_end);
}
sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
@@ -383,7 +436,8 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
/* It is not a problem if we refill in the case of exception */
sfc_ef10_rx_qrefill(rxq);
- return n_rx_pkts;
+done:
+ return nb_pkts - (rx_pkts_end - rx_pkts);
}
const uint32_t *
@@ -446,21 +500,53 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
static unsigned int
-sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+ efx_qword_t rx_ev;
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ unsigned int pending = rxq->pending;
+ unsigned int ready;
+
+ if (unlikely(rxq->flags &
+ (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
+ goto done;
+
+ while (sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
+ pending) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ pending += ready;
+ }
+
/*
- * Correct implementation requires EvQ polling and events
- * processing (keeping all ready mbufs in prepared).
+ * The function does not process events, so return event queue read
+ * pointer to the original position to allow the events that were
+ * read to be processed later
*/
- return -ENOTSUP;
+ rxq->evq_read_ptr = evq_old_read_ptr;
+
+done:
+ return pending - rxq->completed;
}
static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
static int
-sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
- __rte_unused uint16_t offset)
+sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
- return -ENOTSUP;
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+ unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq);
+
+ if (unlikely(offset > rxq->ptr_mask))
+ return -EINVAL;
+
+ if (offset < npending)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed))
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
}
@@ -594,8 +680,9 @@ sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
- rxq->prepared = 0;
- rxq->completed = rxq->added = 0;
+ SFC_ASSERT(rxq->completed == 0);
+ SFC_ASSERT(rxq->pending == 0);
+ SFC_ASSERT(rxq->added == 0);
sfc_ef10_rx_qrefill(rxq);
@@ -642,12 +729,17 @@ sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
unsigned int i;
struct sfc_ef10_rx_sw_desc *rxd;
+ rte_pktmbuf_free(rxq->scatter_pkt);
+ rxq->scatter_pkt = NULL;
+
for (i = rxq->completed; i != rxq->added; ++i) {
rxd = &rxq->sw_ring[i & rxq->ptr_mask];
- rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rte_mbuf_raw_free(rxd->mbuf);
rxd->mbuf = NULL;
}
+ rxq->completed = rxq->pending = rxq->added = 0;
+
rxq->flags &= ~SFC_EF10_RXQ_STARTED;
}
@@ -657,7 +749,8 @@ struct sfc_dp_rx sfc_ef10_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = SFC_DP_RX_FEAT_MULTI_PROCESS |
+ .features = SFC_DP_RX_FEAT_SCATTER |
+ SFC_DP_RX_FEAT_MULTI_PROCESS |
SFC_DP_RX_FEAT_TUNNELS |
SFC_DP_RX_FEAT_CHECKSUM,
.get_dev_info = sfc_ef10_rx_get_dev_info,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index d0daa3b3..bcd3153f 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -11,6 +11,8 @@
#include <rte_mbuf.h>
#include <rte_io.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
#include "efx.h"
#include "efx_types.h"
@@ -21,6 +23,7 @@
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"
+#include "sfc_tso.h"
#define sfc_ef10_tx_err(dpq, ...) \
SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
@@ -62,6 +65,9 @@ struct sfc_ef10_txq {
efx_qword_t *txq_hw_ring;
volatile void *doorbell;
efx_qword_t *evq_hw_ring;
+ uint8_t *tsoh;
+ rte_iova_t tsoh_iova;
+ uint16_t tso_tcp_header_offset_limit;
/* Datapath transmit queue anchor */
struct sfc_dp_txq dp;
@@ -184,6 +190,30 @@ sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
ESF_DZ_TX_KER_BUF_ADDR, addr);
}
+static void
+sfc_ef10_tx_qdesc_tso2_create(struct sfc_ef10_txq * const txq,
+ unsigned int added, uint16_t ipv4_id,
+ uint16_t outer_ipv4_id, uint32_t tcp_seq,
+ uint16_t tcp_mss)
+{
+ EFX_POPULATE_QWORD_5(txq->txq_hw_ring[added & txq->ptr_mask],
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+ EFX_POPULATE_QWORD_5(txq->txq_hw_ring[(added + 1) & txq->ptr_mask],
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
+ ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
+}
+
static inline void
sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
unsigned int pushed)
@@ -263,6 +293,252 @@ sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
extra_descs_per_pkt);
}
+static bool
+sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
+ unsigned int needed_desc, unsigned int *dma_desc_space,
+ bool *reap_done)
+{
+ if (*reap_done)
+ return false;
+
+ if (added != txq->added) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+ sfc_ef10_tx_reap(txq);
+ *reap_done = true;
+
+ /*
+ * Recalculate DMA descriptor space since Tx reap may change
+ * the number of completed descriptors
+ */
+ *dma_desc_space = txq->max_fill_level -
+ (added - txq->completed);
+
+ return (needed_desc <= *dma_desc_space);
+}
+
+static int
+sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
+ unsigned int *added, unsigned int *dma_desc_space,
+ bool *reap_done)
+{
+ size_t iph_off = m_seg->l2_len;
+ size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
+ size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
+ /* Offset of the payload in the last segment that contains the header */
+ size_t in_off = 0;
+ const struct tcp_hdr *th;
+ uint16_t packet_id;
+ uint32_t sent_seq;
+ uint8_t *hdr_addr;
+ rte_iova_t hdr_iova;
+ struct rte_mbuf *first_m_seg = m_seg;
+ unsigned int pkt_start = *added;
+ unsigned int needed_desc;
+ struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
+ bool eop;
+
+ /* Both checks may be done, so use bitwise OR to take only one branch */
+ if (unlikely((header_len > SFC_TSOH_STD_LEN) |
+ (tcph_off > txq->tso_tcp_header_offset_limit)))
+ return EMSGSIZE;
+
+ /*
+ * Preliminary estimation of required DMA descriptors, including extra
+ * descriptor for TSO header that is needed when the header is
+ * separated from payload in one segment. It does not include
+ * extra descriptors that may appear when a big segment is split across
+ * several descriptors.
+ */
+ needed_desc = m_seg->nb_segs +
+ (unsigned int)SFC_TSO_OPT_DESCS_NUM +
+ (unsigned int)SFC_TSO_HDR_DESCS_NUM;
+
+ if (needed_desc > *dma_desc_space &&
+ !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
+ dma_desc_space, reap_done)) {
+ /*
+ * If a future Tx reap may increase available DMA descriptor
+ * space, do not try to send the packet.
+ */
+ if (txq->completed != pkt_start)
+ return ENOSPC;
+ /*
+ * Do not allow the packet to be sent if the maximum DMA
+ * descriptor space is not sufficient to hold TSO
+ * descriptors, header descriptor and at least 1
+ * segment descriptor.
+ */
+ if (*dma_desc_space < SFC_TSO_OPT_DESCS_NUM +
+ SFC_TSO_HDR_DESCS_NUM + 1)
+ return EMSGSIZE;
+ }
+
+ /* Check if the header is not fragmented */
+ if (rte_pktmbuf_data_len(m_seg) >= header_len) {
+ hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
+ hdr_iova = rte_mbuf_data_iova(m_seg);
+ if (rte_pktmbuf_data_len(m_seg) == header_len) {
+ /*
+ * Associate header mbuf with header descriptor
+ * which is located after TSO descriptors.
+ */
+ txq->sw_ring[(pkt_start + SFC_TSO_OPT_DESCS_NUM) &
+ txq->ptr_mask].mbuf = m_seg;
+ m_seg = m_seg->next;
+ in_off = 0;
+
+ /*
+ * If there is no payload offset (payload starts at the
+ * beginning of a segment) then an extra descriptor for
+ * separated header is not needed.
+ */
+ needed_desc--;
+ } else {
+ in_off = header_len;
+ }
+ } else {
+ unsigned int copied_segs;
+ unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
+ SFC_TSOH_STD_LEN;
+
+ hdr_addr = txq->tsoh + hdr_addr_off;
+ hdr_iova = txq->tsoh_iova + hdr_addr_off;
+ copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
+ &m_seg, &in_off);
+
+ m_seg_to_free_up_to = m_seg;
+ /*
+ * Reduce the number of needed descriptors by the number of
+ * segments that entirely consist of header data.
+ */
+ needed_desc -= copied_segs;
+
+ /* Extra descriptor for separated header is not needed */
+ if (in_off == 0)
+ needed_desc--;
+ }
+
+ switch (first_m_seg->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) {
+ case PKT_TX_IPV4: {
+ const struct ipv4_hdr *iphe4;
+
+ iphe4 = (const struct ipv4_hdr *)(hdr_addr + iph_off);
+ rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
+ packet_id = rte_be_to_cpu_16(packet_id);
+ break;
+ }
+ case PKT_TX_IPV6:
+ packet_id = 0;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
+ rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
+ sent_seq = rte_be_to_cpu_32(sent_seq);
+
+ sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
+ first_m_seg->tso_segsz);
+ (*added) += SFC_TSO_OPT_DESCS_NUM;
+
+ sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
+ &txq->txq_hw_ring[(*added) & txq->ptr_mask]);
+ (*added)++;
+
+ do {
+ rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
+ unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+ unsigned int id;
+
+ next_frag += in_off;
+ seg_len -= in_off;
+ in_off = 0;
+
+ do {
+ rte_iova_t frag_addr = next_frag;
+ size_t frag_len;
+
+ frag_len = RTE_MIN(seg_len,
+ SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ next_frag += frag_len;
+ seg_len -= frag_len;
+
+ eop = (seg_len == 0 && m_seg->next == NULL);
+
+ id = (*added) & txq->ptr_mask;
+ (*added)++;
+
+ /*
+ * Initially we assume that one DMA descriptor is needed
+ * for every segment. When the segment is split across
+ * several DMA descriptors, increase the estimation.
+ */
+ needed_desc += (seg_len != 0);
+
+ /*
+ * Handle the case when no more descriptors can be added,
+ * but not all segments have been processed yet.
+ */
+ if (*added - pkt_start == *dma_desc_space &&
+ !eop &&
+ !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
+ dma_desc_space, reap_done)) {
+ struct rte_mbuf *m;
+ struct rte_mbuf *m_next;
+
+ if (txq->completed != pkt_start) {
+ unsigned int i;
+
+ /*
+ * Reset mbuf associations with added
+ * descriptors.
+ */
+ for (i = pkt_start; i != *added; i++) {
+ id = i & txq->ptr_mask;
+ txq->sw_ring[id].mbuf = NULL;
+ }
+ return ENOSPC;
+ }
+
+ /* Free the segments that cannot be sent */
+ for (m = m_seg->next; m != NULL; m = m_next) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ }
+ eop = true;
+ /* Ignore the rest of the segment */
+ seg_len = 0;
+ }
+
+ sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
+ eop, &txq->txq_hw_ring[id]);
+
+ } while (seg_len != 0);
+
+ txq->sw_ring[id].mbuf = m_seg;
+
+ m_seg = m_seg->next;
+ } while (!eop);
+
+ /*
+ * Free the segments whose content was entirely copied to the TSO
+ * header memory space of the Tx queue
+ */
+ for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
+ struct rte_mbuf *seg_to_free = m_seg;
+
+ m_seg = m_seg->next;
+ rte_pktmbuf_free_seg(seg_to_free);
+ }
+
+ return 0;
+}
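
The preliminary estimate above is the segment count plus the TSO option descriptors plus one header descriptor, and it grows whenever a segment has to be split across several DMA descriptors. A small worked sketch of that count, with hypothetical constants standing in for SFC_TSO_OPT_DESCS_NUM, SFC_TSO_HDR_DESCS_NUM and SFC_EF10_TX_DMA_DESC_LEN_MAX:

    #include <assert.h>
    #include <stddef.h>

    #define OPT_DESCS   2       /* assumption: two TSO option descriptors */
    #define HDR_DESCS   1       /* assumption: one header descriptor */
    #define DMA_LEN_MAX 16384   /* assumption: max bytes per DMA descriptor */

    /* Estimate DMA descriptors for a TSO packet given its segment lengths. */
    static unsigned int tso_desc_count(const size_t *seg_len, unsigned int nb_segs)
    {
            unsigned int needed = nb_segs + OPT_DESCS + HDR_DESCS;
            unsigned int i;

            for (i = 0; i < nb_segs; i++) {
                    size_t left = seg_len[i];

                    /* Each extra DMA_LEN_MAX chunk needs one more descriptor. */
                    while (left > DMA_LEN_MAX) {
                            needed++;
                            left -= DMA_LEN_MAX;
                    }
            }
            return needed;
    }

    int main(void)
    {
            const size_t segs[] = { 40000, 1500 }; /* first segment spans 3 descriptors */

            /* 2 segments + 2 option + 1 header + 2 extra chunks = 7 */
            assert(tso_desc_count(segs, 2) == 7);
            return 0;
    }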
+
static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -296,6 +572,30 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (likely(pktp + 1 != pktp_end))
rte_mbuf_prefetch_part1(pktp[1]);
+ if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ int rc;
+
+ rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added,
+ &dma_desc_space, &reap_done);
+ if (rc != 0) {
+ added = pkt_start;
+
+ /* Packet can be sent in following xmit calls */
+ if (likely(rc == ENOSPC))
+ break;
+
+ /*
+ * The packet cannot be sent; report it to RTE as
+ * sent, but actually drop it and continue with the
+ * next packet
+ */
+ rte_pktmbuf_free(*pktp);
+ continue;
+ }
+
+ goto dma_desc_space_update;
+ }
+
if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
if (reap_done)
break;
@@ -349,6 +649,7 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
} while ((m_seg = m_seg->next) != 0);
+dma_desc_space_update:
dma_desc_space -= (added - pkt_start);
}
@@ -524,6 +825,18 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
if (txq->sw_ring == NULL)
goto fail_sw_ring_alloc;
+ if (info->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
+ info->txq_entries,
+ SFC_TSOH_STD_LEN,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (txq->tsoh == NULL)
+ goto fail_tsoh_alloc;
+
+ txq->tsoh_iova = rte_malloc_virt2iova(txq->tsoh);
+ }
+
txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
txq->ptr_mask = info->txq_entries - 1;
txq->max_fill_level = info->max_fill_level;
@@ -533,10 +846,14 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
ER_DZ_TX_DESC_UPD_REG_OFST +
(info->hw_index << info->vi_window_shift);
txq->evq_hw_ring = info->evq_hw_ring;
+ txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
*dp_txqp = &txq->dp;
return 0;
+fail_tsoh_alloc:
+ rte_free(txq->sw_ring);
+
fail_sw_ring_alloc:
rte_free(txq);
@@ -551,6 +868,7 @@ sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+ rte_free(txq->tsoh);
rte_free(txq->sw_ring);
rte_free(txq);
}
@@ -618,12 +936,49 @@ sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
txq->flags &= ~SFC_EF10_TXQ_STARTED;
}
+static unsigned int
+sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq)
+{
+ const unsigned int curr_done = txq->completed - 1;
+ unsigned int anew_done = curr_done;
+ efx_qword_t tx_ev;
+ const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ while (sfc_ef10_tx_get_event(txq, &tx_ev))
+ anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
+
+ /*
+ * The function does not process events, so restore the event queue
+ * read pointer to its original position to allow the events that
+ * were read here to be processed later.
+ */
+ txq->evq_read_ptr = evq_old_read_ptr;
+
+ return (anew_done - curr_done) & txq->ptr_mask;
+}
+
static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
static int
-sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
- __rte_unused uint16_t offset)
+sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq,
+ uint16_t offset)
{
- return -ENOTSUP;
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+ unsigned int npending = sfc_ef10_tx_qdesc_npending(txq);
+
+ if (unlikely(offset > txq->ptr_mask))
+ return -EINVAL;
+
+ if (unlikely(offset >= txq->max_fill_level))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+ if (unlikely(offset < npending))
+ return RTE_ETH_TX_DESC_FULL;
+
+ return RTE_ETH_TX_DESC_DONE;
}
struct sfc_dp_tx sfc_ef10_tx = {
@@ -632,7 +987,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
.type = SFC_DP_TX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = SFC_DP_TX_FEAT_MULTI_SEG |
+ .features = SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_SEG |
SFC_DP_TX_FEAT_MULTI_POOL |
SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_PROCESS,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 9decbf5a..3886daf7 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -171,6 +171,9 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
sa->dp_rx->get_dev_info(dev_info);
if (sa->dp_tx->get_dev_info != NULL)
sa->dp_tx->get_dev_info(dev_info);
+
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
}
static const uint32_t *
@@ -441,8 +444,6 @@ sfc_rx_queue_release(void *queue)
sfc_log_init(sa, "RxQ=%u", sw_index);
- sa->eth_dev->data->rx_queues[sw_index] = NULL;
-
sfc_rx_qfini(sa, sw_index);
sfc_adapter_unlock(sa);
@@ -497,9 +498,6 @@ sfc_tx_queue_release(void *queue)
sfc_adapter_lock(sa);
- SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
- sa->eth_dev->data->tx_queues[sw_index] = NULL;
-
sfc_tx_qfini(sa, sw_index);
sfc_adapter_unlock(sa);
@@ -1143,6 +1141,9 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
+ if (sa->rxq_info[rx_queue_id].rxq == NULL)
+ goto fail_not_setup;
+
rc = sfc_rx_qstart(sa, rx_queue_id);
if (rc != 0)
goto fail_rx_qstart;
@@ -1154,6 +1155,7 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return 0;
fail_rx_qstart:
+fail_not_setup:
fail_not_started:
sfc_adapter_unlock(sa);
SFC_ASSERT(rc > 0);
@@ -1191,6 +1193,9 @@ sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
+ if (sa->txq_info[tx_queue_id].txq == NULL)
+ goto fail_not_setup;
+
rc = sfc_tx_qstart(sa, tx_queue_id);
if (rc != 0)
goto fail_tx_qstart;
@@ -1202,6 +1207,7 @@ sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
fail_tx_qstart:
+fail_not_setup:
fail_not_started:
sfc_adapter_unlock(sa);
SFC_ASSERT(rc > 0);
@@ -1348,14 +1354,10 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
{
struct sfc_adapter *sa = dev->data->dev_private;
struct sfc_rss *rss = &sa->rss;
- struct sfc_port *port = &sa->port;
- if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
return -ENOTSUP;
- if (rss->channels == 0)
- return -EINVAL;
-
sfc_adapter_lock(sa);
/*
@@ -2033,9 +2035,6 @@ sfc_eth_dev_uninit(struct rte_eth_dev *dev)
sfc_detach(sa);
sfc_unprobe(sa);
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
sfc_kvargs_cleanup(sa);
sfc_adapter_unlock(sa);
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index d8503e20..c792e0b2 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -96,13 +96,12 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
++i, id = (id + 1) & rxq->ptr_mask) {
m = objs[i];
+ MBUF_RAW_ALLOC_CHECK(m);
+
rxd = &rxq->sw_desc[id];
rxd->mbuf = m;
- SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
m->data_off = RTE_PKTMBUF_HEADROOM;
- SFC_ASSERT(m->next == NULL);
- SFC_ASSERT(m->nb_segs == 1);
m->port = port_id;
addr[i] = rte_pktmbuf_iova(m);
@@ -296,7 +295,7 @@ sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
discard:
discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
- rte_mempool_put(rxq->refill_mb_pool, m);
+ rte_mbuf_raw_free(m);
rxd->mbuf = NULL;
}
@@ -498,7 +497,7 @@ sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
for (i = rxq->completed; i != rxq->added; ++i) {
rxd = &rxq->sw_desc[i & rxq->ptr_mask];
- rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rte_mbuf_raw_free(rxd->mbuf);
rxd->mbuf = NULL;
/* Packed stream relies on 0 in inactive SW desc.
* Rx queue stop is not performance critical, so
@@ -673,6 +672,7 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
rxq_info = &sa->rxq_info[sw_index];
rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
evq = rxq->evq;
@@ -763,7 +763,7 @@ sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
rxq_info = &sa->rxq_info[sw_index];
rxq = rxq_info->rxq;
- if (rxq->state == SFC_RXQ_INITIALIZED)
+ if (rxq == NULL || rxq->state == SFC_RXQ_INITIALIZED)
return;
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
@@ -792,7 +792,6 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
uint64_t caps = 0;
caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- caps |= DEV_RX_OFFLOAD_CRC_STRIP;
if (sa->dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
@@ -1103,6 +1102,7 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_rxq *rxq;
SFC_ASSERT(sw_index < sa->rxq_count);
+ sa->eth_dev->data->rx_queues[sw_index] = NULL;
rxq_info = &sa->rxq_info[sw_index];
@@ -1126,7 +1126,7 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
/*
* Mapping between RTE RSS hash functions and their EFX counterparts.
*/
-struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
+static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
{ ETH_RSS_NONFRAG_IPV4_TCP,
EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
{ ETH_RSS_NONFRAG_IPV4_UDP,
@@ -1200,7 +1200,7 @@ sfc_rx_hash_init(struct sfc_adapter *sa)
return EINVAL;
rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
- &nb_flags_supp);
+ RTE_DIM(flags_supp), &nb_flags_supp);
if (rc != 0)
return rc;
@@ -1363,7 +1363,8 @@ sfc_rx_start(struct sfc_adapter *sa)
goto fail_rss_config;
for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
- if ((!sa->rxq_info[sw_index].deferred_start ||
+ if (sa->rxq_info[sw_index].rxq != NULL &&
+ (!sa->rxq_info[sw_index].deferred_start ||
sa->rxq_info[sw_index].deferred_started)) {
rc = sfc_rx_qstart(sa, sw_index);
if (rc != 0)
@@ -1439,14 +1440,6 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
rc = EINVAL;
}
- /* KEEP_CRC offload flag is not supported by PMD
- * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
- */
- if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
- sfc_warn(sa, "FCS stripping cannot be disabled - always on");
- rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
/*
* Requested offloads are validated against supported by ethdev,
* so unsupported offloads cannot be added as the result of
@@ -1511,7 +1504,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
goto fail_check_mode;
if (nb_rx_queues == sa->rxq_count)
- goto done;
+ goto configure_rss;
if (sa->rxq_info == NULL) {
rc = ENOMEM;
@@ -1548,6 +1541,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
sa->rxq_count++;
}
+configure_rss:
rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
MIN(sa->rxq_count, EFX_MAXRSS) : 0;
@@ -1564,7 +1558,6 @@ sfc_rx_configure(struct sfc_adapter *sa)
goto fail_rx_process_adv_conf_rss;
}
-done:
return 0;
fail_rx_process_adv_conf_rss:
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index effe9853..076a25d4 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -14,12 +14,7 @@
#include "sfc_debug.h"
#include "sfc_tx.h"
#include "sfc_ev.h"
-
-/** Standard TSO header length */
-#define SFC_TSOH_STD_LEN 256
-
-/** The number of TSO option descriptors that precede the packet descriptors */
-#define SFC_TSO_OPDESCS_IDX_SHIFT 2
+#include "sfc_tso.h"
int
sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
@@ -57,13 +52,14 @@ sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
}
}
-static void
-sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
- size_t *in_off, unsigned int idx, size_t bytes_left)
+unsigned int
+sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,
+ struct rte_mbuf **in_seg, size_t *in_off)
{
struct rte_mbuf *m = *in_seg;
size_t bytes_to_copy = 0;
- uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+ size_t bytes_left = header_len;
+ unsigned int segments_copied = 0;
do {
bytes_to_copy = MIN(bytes_left, m->data_len);
@@ -77,16 +73,20 @@ sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
if (bytes_left > 0) {
m = m->next;
SFC_ASSERT(m != NULL);
+ segments_copied++;
}
} while (bytes_left > 0);
if (bytes_to_copy == m->data_len) {
*in_seg = m->next;
*in_off = 0;
+ segments_copied++;
} else {
*in_seg = m;
*in_off = bytes_to_copy;
}
+
+ return segments_copied;
}
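
sfc_tso_prepare_header() above gathers the first header_len bytes of a segmented packet into a flat buffer and reports how many segments were consumed entirely, together with the payload offset inside the segment where copying stopped. A self-contained sketch of the same walk over a simplified buffer chain (hypothetical struct buf, not the real mbuf API):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct buf {
            struct buf *next;
            const uint8_t *data;
            size_t len;
    };

    /* Copy the first header_len bytes of a chain into tsoh; on return,
     * *in_seg/*in_off point at the first payload byte and the number of
     * fully consumed buffers is returned.
     */
    static unsigned int prepare_header(uint8_t *tsoh, size_t header_len,
                                       struct buf **in_seg, size_t *in_off)
    {
            struct buf *b = *in_seg;
            size_t left = header_len;
            size_t chunk = 0;
            unsigned int consumed = 0;

            do {
                    chunk = (left < b->len) ? left : b->len;
                    memcpy(tsoh + (header_len - left), b->data, chunk);
                    left -= chunk;
                    if (left > 0) {
                            b = b->next;
                            consumed++;
                    }
            } while (left > 0);

            if (chunk == b->len) {
                    *in_seg = b->next;  /* header ended on a segment boundary */
                    *in_off = 0;
                    consumed++;
            } else {
                    *in_seg = b;        /* payload continues in this segment */
                    *in_off = chunk;
            }
            return consumed;
    }

    int main(void)
    {
            const uint8_t d0[10] = { 0 };
            const uint8_t d1[20] = { 0 };
            struct buf b1 = { NULL, d1, sizeof(d1) };
            struct buf b0 = { &b1, d0, sizeof(d0) };
            struct buf *seg = &b0;
            size_t off = 0;
            uint8_t hdr[14];

            /* 14-byte header: all of b0 plus 4 bytes of b1. */
            assert(prepare_header(hdr, sizeof(hdr), &seg, &off) == 1);
            assert(seg == &b1 && off == 4);
            return 0;
    }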
int
@@ -105,7 +105,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
size_t header_len = m->l2_len + m->l3_len + m->l4_len;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
- idx += SFC_TSO_OPDESCS_IDX_SHIFT;
+ idx += SFC_TSO_OPT_DESCS_NUM;
/* Packets which have too big headers should be discarded */
if (unlikely(header_len > SFC_TSOH_STD_LEN))
@@ -129,9 +129,8 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
* limitations on address boundaries crossing by DMA descriptor data.
*/
if (m->data_len < header_len) {
- sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
- header_len);
tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+ sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);
header_paddr = rte_malloc_virt2iova((void *)tsoh);
} else {
diff --git a/drivers/net/sfc/sfc_tso.h b/drivers/net/sfc/sfc_tso.h
new file mode 100644
index 00000000..3d2faf54
--- /dev/null
+++ b/drivers/net/sfc/sfc_tso.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/** Standard TSO header length */
+#define SFC_TSOH_STD_LEN 256
+
+/** The number of TSO option descriptors that precede the packet descriptors */
+#define SFC_TSO_OPT_DESCS_NUM 2
+
+/**
+ * The number of DMA descriptors for TSO header that may or may not precede the
+ * packet's payload descriptors
+ */
+#define SFC_TSO_HDR_DESCS_NUM 1
+
+unsigned int sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,
+ struct rte_mbuf **in_seg, size_t *in_off);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 6d42a1a6..147f9336 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -190,6 +190,8 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.hw_index = txq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
info.vi_window_shift = encp->enc_vi_window_shift;
+ info.tso_tcp_header_offset_limit =
+ encp->enc_tx_tso_tcp_header_offset_limit;
rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
@@ -233,6 +235,8 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
sfc_log_init(sa, "TxQ = %u", sw_index);
SFC_ASSERT(sw_index < sa->txq_count);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
txq_info = &sa->txq_info[sw_index];
txq = txq_info->txq;
@@ -421,6 +425,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
txq = txq_info->txq;
+ SFC_ASSERT(txq != NULL);
SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
evq = txq->evq;
@@ -501,7 +506,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
txq = txq_info->txq;
- if (txq->state == SFC_TXQ_INITIALIZED)
+ if (txq == NULL || txq->state == SFC_TXQ_INITIALIZED)
return;
SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
@@ -578,8 +583,9 @@ sfc_tx_start(struct sfc_adapter *sa)
goto fail_efx_tx_init;
for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
- if (!(sa->txq_info[sw_index].deferred_start) ||
- sa->txq_info[sw_index].deferred_started) {
+ if (sa->txq_info[sw_index].txq != NULL &&
+ (!(sa->txq_info[sw_index].deferred_start) ||
+ sa->txq_info[sw_index].deferred_started)) {
rc = sfc_tx_qstart(sa, sw_index);
if (rc != 0)
goto fail_tx_qstart;
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
index ea9b65f4..484e76cd 100644
--- a/drivers/net/softnic/Makefile
+++ b/drivers/net/softnic/Makefile
@@ -14,6 +14,7 @@ CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_pipeline -lrte_port -lrte_table
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched
+LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
EXPORT_MAP := rte_pmd_softnic_version.map
@@ -33,6 +34,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_meter.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cryptodev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c
diff --git a/drivers/net/softnic/conn.c b/drivers/net/softnic/conn.c
index 990cf40f..8b665808 100644
--- a/drivers/net/softnic/conn.c
+++ b/drivers/net/softnic/conn.c
@@ -8,7 +8,6 @@
#include <unistd.h>
#include <sys/types.h>
-#define __USE_GNU
#include <sys/socket.h>
#include <sys/epoll.h>
diff --git a/drivers/net/softnic/hash_func.h b/drivers/net/softnic/hash_func.h
deleted file mode 100644
index 198d2b20..00000000
--- a/drivers/net/softnic/hash_func.h
+++ /dev/null
@@ -1,359 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2018 Intel Corporation
- */
-
-#ifndef __INCLUDE_HASH_FUNC_H__
-#define __INCLUDE_HASH_FUNC_H__
-
-#include <rte_common.h>
-
-static inline uint64_t
-hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = seed ^ (k[0] & m[0]);
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
-
- xor0 ^= k[2] & m[2];
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
-
- xor0 ^= xor1;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
-
- xor0 ^= xor1;
-
- xor0 ^= k[4] & m[4];
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
-
- xor0 ^= xor1;
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
-
- xor0 ^= xor1;
- xor2 ^= k[6] & m[6];
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2, xor3;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
- xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
-
- xor0 ^= xor1;
- xor2 ^= xor3;
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-#if defined(RTE_ARCH_X86_64)
-
-#include <x86intrin.h>
-
-static inline uint64_t
-hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t crc0;
-
- crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, crc0, crc1;
-
- k0 = k[0] & m[0];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc0 = _mm_crc32_u64(crc0, k2);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = k2 >> 32;
-
- crc0 = _mm_crc32_u64(crc0, crc1);
- crc1 = _mm_crc32_u64(crc2, crc3);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc0 = _mm_crc32_u64(crc0, crc1);
- crc1 = _mm_crc32_u64(crc2, crc3);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, k5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
- crc5 = k5 >> 32;
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
- crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-#define hash_default_key8 hash_crc_key8
-#define hash_default_key16 hash_crc_key16
-#define hash_default_key24 hash_crc_key24
-#define hash_default_key32 hash_crc_key32
-#define hash_default_key40 hash_crc_key40
-#define hash_default_key48 hash_crc_key48
-#define hash_default_key56 hash_crc_key56
-#define hash_default_key64 hash_crc_key64
-
-#elif defined(RTE_ARCH_ARM64)
-#include "hash_func_arm64.h"
-#else
-
-#define hash_default_key8 hash_xor_key8
-#define hash_default_key16 hash_xor_key16
-#define hash_default_key24 hash_xor_key24
-#define hash_default_key32 hash_xor_key32
-#define hash_default_key40 hash_xor_key40
-#define hash_default_key48 hash_xor_key48
-#define hash_default_key56 hash_xor_key56
-#define hash_default_key64 hash_xor_key64
-
-#endif
-
-#endif
diff --git a/drivers/net/softnic/hash_func_arm64.h b/drivers/net/softnic/hash_func_arm64.h
deleted file mode 100644
index ae6c0f41..00000000
--- a/drivers/net/softnic/hash_func_arm64.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Linaro Limited. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __HASH_FUNC_ARM64_H__
-#define __HASH_FUNC_ARM64_H__
-
-#define _CRC32CX(crc, val) \
- __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
-
-static inline uint64_t
-hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint32_t crc0;
-
- crc0 = seed;
- _CRC32CX(crc0, k[0] & m[0]);
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0;
- uint64_t *m = mask;
- uint32_t crc0, crc1;
-
- k0 = k[0] & m[0];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- _CRC32CX(crc0, k2);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
-
- _CRC32CX(crc0, crc1);
- _CRC32CX(crc2, crc3);
-
- crc0 ^= crc2;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- _CRC32CX(crc0, crc1);
- _CRC32CX(crc2, crc3);
-
- crc0 ^= crc2;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, k5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- crc4 = k5;
- _CRC32CX(crc4, k[6] & m[6]);
- crc5 = k5 >> 32;
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- crc4 = k5;
- _CRC32CX(crc4, k[6] & m[6]);
- crc5 = k5 >> 32;
- _CRC32CX(crc5, k[7] & m[7]);
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-#define hash_default_key8 hash_crc_key8
-#define hash_default_key16 hash_crc_key16
-#define hash_default_key24 hash_crc_key24
-#define hash_default_key32 hash_crc_key32
-#define hash_default_key40 hash_crc_key40
-#define hash_default_key48 hash_crc_key48
-#define hash_default_key56 hash_crc_key56
-#define hash_default_key64 hash_crc_key64
-
-#endif
diff --git a/drivers/net/softnic/meson.build b/drivers/net/softnic/meson.build
index ff982274..da249c06 100644
--- a/drivers/net/softnic/meson.build
+++ b/drivers/net/softnic/meson.build
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
+if host_machine.system() != 'linux'
+ build = false
+endif
allow_experimental_apis = true
install_headers('rte_eth_softnic.h')
sources = files('rte_eth_softnic_tm.c',
@@ -13,6 +16,9 @@ sources = files('rte_eth_softnic_tm.c',
'rte_eth_softnic_pipeline.c',
'rte_eth_softnic_thread.c',
'rte_eth_softnic_cli.c',
+ 'rte_eth_softnic_flow.c',
+ 'rte_eth_softnic_meter.c',
+ 'rte_eth_softnic_cryptodev.c',
'parser.c',
'conn.c')
-deps += ['pipeline', 'port', 'table', 'sched']
+deps += ['pipeline', 'port', 'table', 'sched', 'cryptodev']
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 30fb3952..743a7c58 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -14,6 +14,7 @@
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_tm_driver.h>
+#include <rte_mtr_driver.h>
#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"
@@ -27,7 +28,7 @@
#define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
#define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
-static const char *pmd_valid_args[] = {
+static const char * const pmd_valid_args[] = {
PMD_PARAM_FIRMWARE,
PMD_PARAM_CONN_PORT,
PMD_PARAM_CPU_ID,
@@ -46,7 +47,7 @@ static const char welcome[] =
static const char prompt[] = "softnic> ";
-struct softnic_conn_params conn_params_default = {
+static const struct softnic_conn_params conn_params_default = {
.welcome = welcome,
.prompt = prompt,
.addr = "0.0.0.0",
@@ -73,7 +74,6 @@ static const struct rte_eth_dev_info pmd_dev_info = {
.nb_min = 0,
.nb_align = 1,
},
- .rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP,
};
static int pmd_softnic_logtype;
@@ -190,6 +190,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
softnic_mempool_free(p);
tm_hierarchy_free(p);
+ softnic_mtr_free(p);
}
static void
@@ -206,6 +207,21 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused,
}
static int
+pmd_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ if (filter_type == RTE_ETH_FILTER_GENERIC &&
+ filter_op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &pmd_flow_ops;
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
*(const struct rte_tm_ops **)arg = &pmd_tm_ops;
@@ -213,6 +229,14 @@ pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
return 0;
}
+static int
+pmd_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+ *(const struct rte_mtr_ops **)arg = &pmd_mtr_ops;
+
+ return 0;
+}
+
static const struct eth_dev_ops pmd_ops = {
.dev_configure = pmd_dev_configure,
.dev_start = pmd_dev_start,
@@ -222,7 +246,9 @@ static const struct eth_dev_ops pmd_ops = {
.dev_infos_get = pmd_dev_infos_get,
.rx_queue_setup = pmd_rx_queue_setup,
.tx_queue_setup = pmd_tx_queue_setup,
+ .filter_ctrl = pmd_filter_ctrl,
.tm_ops_get = pmd_tm_ops_get,
+ .mtr_ops_get = pmd_mtr_ops_get,
};
static uint16_t
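With .filter_ctrl and .mtr_ops_get wired into pmd_ops, an application reaches the softnic flow and meter back-ends through the generic ethdev entry points. A minimal sketch of the lookup that pmd_filter_ctrl serves, assuming port_id is an initialized softnic port (in practice rte_flow_create()/rte_flow_validate() perform this query internally):

    #include <rte_ethdev.h>
    #include <rte_flow_driver.h>

    const struct rte_flow_ops *flow_ops = NULL;
    int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
                                      RTE_ETH_FILTER_GET, &flow_ops);
    /* On success, flow_ops points at the pmd_flow_ops table that
     * rte_eth_softnic_flow.c provides. */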
@@ -265,12 +291,14 @@ pmd_init(struct pmd_params *params)
/* Resources */
tm_hierarchy_init(p);
+ softnic_mtr_init(p);
softnic_mempool_init(p);
softnic_swq_init(p);
softnic_link_init(p);
softnic_tmgr_init(p);
softnic_tap_init(p);
+ softnic_cryptodev_init(p);
softnic_port_in_action_profile_init(p);
softnic_table_action_profile_init(p);
softnic_pipeline_init(p);
@@ -319,6 +347,7 @@ pmd_free(struct pmd_internals *p)
softnic_mempool_free(p);
tm_hierarchy_free(p);
+ softnic_mtr_free(p);
rte_free(p);
}
@@ -528,7 +557,6 @@ static int
pmd_remove(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *dev = NULL;
- struct pmd_internals *p;
if (!vdev)
return -EINVAL;
@@ -539,12 +567,12 @@ pmd_remove(struct rte_vdev_device *vdev)
dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
if (dev == NULL)
return -ENODEV;
- p = dev->data->dev_private;
/* Free device data structures*/
- rte_free(dev->data);
+ pmd_free(dev->data->dev_private);
+ dev->data->dev_private = NULL; /* already freed */
+ dev->data->mac_addrs = NULL; /* statically allocated */
rte_eth_dev_release_port(dev);
- pmd_free(p);
return 0;
}
diff --git a/drivers/net/softnic/rte_eth_softnic_action.c b/drivers/net/softnic/rte_eth_softnic_action.c
index c25f4dd9..92c744dc 100644
--- a/drivers/net/softnic/rte_eth_softnic_action.c
+++ b/drivers/net/softnic/rte_eth_softnic_action.c
@@ -7,8 +7,8 @@
#include <string.h>
#include <rte_string_fns.h>
+#include <rte_table_hash_func.h>
-#include "hash_func.h"
#include "rte_eth_softnic_internals.h"
/**
@@ -72,35 +72,35 @@ softnic_port_in_action_profile_create(struct pmd_internals *p,
params->lb.f_hash == NULL) {
switch (params->lb.key_size) {
case 8:
- params->lb.f_hash = hash_default_key8;
+ params->lb.f_hash = rte_table_hash_crc_key8;
break;
case 16:
- params->lb.f_hash = hash_default_key16;
+ params->lb.f_hash = rte_table_hash_crc_key16;
break;
case 24:
- params->lb.f_hash = hash_default_key24;
+ params->lb.f_hash = rte_table_hash_crc_key24;
break;
case 32:
- params->lb.f_hash = hash_default_key32;
+ params->lb.f_hash = rte_table_hash_crc_key32;
break;
case 40:
- params->lb.f_hash = hash_default_key40;
+ params->lb.f_hash = rte_table_hash_crc_key40;
break;
case 48:
- params->lb.f_hash = hash_default_key48;
+ params->lb.f_hash = rte_table_hash_crc_key48;
break;
case 56:
- params->lb.f_hash = hash_default_key56;
+ params->lb.f_hash = rte_table_hash_crc_key56;
break;
case 64:
- params->lb.f_hash = hash_default_key64;
+ params->lb.f_hash = rte_table_hash_crc_key64;
break;
default:
@@ -223,35 +223,35 @@ softnic_table_action_profile_create(struct pmd_internals *p,
params->lb.f_hash == NULL) {
switch (params->lb.key_size) {
case 8:
- params->lb.f_hash = hash_default_key8;
+ params->lb.f_hash = rte_table_hash_crc_key8;
break;
case 16:
- params->lb.f_hash = hash_default_key16;
+ params->lb.f_hash = rte_table_hash_crc_key16;
break;
case 24:
- params->lb.f_hash = hash_default_key24;
+ params->lb.f_hash = rte_table_hash_crc_key24;
break;
case 32:
- params->lb.f_hash = hash_default_key32;
+ params->lb.f_hash = rte_table_hash_crc_key32;
break;
case 40:
- params->lb.f_hash = hash_default_key40;
+ params->lb.f_hash = rte_table_hash_crc_key40;
break;
case 48:
- params->lb.f_hash = hash_default_key48;
+ params->lb.f_hash = rte_table_hash_crc_key48;
break;
case 56:
- params->lb.f_hash = hash_default_key56;
+ params->lb.f_hash = rte_table_hash_crc_key56;
break;
case 64:
- params->lb.f_hash = hash_default_key64;
+ params->lb.f_hash = rte_table_hash_crc_key64;
break;
default:
@@ -364,6 +364,39 @@ softnic_table_action_profile_create(struct pmd_internals *p,
}
}
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TAG,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_DECAP,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_SYM_CRYPTO,
+ &params->sym_crypto);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
status = rte_table_action_profile_freeze(ap);
if (status) {
rte_table_action_profile_free(ap);
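The driver-local hash_func.h/hash_func_arm64.h copies deleted above are replaced by the shared librte_table header, so both profile paths now take their CRC hash from rte_table_hash_func.h. A minimal sketch of what the selected callback computes, assuming a 16-byte key and the same prototype as the deleted hash_crc_key16 (key_size is ignored by these CRC variants, as in the deleted copies):

    #include <rte_table_hash_func.h>

    uint64_t key[2]  = { 0x0123456789abcdefULL, 0x0f1e2d3c4b5a6978ULL };
    uint64_t mask[2] = { UINT64_MAX, UINT64_MAX };
    /* seed chosen arbitrarily for illustration */
    uint64_t sig = rte_table_hash_crc_key16(key, mask, 16, 0);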
diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c
index 0c7448cc..c6640d65 100644
--- a/drivers/net/softnic/rte_eth_softnic_cli.c
+++ b/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -9,6 +9,8 @@
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev.h>
#include "rte_eth_softnic_internals.h"
#include "parser.h"
@@ -1089,6 +1091,67 @@ cmd_tap(struct pmd_internals *softnic,
}
/**
+ * cryptodev <cryptodev_name> dev <device_name> | dev_id <device_id>
+ * queue <n_queues> <queue_size>
+ **/
+
+static void
+cmd_cryptodev(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_cryptodev_params params;
+ char *name;
+
+ memset(&params, 0, sizeof(params));
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0)
+ params.dev_name = tokens[3];
+ else if (strcmp(tokens[2], "dev_id") == 0) {
+ if (softnic_parser_read_uint32(&params.dev_id, tokens[3]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "dev_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "cryptodev");
+ return;
+ }
+
+ if (strcmp(tokens[4], "queue")) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "4");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&params.n_queues, tokens[5]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "q");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&params.queue_size, tokens[6]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_size");
+ return;
+ }
+
+ if (softnic_cryptodev_create(softnic, name, &params) == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
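A hypothetical invocation of the new command (the underlying crypto PMD name and the sizes are illustrative only): it registers the softnic object CRYPTO0, backed by the DPDK crypto device crypto_aesni_mb0, with 2 queue pairs of 1024 descriptors each:

    cryptodev CRYPTO0 dev crypto_aesni_mb0 queue 2 1024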
+/**
* port in action profile <profile_name>
* [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>]
* [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>]
@@ -1272,13 +1335,17 @@ cmd_port_in_action_profile(struct pmd_internals *softnic,
* tc <n_tc>
* stats none | pkts | bytes | both]
* [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]
- * [encap ether | vlan | qinq | mpls | pppoe]
+ * [encap ether | vlan | qinq | mpls | pppoe |
+ * vxlan offset <ether_offset> ipv4 | ipv6 vlan on | off]
* [nat src | dst
* proto udp | tcp]
* [ttl drop | fwd
* stats none | pkts]
* [stats pkts | bytes | both]
* [time]
+ * [tag]
+ * [decap]
+ *
*/
static void
cmd_table_action_profile(struct pmd_internals *softnic,
@@ -1478,6 +1545,8 @@ cmd_table_action_profile(struct pmd_internals *softnic,
if (t0 < n_tokens &&
(strcmp(tokens[t0], "encap") == 0)) {
+ uint32_t n_extra_tokens = 0;
+
if (n_tokens < t0 + 2) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
"action profile encap");
@@ -1494,13 +1563,61 @@ cmd_table_action_profile(struct pmd_internals *softnic,
p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS;
} else if (strcmp(tokens[t0 + 1], "pppoe") == 0) {
p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE;
+ } else if (strcmp(tokens[t0 + 1], "vxlan") == 0) {
+ if (n_tokens < t0 + 2 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "action profile encap vxlan");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "vxlan: offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.encap.vxlan.data_offset,
+ tokens[t0 + 2 + 1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: ether_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 2], "ipv4") == 0)
+ p.encap.vxlan.ip_version = 1;
+ else if (strcmp(tokens[t0 + 2 + 2], "ipv6") == 0)
+ p.encap.vxlan.ip_version = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 3], "vlan") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "vxlan: vlan");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 4], "on") == 0)
+ p.encap.vxlan.vlan = 1;
+ else if (strcmp(tokens[t0 + 2 + 4], "off") == 0)
+ p.encap.vxlan.vlan = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: on or off");
+ return;
+ }
+
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN;
+ n_extra_tokens = 5;
+
} else {
snprintf(out, out_size, MSG_ARG_MISMATCH, "encap");
return;
}
-
p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP;
- t0 += 2;
+ t0 += 2 + n_extra_tokens;
} /* encap */
if (t0 < n_tokens &&
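A hypothetical table action profile using the extended encap grammar (the profile name and the offsets are illustrative only, and the leading clause is assumed from the existing command syntax):

    table action profile AP0 ipv4 offset 270 fwd encap vxlan offset 268 ipv4 vlan off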
@@ -1610,6 +1727,18 @@ cmd_table_action_profile(struct pmd_internals *softnic,
t0 += 1;
} /* time */
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "tag") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TAG;
+ t0 += 1;
+ } /* tag */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "decap") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_DECAP;
+ t0 += 1;
+ } /* decap */
+
if (t0 < n_tokens) {
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
return;
@@ -1682,6 +1811,7 @@ cmd_pipeline(struct pmd_internals *softnic,
* | tmgr <tmgr_name>
* | tap <tap_name> mempool <mempool_name> mtu <mtu>
* | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>
+ * | cryptodev <cryptodev_name> rxq <queue_id>
* [action <port_in_action_profile_name>]
* [disabled]
*/
@@ -1697,6 +1827,8 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
uint32_t t0;
int enabled, status;
+ memset(&p, 0, sizeof(p));
+
if (n_tokens < 7) {
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
return;
@@ -1735,7 +1867,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_RXQ;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
if (strcmp(tokens[t0 + 2], "rxq") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
@@ -1758,7 +1890,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_SWQ;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
t0 += 2;
} else if (strcmp(tokens[t0], "tmgr") == 0) {
@@ -1770,7 +1902,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_TMGR;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
t0 += 2;
} else if (strcmp(tokens[t0], "tap") == 0) {
@@ -1782,7 +1914,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_TAP;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
if (strcmp(tokens[t0 + 2], "mempool") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND,
@@ -1814,8 +1946,6 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_SOURCE;
- p.dev_name = NULL;
-
if (strcmp(tokens[t0 + 1], "mempool") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND,
"mempool");
@@ -1846,12 +1976,32 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
}
t0 += 7;
+ } else if (strcmp(tokens[t0], "cryptodev") == 0) {
+ if (n_tokens < t0 + 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in cryptodev");
+ return;
+ }
+
+ p.type = PORT_IN_CRYPTODEV;
+
+ strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name));
+ if (softnic_parser_read_uint16(&p.rxq.queue_id,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "rxq");
+ return;
+ }
+
+ p.cryptodev.arg_callback = NULL;
+ p.cryptodev.f_callback = NULL;
+
+ t0 += 4;
} else {
snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
return;
}
- p.action_profile_name = NULL;
if (n_tokens > t0 &&
(strcmp(tokens[t0], "action") == 0)) {
if (n_tokens < t0 + 2) {
@@ -1859,7 +2009,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
return;
}
- p.action_profile_name = tokens[t0 + 1];
+ strcpy(p.action_profile_name, tokens[t0 + 1]);
t0 += 2;
}
@@ -1895,6 +2045,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
* | tmgr <tmgr_name>
* | tap <tap_name>
* | sink [file <file_name> pkts <max_n_pkts>]
+ * | cryptodev <cryptodev_name> txq <txq_id> offset <crypto_op_offset>
*/
static void
cmd_pipeline_port_out(struct pmd_internals *softnic,
@@ -1945,7 +2096,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_TXQ;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
if (strcmp(tokens[8], "txq") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
@@ -1966,7 +2117,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_SWQ;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
} else if (strcmp(tokens[6], "tmgr") == 0) {
if (n_tokens != 8) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
@@ -1976,7 +2127,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_TMGR;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
} else if (strcmp(tokens[6], "tap") == 0) {
if (n_tokens != 8) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
@@ -1986,7 +2137,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_TAP;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
} else if (strcmp(tokens[6], "sink") == 0) {
if ((n_tokens != 7) && (n_tokens != 11)) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
@@ -1996,8 +2147,6 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_SINK;
- p.dev_name = NULL;
-
if (n_tokens == 7) {
p.sink.file_name = NULL;
p.sink.max_n_pkts = 0;
@@ -2021,6 +2170,40 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
return;
}
}
+ } else if (strcmp(tokens[6], "cryptodev") == 0) {
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ p.type = PORT_OUT_CRYPTODEV;
+
+ strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name));
+
+ if (strcmp(tokens[8], "txq")) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.cryptodev.queue_id, tokens[9])
+ != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
+
+ if (strcmp(tokens[10], "offset")) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.cryptodev.op_offset,
+ tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "offset");
+ return;
+ }
} else {
snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
return;
@@ -2064,12 +2247,13 @@ cmd_pipeline_table(struct pmd_internals *softnic,
char *out,
size_t out_size)
{
- uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
struct softnic_table_params p;
char *pipeline_name;
uint32_t t0;
int status;
+ memset(&p, 0, sizeof(p));
+
if (n_tokens < 5) {
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
return;
@@ -2203,12 +2387,11 @@ cmd_pipeline_table(struct pmd_internals *softnic,
}
if ((softnic_parse_hex_string(tokens[t0 + 5],
- key_mask, &key_mask_size) != 0) ||
+ p.match.hash.key_mask, &key_mask_size) != 0) ||
key_mask_size != p.match.hash.key_size) {
snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
return;
}
- p.match.hash.key_mask = key_mask;
if (strcmp(tokens[t0 + 6], "offset") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
@@ -2295,7 +2478,6 @@ cmd_pipeline_table(struct pmd_internals *softnic,
return;
}
- p.action_profile_name = NULL;
if (n_tokens > t0 &&
(strcmp(tokens[t0], "action") == 0)) {
if (n_tokens < t0 + 2) {
@@ -2303,7 +2485,7 @@ cmd_pipeline_table(struct pmd_internals *softnic,
return;
}
- p.action_profile_name = tokens[t0 + 1];
+ strcpy(p.action_profile_name, tokens[t0 + 1]);
t0 += 2;
}
@@ -3176,10 +3358,30 @@ parse_match(char **tokens,
* [label2 <label> <tc> <ttl>
* [label3 <label> <tc> <ttl>]]]
* | pppoe <da> <sa> <session_id>]
+ * | vxlan ether <da> <sa>
+ * [vlan <pcp> <dei> <vid>]
+ * ipv4 <sa> <da> <dscp> <ttl>
+ * | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit>
+ * udp <sp> <dp>
+ * vxlan <vni>]
* [nat ipv4 | ipv6 <addr> <port>]
* [ttl dec | keep]
* [stats]
* [time]
+ * [tag <tag>]
+ * [decap <n>]
+ * [sym_crypto
+ * encrypt | decrypt
+ * type
+ * | cipher
+ * cipher_algo <algo> cipher_key <key> cipher_iv <iv>
+ * | cipher_auth
+ * cipher_algo <algo> cipher_key <key> cipher_iv <iv>
+ * auth_algo <algo> auth_key <key> digest_size <size>
+ * | aead
+ * aead_algo <algo> aead_key <key> aead_iv <iv> aead_aad <aad>
+ * digest_size <size>
+ * data_offset <data_offset>]
*
* where:
* <pa> ::= g | y | r | drop
@@ -3575,6 +3777,122 @@ parse_table_action_encap(char **tokens,
return 1 + 4;
}
+ /* vxlan */
+ if (n_tokens && (strcmp(tokens[0], "vxlan") == 0)) {
+ uint32_t n = 0;
+
+ n_tokens--;
+ tokens++;
+ n++;
+
+ /* ether <da> <sa> */
+ if ((n_tokens < 3) ||
+ strcmp(tokens[0], "ether") ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.vxlan.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.vxlan.ether.sa))
+ return 0;
+
+ n_tokens -= 3;
+ tokens += 3;
+ n += 3;
+
+ /* [vlan <pcp> <dei> <vid>] */
+ if (strcmp(tokens[0], "vlan") == 0) {
+ uint32_t pcp, dei, vid;
+
+ if ((n_tokens < 4) ||
+ softnic_parser_read_uint32(&pcp, tokens[1]) ||
+ (pcp > 7) ||
+ softnic_parser_read_uint32(&dei, tokens[2]) ||
+ (dei > 1) ||
+ softnic_parser_read_uint32(&vid, tokens[3]) ||
+ (vid > 0xFFF))
+ return 0;
+
+ a->encap.vxlan.vlan.pcp = pcp;
+ a->encap.vxlan.vlan.dei = dei;
+ a->encap.vxlan.vlan.vid = vid;
+
+ n_tokens -= 4;
+ tokens += 4;
+ n += 4;
+ }
+
+ /* ipv4 <sa> <da> <dscp> <ttl>
+ | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit> */
+ if (strcmp(tokens[0], "ipv4") == 0) {
+ struct in_addr sa, da;
+ uint8_t dscp, ttl;
+
+ if ((n_tokens < 5) ||
+ softnic_parse_ipv4_addr(tokens[1], &sa) ||
+ softnic_parse_ipv4_addr(tokens[2], &da) ||
+ softnic_parser_read_uint8(&dscp, tokens[3]) ||
+ (dscp > 64) ||
+ softnic_parser_read_uint8(&ttl, tokens[4]))
+ return 0;
+
+ a->encap.vxlan.ipv4.sa = rte_be_to_cpu_32(sa.s_addr);
+ a->encap.vxlan.ipv4.da = rte_be_to_cpu_32(da.s_addr);
+ a->encap.vxlan.ipv4.dscp = dscp;
+ a->encap.vxlan.ipv4.ttl = ttl;
+
+ n_tokens -= 5;
+ tokens += 5;
+ n += 5;
+ } else if (strcmp(tokens[0], "ipv6") == 0) {
+ struct in6_addr sa, da;
+ uint32_t flow_label;
+ uint8_t dscp, hop_limit;
+
+ if ((n_tokens < 6) ||
+ softnic_parse_ipv6_addr(tokens[1], &sa) ||
+ softnic_parse_ipv6_addr(tokens[2], &da) ||
+ softnic_parser_read_uint32(&flow_label, tokens[3]) ||
+ softnic_parser_read_uint8(&dscp, tokens[4]) ||
+ (dscp > 64) ||
+ softnic_parser_read_uint8(&hop_limit, tokens[5]))
+ return 0;
+
+ memcpy(a->encap.vxlan.ipv6.sa, sa.s6_addr, 16);
+ memcpy(a->encap.vxlan.ipv6.da, da.s6_addr, 16);
+ a->encap.vxlan.ipv6.flow_label = flow_label;
+ a->encap.vxlan.ipv6.dscp = dscp;
+ a->encap.vxlan.ipv6.hop_limit = hop_limit;
+
+ n_tokens -= 6;
+ tokens += 6;
+ n += 6;
+ } else
+ return 0;
+
+ /* udp <sp> <dp> */
+ if ((n_tokens < 3) ||
+ strcmp(tokens[0], "udp") ||
+ softnic_parser_read_uint16(&a->encap.vxlan.udp.sp, tokens[1]) ||
+ softnic_parser_read_uint16(&a->encap.vxlan.udp.dp, tokens[2]))
+ return 0;
+
+ n_tokens -= 3;
+ tokens += 3;
+ n += 3;
+
+ /* vxlan <vni> */
+ if ((n_tokens < 2) ||
+ strcmp(tokens[0], "vxlan") ||
+ softnic_parser_read_uint32(&a->encap.vxlan.vxlan.vni, tokens[1]) ||
+ (a->encap.vxlan.vxlan.vni > 0xFFFFFF))
+ return 0;
+
+ n_tokens -= 2;
+ tokens += 2;
+ n += 2;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + n;
+ }
+
return 0;
}
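A hypothetical rule-level action string that this branch parses (addresses and values are illustrative; dscp must pass the range check above and the vni must stay below 2^24):

    encap vxlan ether a0:b1:c2:d3:e4:f5 a0:b1:c2:d3:e4:f6 ipv4 10.0.0.1 10.0.0.2 0 64 udp 4789 4789 vxlan 100

The branch returns 1 + n, i.e. the leading "encap" token plus the 14 tokens consumed after it in this example.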
@@ -3669,6 +3987,400 @@ parse_table_action_time(char **tokens,
return 1;
}
+static void
+parse_free_sym_crypto_param_data(struct rte_table_action_sym_crypto_params *p)
+{
+ struct rte_crypto_sym_xform *xform[2] = {NULL};
+ uint32_t i;
+
+ xform[0] = p->xform;
+ if (xform[0])
+ xform[1] = xform[0]->next;
+
+ for (i = 0; i < 2; i++) {
+ if (xform[i] == NULL)
+ continue;
+
+ switch (xform[i]->type) {
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ if (xform[i]->cipher.key.data)
+ free(xform[i]->cipher.key.data);
+ if (p->cipher_auth.cipher_iv.val)
+ free(p->cipher_auth.cipher_iv.val);
+ if (p->cipher_auth.cipher_iv_update.val)
+ free(p->cipher_auth.cipher_iv_update.val);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (xform[i]->auth.key.data)
+ free(xform[i]->auth.key.data);
+ if (p->cipher_auth.auth_iv.val)
+ free(p->cipher_auth.auth_iv.val);
+ if (p->cipher_auth.auth_iv_update.val)
+ free(p->cipher_auth.auth_iv_update.val);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ if (xform[i]->aead.key.data)
+ free(xform[i]->aead.key.data);
+ if (p->aead.iv.val)
+ free(p->aead.iv.val);
+ if (p->aead.aad.val)
+ free(p->aead.aad.val);
+ break;
+ default:
+ continue;
+ }
+ }
+
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_cipher;
+ int status;
+ size_t len;
+
+ if (n_tokens < 7 || strcmp(tokens[1], "cipher_algo") ||
+ strcmp(tokens[3], "cipher_key") ||
+ strcmp(tokens[5], "cipher_iv"))
+ return NULL;
+
+ xform_cipher = calloc(1, sizeof(*xform_cipher));
+ if (xform_cipher == NULL)
+ return NULL;
+
+ xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher->cipher.op = encrypt ? RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ /* cipher_algo */
+ status = rte_cryptodev_get_cipher_algo_enum(
+ &xform_cipher->cipher.algo, tokens[2]);
+ if (status < 0)
+ goto error_exit;
+
+ /* cipher_key */
+ len = strlen(tokens[4]);
+ xform_cipher->cipher.key.data = calloc(1, len / 2 + 1);
+ if (xform_cipher->cipher.key.data == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[4],
+ xform_cipher->cipher.key.data,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher->cipher.key.length = (uint16_t)len;
+
+ /* cipher_iv */
+ len = strlen(tokens[6]);
+
+ p->cipher_auth.cipher_iv.val = calloc(1, len / 2 + 1);
+ if (p->cipher_auth.cipher_iv.val == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[6],
+ p->cipher_auth.cipher_iv.val,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher->cipher.iv.length = (uint16_t)len;
+ xform_cipher->cipher.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET;
+ p->cipher_auth.cipher_iv.length = (uint32_t)len;
+ *used_n_tokens = 7;
+
+ return xform_cipher;
+
+error_exit:
+ if (xform_cipher->cipher.key.data)
+ free(xform_cipher->cipher.key.data);
+
+ if (p->cipher_auth.cipher_iv.val) {
+ free(p->cipher_auth.cipher_iv.val);
+ p->cipher_auth.cipher_iv.val = NULL;
+ }
+
+ free(xform_cipher);
+
+ return NULL;
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher_auth(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_cipher;
+ struct rte_crypto_sym_xform *xform_auth;
+ int status;
+ size_t len;
+
+ if (n_tokens < 13 ||
+ strcmp(tokens[7], "auth_algo") ||
+ strcmp(tokens[9], "auth_key") ||
+ strcmp(tokens[11], "digest_size"))
+ return NULL;
+
+ xform_auth = calloc(1, sizeof(*xform_auth));
+ if (xform_auth == NULL)
+ return NULL;
+
+ xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ xform_auth->auth.op = encrypt ? RTE_CRYPTO_AUTH_OP_GENERATE :
+ RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ /* auth_algo */
+ status = rte_cryptodev_get_auth_algo_enum(&xform_auth->auth.algo,
+ tokens[8]);
+ if (status < 0)
+ goto error_exit;
+
+ /* auth_key */
+ len = strlen(tokens[10]);
+ xform_auth->auth.key.data = calloc(1, len / 2 + 1);
+ if (xform_auth->auth.key.data == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[10],
+ xform_auth->auth.key.data, (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_auth->auth.key.length = (uint16_t)len;
+
+ if (strcmp(tokens[11], "digest_size"))
+ goto error_exit;
+
+ status = softnic_parser_read_uint16(&xform_auth->auth.digest_length,
+ tokens[12]);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher = parse_table_action_cipher(p, tokens, 7, encrypt,
+ used_n_tokens);
+ if (xform_cipher == NULL)
+ goto error_exit;
+
+ *used_n_tokens += 6;
+
+ if (encrypt) {
+ xform_cipher->next = xform_auth;
+ return xform_cipher;
+ } else {
+ xform_auth->next = xform_cipher;
+ return xform_auth;
+ }
+
+error_exit:
+ if (xform_auth->auth.key.data)
+ free(xform_auth->auth.key.data);
+ if (p->cipher_auth.auth_iv.val) {
+ free(p->cipher_auth.auth_iv.val);
+ p->cipher_auth.auth_iv.val = 0;
+ }
+
+ free(xform_auth);
+
+ return NULL;
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_aead(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_aead;
+ int status;
+ size_t len;
+
+ if (n_tokens < 11 || strcmp(tokens[1], "aead_algo") ||
+ strcmp(tokens[3], "aead_key") ||
+ strcmp(tokens[5], "aead_iv") ||
+ strcmp(tokens[7], "aead_aad") ||
+ strcmp(tokens[9], "digest_size"))
+ return NULL;
+
+ xform_aead = calloc(1, sizeof(*xform_aead));
+ if (xform_aead == NULL)
+ return NULL;
+
+ xform_aead->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ xform_aead->aead.op = encrypt ? RTE_CRYPTO_AEAD_OP_ENCRYPT :
+ RTE_CRYPTO_AEAD_OP_DECRYPT;
+
+ /* aead_algo */
+ status = rte_cryptodev_get_aead_algo_enum(&xform_aead->aead.algo,
+ tokens[2]);
+ if (status < 0)
+ goto error_exit;
+
+ /* aead_key */
+ len = strlen(tokens[4]);
+ xform_aead->aead.key.data = calloc(1, len / 2 + 1);
+ if (xform_aead->aead.key.data == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[4], xform_aead->aead.key.data,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.key.length = (uint16_t)len;
+
+ /* aead_iv */
+ len = strlen(tokens[6]);
+ p->aead.iv.val = calloc(1, len / 2 + 1);
+ if (p->aead.iv.val == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[6], p->aead.iv.val,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.iv.length = (uint16_t)len;
+ xform_aead->aead.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET;
+ p->aead.iv.length = (uint32_t)len;
+
+ /* aead_aad */
+ len = strlen(tokens[8]);
+ p->aead.aad.val = calloc(1, len / 2 + 1);
+ if (p->aead.aad.val == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[8], p->aead.aad.val, (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.aad_length = (uint16_t)len;
+ p->aead.aad.length = (uint32_t)len;
+
+ /* digest_size */
+ status = softnic_parser_read_uint16(&xform_aead->aead.digest_length,
+ tokens[10]);
+ if (status < 0)
+ goto error_exit;
+
+ *used_n_tokens = 11;
+
+ return xform_aead;
+
+error_exit:
+ if (xform_aead->aead.key.data)
+ free(xform_aead->aead.key.data);
+ if (p->aead.iv.val) {
+ free(p->aead.iv.val);
+ p->aead.iv.val = NULL;
+ }
+ if (p->aead.aad.val) {
+ free(p->aead.aad.val);
+ p->aead.aad.val = NULL;
+ }
+
+ free(xform_aead);
+
+ return NULL;
+}
+
+
+static uint32_t
+parse_table_action_sym_crypto(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ struct rte_table_action_sym_crypto_params *p = &a->sym_crypto;
+ struct rte_crypto_sym_xform *xform = NULL;
+ uint32_t used_n_tokens;
+ uint32_t encrypt;
+ int status;
+
+ if ((n_tokens < 12) ||
+ strcmp(tokens[0], "sym_crypto") ||
+ strcmp(tokens[2], "type"))
+ return 0;
+
+ memset(p, 0, sizeof(*p));
+
+ if (strcmp(tokens[1], "encrypt") == 0)
+ encrypt = 1;
+ else
+ encrypt = 0;
+
+ status = softnic_parser_read_uint32(&p->data_offset, tokens[n_tokens - 1]);
+ if (status < 0)
+ return 0;
+
+ if (strcmp(tokens[3], "cipher") == 0) {
+ tokens += 3;
+ n_tokens -= 3;
+
+ xform = parse_table_action_cipher(p, tokens, n_tokens, encrypt,
+ &used_n_tokens);
+ } else if (strcmp(tokens[3], "cipher_auth") == 0) {
+ tokens += 3;
+ n_tokens -= 3;
+
+ xform = parse_table_action_cipher_auth(p, tokens, n_tokens,
+ encrypt, &used_n_tokens);
+ } else if (strcmp(tokens[3], "aead") == 0) {
+ tokens += 3;
+ n_tokens -= 3;
+
+ xform = parse_table_action_aead(p, tokens, n_tokens, encrypt,
+ &used_n_tokens);
+ }
+
+ if (xform == NULL)
+ return 0;
+
+ p->xform = xform;
+
+ if (strcmp(tokens[used_n_tokens], "data_offset")) {
+ parse_free_sym_crypto_param_data(p);
+ return 0;
+ }
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_SYM_CRYPTO;
+
+ return used_n_tokens + 5;
+}
+
+static uint32_t
+parse_table_action_tag(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "tag"))
+ return 0;
+
+ if (softnic_parser_read_uint32(&a->tag.tag, tokens[1]))
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
+ return 2;
+}
+
+static uint32_t
+parse_table_action_decap(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "decap"))
+ return 0;
+
+ if (softnic_parser_read_uint16(&a->decap.n, tokens[1]))
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
+ return 2;
+}
+
static uint32_t
parse_table_action(char **tokens,
uint32_t n_tokens,
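A hypothetical sym_crypto action string for the cipher-only case (the algorithm string and hex values are illustrative; the key and IV are parsed as hex by softnic_parse_hex_string(), and sym_crypto is assumed to be the last action of the rule since data_offset is read from the final token):

    sym_crypto encrypt type cipher cipher_algo aes-cbc cipher_key 000102030405060708090a0b0c0d0e0f cipher_iv 000102030405060708090a0b0c0d0e0f data_offset 160

Here parse_table_action_cipher() reports used_n_tokens = 7, so parse_table_action_sym_crypto() returns 12, matching the 12 tokens of the string.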
@@ -3813,6 +4525,47 @@ parse_table_action(char **tokens,
n_tokens -= n;
}
+ if (n_tokens && (strcmp(tokens[0], "tag") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_tag(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action tag");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "decap") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_decap(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action decap");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "sym_crypto") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_sym_crypto(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action sym_crypto");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
if (n_tokens0 - n_tokens == 1) {
snprintf(out, out_size, MSG_ARG_INVALID, "action");
return 0;
@@ -4797,6 +5550,81 @@ cmd_softnic_thread_pipeline_disable(struct pmd_internals *softnic,
}
}
+/**
+ * flowapi map
+ * group <group_id>
+ * ingress | egress
+ * pipeline <pipeline_name>
+ * table <table_id>
+ */
+static void
+cmd_softnic_flowapi_map(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t group_id, table_id;
+ int ingress, status;
+
+ if (n_tokens != 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "map") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "map");
+ return;
+ }
+
+ if (strcmp(tokens[2], "group") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "group");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&group_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "group_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "ingress") == 0) {
+ ingress = 1;
+ } else if (strcmp(tokens[4], "egress") == 0) {
+ ingress = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ingress | egress");
+ return;
+ }
+
+ if (strcmp(tokens[5], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[6];
+
+ if (strcmp(tokens[7], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ status = flow_attr_map_set(softnic,
+ group_id,
+ ingress,
+ pipeline_name,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
void
softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
{
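A hypothetical mapping command (pipeline name and IDs are illustrative): it binds rte_flow group 0 on the ingress side to table 0 of pipeline PIPELINE0, so that flows created with attr->group == 0 are installed into that table:

    flowapi map group 0 ingress pipeline PIPELINE0 table 0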
@@ -4877,6 +5705,11 @@ softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
return;
}
+ if (strcmp(tokens[0], "cryptodev") == 0) {
+ cmd_cryptodev(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
if (strcmp(tokens[0], "port") == 0) {
cmd_port_in_action_profile(softnic, tokens, n_tokens, out, out_size);
return;
@@ -5089,6 +5922,12 @@ softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
}
}
+ if (strcmp(tokens[0], "flowapi") == 0) {
+ cmd_softnic_flowapi_map(softnic, tokens, n_tokens, out,
+ out_size);
+ return;
+ }
+
snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
}
diff --git a/drivers/net/softnic/rte_eth_softnic_cryptodev.c b/drivers/net/softnic/rte_eth_softnic_cryptodev.c
new file mode 100644
index 00000000..1480f6dd
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_cryptodev.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_cryptodev_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->cryptodev_list);
+
+ return 0;
+}
+
+void
+softnic_cryptodev_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_cryptodev *cryptodev;
+
+ cryptodev = TAILQ_FIRST(&p->cryptodev_list);
+ if (cryptodev == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->cryptodev_list, cryptodev, node);
+ free(cryptodev);
+ }
+}
+
+struct softnic_cryptodev *
+softnic_cryptodev_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_cryptodev *cryptodev;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(cryptodev, &p->cryptodev_list, node)
+ if (strcmp(cryptodev->name, name) == 0)
+ return cryptodev;
+
+ return NULL;
+}
+
+struct softnic_cryptodev *
+softnic_cryptodev_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_cryptodev_params *params)
+{
+ struct rte_cryptodev_info dev_info;
+ struct rte_cryptodev_config dev_conf;
+ struct rte_cryptodev_qp_conf queue_conf;
+ struct softnic_cryptodev *cryptodev;
+ uint32_t dev_id, i;
+ uint32_t socket_id;
+ int status;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ softnic_cryptodev_find(p, name) ||
+ (params->n_queues == 0) ||
+ (params->queue_size == 0))
+ return NULL;
+
+ if (params->dev_name) {
+ status = rte_cryptodev_get_dev_id(params->dev_name);
+ if (status == -1)
+ return NULL;
+
+ dev_id = (uint32_t)status;
+ } else {
+ if (rte_cryptodev_pmd_is_valid_dev(params->dev_id) == 0)
+ return NULL;
+
+ dev_id = params->dev_id;
+ }
+
+ socket_id = rte_cryptodev_socket_id(dev_id);
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
+ if (dev_info.max_nb_queue_pairs < params->n_queues)
+ return NULL;
+ if (dev_info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED)
+ return NULL;
+
+ dev_conf.socket_id = socket_id;
+ dev_conf.nb_queue_pairs = params->n_queues;
+
+ status = rte_cryptodev_configure(dev_id, &dev_conf);
+ if (status < 0)
+ return NULL;
+
+ queue_conf.nb_descriptors = params->queue_size;
+ for (i = 0; i < params->n_queues; i++) {
+ status = rte_cryptodev_queue_pair_setup(dev_id, i,
+ &queue_conf, socket_id, NULL);
+ if (status < 0)
+ return NULL;
+ }
+
+ if (rte_cryptodev_start(dev_id) < 0)
+ return NULL;
+
+ cryptodev = calloc(1, sizeof(struct softnic_cryptodev));
+ if (cryptodev == NULL) {
+ rte_cryptodev_stop(dev_id);
+ return NULL;
+ }
+
+ strlcpy(cryptodev->name, name, sizeof(cryptodev->name));
+ cryptodev->dev_id = dev_id;
+ cryptodev->n_queues = params->n_queues;
+
+ TAILQ_INSERT_TAIL(&p->cryptodev_list, cryptodev, node);
+
+ return cryptodev;
+}
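A minimal sketch of driving this API from code rather than from the CLI, assuming p is the softnic pmd_internals handle and the crypto device name is made up; note the function refuses hardware-accelerated devices and devices with fewer queue pairs than requested:

    char dev_name[] = "crypto_null0";   /* hypothetical cryptodev name */
    struct softnic_cryptodev_params params = {
            .dev_name = dev_name,
            .n_queues = 2,
            .queue_size = 1024,
    };
    struct softnic_cryptodev *cdev =
            softnic_cryptodev_create(p, "CRYPTO0", &params);
    /* cdev == NULL on any configuration or start failure. */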
diff --git a/drivers/net/softnic/rte_eth_softnic_flow.c b/drivers/net/softnic/rte_eth_softnic_flow.c
new file mode 100644
index 00000000..285af462
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_flow.c
@@ -0,0 +1,2287 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+#define rte_htons rte_cpu_to_be_16
+#define rte_htonl rte_cpu_to_be_32
+
+#define rte_ntohs rte_be_to_cpu_16
+#define rte_ntohl rte_be_to_cpu_32
+
+static struct rte_flow *
+softnic_flow_find(struct softnic_table *table,
+ struct softnic_table_rule_match *rule_match)
+{
+ struct rte_flow *flow;
+
+ TAILQ_FOREACH(flow, &table->flows, node)
+ if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
+ return flow;
+
+ return NULL;
+}
+
+int
+flow_attr_map_set(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress,
+ const char *pipeline_name,
+ uint32_t table_id)
+{
+ struct pipeline *pipeline;
+ struct flow_attr_map *map;
+
+ if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
+ pipeline_name == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ table_id >= pipeline->n_tables)
+ return -1;
+
+ map = (ingress) ? &softnic->flow.ingress_map[group_id] :
+ &softnic->flow.egress_map[group_id];
+ strcpy(map->pipeline_name, pipeline_name);
+ map->table_id = table_id;
+ map->valid = 1;
+
+ return 0;
+}
+
+struct flow_attr_map *
+flow_attr_map_get(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress)
+{
+ if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
+ return NULL;
+
+ return (ingress) ? &softnic->flow.ingress_map[group_id] :
+ &softnic->flow.egress_map[group_id];
+}
+
+static int
+flow_pipeline_table_get(struct pmd_internals *softnic,
+ const struct rte_flow_attr *attr,
+ const char **pipeline_name,
+ uint32_t *table_id,
+ struct rte_flow_error *error)
+{
+ struct flow_attr_map *map;
+
+ if (attr == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "Null attr");
+
+ if (!attr->ingress && !attr->egress)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr,
+ "Ingress/egress not specified");
+
+ if (attr->ingress && attr->egress)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr,
+ "Setting both ingress and egress is not allowed");
+
+ map = flow_attr_map_get(softnic,
+ attr->group,
+ attr->ingress);
+ if (map == NULL ||
+ map->valid == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr,
+ "Invalid group ID");
+
+ if (pipeline_name)
+ *pipeline_name = map->pipeline_name;
+
+ if (table_id)
+ *table_id = map->table_id;
+
+ return 0;
+}
+
+union flow_item {
+ uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_icmp icmp;
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_sctp sctp;
+ struct rte_flow_item_vxlan vxlan;
+ struct rte_flow_item_e_tag e_tag;
+ struct rte_flow_item_nvgre nvgre;
+ struct rte_flow_item_mpls mpls;
+ struct rte_flow_item_gre gre;
+ struct rte_flow_item_gtp gtp;
+ struct rte_flow_item_esp esp;
+ struct rte_flow_item_geneve geneve;
+ struct rte_flow_item_vxlan_gpe vxlan_gpe;
+ struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
+ struct rte_flow_item_ipv6_ext ipv6_ext;
+ struct rte_flow_item_icmp6 icmp6;
+ struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
+ struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
+ struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
+ struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
+ struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
+};
+
+static const union flow_item flow_item_raw_mask;
+
+static int
+flow_item_is_proto(enum rte_flow_item_type type,
+ const void **mask,
+ size_t *size)
+{
+ switch (type) {
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ *mask = &flow_item_raw_mask;
+ *size = sizeof(flow_item_raw_mask);
+ return 1; /* TRUE */
+
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *mask = &rte_flow_item_eth_mask;
+ *size = sizeof(struct rte_flow_item_eth);
+ return 1; /* TRUE */
+
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ *mask = &rte_flow_item_vlan_mask;
+ *size = sizeof(struct rte_flow_item_vlan);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ *mask = &rte_flow_item_ipv4_mask;
+ *size = sizeof(struct rte_flow_item_ipv4);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ *mask = &rte_flow_item_ipv6_mask;
+ *size = sizeof(struct rte_flow_item_ipv6);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ *mask = &rte_flow_item_icmp_mask;
+ *size = sizeof(struct rte_flow_item_icmp);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ *mask = &rte_flow_item_udp_mask;
+ *size = sizeof(struct rte_flow_item_udp);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ *mask = &rte_flow_item_tcp_mask;
+ *size = sizeof(struct rte_flow_item_tcp);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ *mask = &rte_flow_item_sctp_mask;
+ *size = sizeof(struct rte_flow_item_sctp);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ *mask = &rte_flow_item_vxlan_mask;
+ *size = sizeof(struct rte_flow_item_vxlan);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_E_TAG:
+ *mask = &rte_flow_item_e_tag_mask;
+ *size = sizeof(struct rte_flow_item_e_tag);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ *mask = &rte_flow_item_nvgre_mask;
+ *size = sizeof(struct rte_flow_item_nvgre);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ *mask = &rte_flow_item_mpls_mask;
+ *size = sizeof(struct rte_flow_item_mpls);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ *mask = &rte_flow_item_gre_mask;
+ *size = sizeof(struct rte_flow_item_gre);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_GTP:
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ *mask = &rte_flow_item_gtp_mask;
+ *size = sizeof(struct rte_flow_item_gtp);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ *mask = &rte_flow_item_esp_mask;
+ *size = sizeof(struct rte_flow_item_esp);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ *mask = &rte_flow_item_geneve_mask;
+ *size = sizeof(struct rte_flow_item_geneve);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ *mask = &rte_flow_item_vxlan_gpe_mask;
+ *size = sizeof(struct rte_flow_item_vxlan_gpe);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
+ *mask = &rte_flow_item_arp_eth_ipv4_mask;
+ *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
+ *mask = &rte_flow_item_ipv6_ext_mask;
+ *size = sizeof(struct rte_flow_item_ipv6_ext);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ICMP6:
+ *mask = &rte_flow_item_icmp6_mask;
+ *size = sizeof(struct rte_flow_item_icmp6);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
+ *mask = &rte_flow_item_icmp6_nd_ns_mask;
+ *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
+ *mask = &rte_flow_item_icmp6_nd_na_mask;
+ *size = sizeof(struct rte_flow_item_icmp6_nd_na);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
+ *mask = &rte_flow_item_icmp6_nd_opt_mask;
+ *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
+ *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
+ *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
+ return 1;
+
+ case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
+ *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
+ *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
+ return 1;
+
+ default: return 0; /* FALSE */
+ }
+}
+
+static int
+flow_item_raw_preprocess(const struct rte_flow_item *item,
+ union flow_item *item_spec,
+ union flow_item *item_mask,
+ size_t *item_size,
+ int *item_disabled,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_raw *item_raw_spec = item->spec;
+ const struct rte_flow_item_raw *item_raw_mask = item->mask;
+ const uint8_t *pattern;
+ const uint8_t *pattern_mask;
+ uint8_t *spec = (uint8_t *)item_spec;
+ uint8_t *mask = (uint8_t *)item_mask;
+ size_t pattern_length, pattern_offset, i;
+ int disabled;
+
+ if (!item->spec)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Null specification");
+
+ if (item->last)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Range not allowed (last must be NULL)");
+
+ if (item_raw_spec->relative == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Absolute offset not supported");
+
+ if (item_raw_spec->search)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Search not supported");
+
+ if (item_raw_spec->offset < 0)
+ return rte_flow_error_set(error,
+ ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Negative offset not supported");
+
+ if (item_raw_spec->length == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Zero pattern length");
+
+ if (item_raw_spec->offset + item_raw_spec->length >
+ TABLE_RULE_MATCH_SIZE_MAX)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Item too big");
+
+ if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "RAW: Non-NULL pattern mask not allowed with NULL pattern");
+
+ pattern = item_raw_spec->pattern;
+ pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL;
+ pattern_length = (size_t)item_raw_spec->length;
+ pattern_offset = (size_t)item_raw_spec->offset;
+
+ disabled = 0;
+ if (pattern_mask == NULL)
+ disabled = 1;
+ else
+ for (i = 0; i < pattern_length; i++)
+ if ((pattern)[i])
+ disabled = 1;
+
+ memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX);
+ if (pattern)
+ memcpy(&spec[pattern_offset], pattern, pattern_length);
+
+ memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX);
+ if (pattern_mask)
+ memcpy(&mask[pattern_offset], pattern_mask, pattern_length);
+
+ *item_size = pattern_offset + pattern_length;
+ *item_disabled = disabled;
+
+ return 0;
+}
+
+static int
+flow_item_proto_preprocess(const struct rte_flow_item *item,
+ union flow_item *item_spec,
+ union flow_item *item_mask,
+ size_t *item_size,
+ int *item_disabled,
+ struct rte_flow_error *error)
+{
+ const void *mask_default;
+ uint8_t *spec = (uint8_t *)item_spec;
+ uint8_t *mask = (uint8_t *)item_mask;
+ size_t size, i;
+
+ if (!flow_item_is_proto(item->type, &mask_default, &size))
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Item type not supported");
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
+ return flow_item_raw_preprocess(item,
+ item_spec,
+ item_mask,
+ item_size,
+ item_disabled,
+ error);
+
+ /* spec */
+ if (!item->spec) {
+ /* If spec is NULL, then last and mask also have to be NULL. */
+ if (item->last || item->mask)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid item (NULL spec with non-NULL last or mask)");
+
+ memset(item_spec, 0, size);
+ memset(item_mask, 0, size);
+ *item_size = size;
+ *item_disabled = 1; /* TRUE */
+ return 0;
+ }
+
+ memcpy(spec, item->spec, size);
+ *item_size = size;
+
+ /* mask */
+ if (item->mask)
+ memcpy(mask, item->mask, size);
+ else
+ memcpy(mask, mask_default, size);
+
+ /* disabled */
+ for (i = 0; i < size; i++)
+ if (mask[i])
+ break;
+ *item_disabled = (i == size) ? 1 : 0;
+
+ /* Apply mask over spec. */
+ for (i = 0; i < size; i++)
+ spec[i] &= mask[i];
+
+ /* last */
+ if (item->last) {
+ uint8_t last[size];
+
+ /* init last */
+ memcpy(last, item->last, size);
+ for (i = 0; i < size; i++)
+ last[i] &= mask[i];
+
+ /* check for range */
+ for (i = 0; i < size; i++)
+ if (last[i] != spec[i])
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Range not supported");
+ }
+
+ return 0;
+}
+
+/***
+ * Skip disabled protocol items and VOID items
+ * until any of the mutually exclusive conditions
+ * from the list below takes place:
+ * (A) A protocol present in the proto_mask
+ * is met (either ENABLED or DISABLED);
+ * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
+ * (C) The END item is met.
+ */
+static int
+flow_item_skip_disabled_protos(const struct rte_flow_item **item,
+ uint64_t proto_mask,
+ size_t *length,
+ struct rte_flow_error *error)
+{
+ size_t len = 0;
+
+ for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
+ union flow_item spec, mask;
+ size_t size;
+ int disabled = 0, status;
+
+ if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+
+ status = flow_item_proto_preprocess(*item,
+ &spec,
+ &mask,
+ &size,
+ &disabled,
+ error);
+ if (status)
+ return status;
+
+ if ((proto_mask & (1LLU << (*item)->type)) ||
+ !disabled)
+ break;
+
+ len += size;
+ }
+
+ if (length)
+ *length = len;
+
+ return 0;
+}
+
+#define FLOW_ITEM_PROTO_IP \
+ ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
+ (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
+
+static void
+flow_item_skip_void(const struct rte_flow_item **item)
+{
+ for ( ; ; (*item)++)
+ if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return;
+}
+
+#define IP_PROTOCOL_TCP 0x06
+#define IP_PROTOCOL_UDP 0x11
+#define IP_PROTOCOL_SCTP 0x84
+
+static int
+mask_to_depth(uint64_t mask,
+ uint32_t *depth)
+{
+ uint64_t n;
+
+ if (mask == UINT64_MAX) {
+ if (depth)
+ *depth = 64;
+
+ return 0;
+ }
+
+ mask = ~mask;
+
+ if (mask & (mask + 1))
+ return -1;
+
+ n = __builtin_popcountll(mask);
+ if (depth)
+ *depth = (uint32_t)(64 - n);
+
+ return 0;
+}
+
+static int
+ipv4_mask_to_depth(uint32_t mask,
+ uint32_t *depth)
+{
+ uint32_t d;
+ int status;
+
+ status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
+ if (status)
+ return status;
+
+ d -= 32;
+ if (depth)
+ *depth = d;
+
+ return 0;
+}
+
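+/*
+ * An IPv6 mask is valid only if its upper 64-bit half is a contiguous
+ * prefix and, whenever that half is not all-ones, its lower half is zero;
+ * the resulting depth is the sum of the two partial depths (0..128).
+ */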
+static int
+ipv6_mask_to_depth(uint8_t *mask,
+ uint32_t *depth)
+{
+ uint64_t *m = (uint64_t *)mask;
+ uint64_t m0 = rte_be_to_cpu_64(m[0]);
+ uint64_t m1 = rte_be_to_cpu_64(m[1]);
+ uint32_t d0, d1;
+ int status;
+
+ status = mask_to_depth(m0, &d0);
+ if (status)
+ return status;
+
+ status = mask_to_depth(m1, &d1);
+ if (status)
+ return status;
+
+ if (d0 < 64 && d1)
+ return -1;
+
+ if (depth)
+ *depth = d0 + d1;
+
+ return 0;
+}
+
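+/*
+ * Expand a (port, contiguous port mask) pair into the inclusive port range
+ * it covers, e.g. port 80 (0x0050) with mask 0xFFF0 yields the range
+ * [80, 95]; non-contiguous masks are rejected.
+ */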
+static int
+port_mask_to_range(uint16_t port,
+ uint16_t port_mask,
+ uint16_t *port0,
+ uint16_t *port1)
+{
+ int status;
+ uint16_t p0, p1;
+
+ status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
+ if (status)
+ return -1;
+
+ p0 = port & port_mask;
+ p1 = p0 | ~port_mask;
+
+ if (port0)
+ *port0 = p0;
+
+ if (port1)
+ *port1 = p1;
+
+ return 0;
+}
+
+static int
+flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
+ struct pipeline *pipeline __rte_unused,
+ struct softnic_table *table __rte_unused,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *item,
+ struct softnic_table_rule_match *rule_match,
+ struct rte_flow_error *error)
+{
+ union flow_item spec, mask;
+ size_t size, length = 0;
+ int disabled = 0, status;
+ uint8_t ip_proto, ip_proto_mask;
+
+ memset(rule_match, 0, sizeof(*rule_match));
+ rule_match->match_type = TABLE_ACL;
+ rule_match->match.acl.priority = attr->priority;
+
+ /* VOID or disabled protos only, if any. */
+ status = flow_item_skip_disabled_protos(&item,
+ FLOW_ITEM_PROTO_IP, &length, error);
+ if (status)
+ return status;
+
+ /* IP only. */
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ {
+ uint32_t sa_depth, da_depth;
+
+ status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
+ &sa_depth);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal IPv4 header source address mask");
+
+ status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
+ &da_depth);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal IPv4 header destination address mask");
+
+ ip_proto = spec.ipv4.hdr.next_proto_id;
+ ip_proto_mask = mask.ipv4.hdr.next_proto_id;
+
+ rule_match->match.acl.ip_version = 1;
+ rule_match->match.acl.ipv4.sa =
+ rte_ntohl(spec.ipv4.hdr.src_addr);
+ rule_match->match.acl.ipv4.da =
+ rte_ntohl(spec.ipv4.hdr.dst_addr);
+ rule_match->match.acl.sa_depth = sa_depth;
+ rule_match->match.acl.da_depth = da_depth;
+ rule_match->match.acl.proto = ip_proto;
+ rule_match->match.acl.proto_mask = ip_proto_mask;
+ break;
+ } /* RTE_FLOW_ITEM_TYPE_IPV4 */
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ {
+ uint32_t sa_depth, da_depth;
+
+ status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal IPv6 header source address mask");
+
+ status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal IPv6 header destination address mask");
+
+ ip_proto = spec.ipv6.hdr.proto;
+ ip_proto_mask = mask.ipv6.hdr.proto;
+
+ rule_match->match.acl.ip_version = 0;
+ memcpy(rule_match->match.acl.ipv6.sa,
+ spec.ipv6.hdr.src_addr,
+ sizeof(spec.ipv6.hdr.src_addr));
+ memcpy(rule_match->match.acl.ipv6.da,
+ spec.ipv6.hdr.dst_addr,
+ sizeof(spec.ipv6.hdr.dst_addr));
+ rule_match->match.acl.sa_depth = sa_depth;
+ rule_match->match.acl.da_depth = da_depth;
+ rule_match->match.acl.proto = ip_proto;
+ rule_match->match.acl.proto_mask = ip_proto_mask;
+ break;
+ } /* RTE_FLOW_ITEM_TYPE_IPV6 */
+
+ default:
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: IP protocol required");
+ } /* switch */
+
+ if (ip_proto_mask != UINT8_MAX)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal IP protocol mask");
+
+ item++;
+
+ /* VOID only, if any. */
+ flow_item_skip_void(&item);
+
+ /* TCP/UDP/SCTP only. */
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ {
+ uint16_t sp0, sp1, dp0, dp1;
+
+ if (ip_proto != IP_PROTOCOL_TCP)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Item type is TCP, but IP protocol is not");
+
+ status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
+ rte_ntohs(mask.tcp.hdr.src_port),
+ &sp0,
+ &sp1);
+
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal TCP source port mask");
+
+ status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
+ rte_ntohs(mask.tcp.hdr.dst_port),
+ &dp0,
+ &dp1);
+
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal TCP destination port mask");
+
+ rule_match->match.acl.sp0 = sp0;
+ rule_match->match.acl.sp1 = sp1;
+ rule_match->match.acl.dp0 = dp0;
+ rule_match->match.acl.dp1 = dp1;
+
+ break;
+ } /* RTE_FLOW_ITEM_TYPE_TCP */
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ {
+ uint16_t sp0, sp1, dp0, dp1;
+
+ if (ip_proto != IP_PROTOCOL_UDP)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Item type is UDP, but IP protocol is not");
+
+ status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
+ rte_ntohs(mask.udp.hdr.src_port),
+ &sp0,
+ &sp1);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal UDP source port mask");
+
+ status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
+ rte_ntohs(mask.udp.hdr.dst_port),
+ &dp0,
+ &dp1);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal UDP destination port mask");
+
+ rule_match->match.acl.sp0 = sp0;
+ rule_match->match.acl.sp1 = sp1;
+ rule_match->match.acl.dp0 = dp0;
+ rule_match->match.acl.dp1 = dp1;
+
+ break;
+ } /* RTE_FLOW_ITEM_TYPE_UDP */
+
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ {
+ uint16_t sp0, sp1, dp0, dp1;
+
+ if (ip_proto != IP_PROTOCOL_SCTP)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Item type is SCTP, but IP protocol is not");
+
+ status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
+ rte_ntohs(mask.sctp.hdr.src_port),
+ &sp0,
+ &sp1);
+
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal SCTP source port mask");
+
+ status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
+ rte_ntohs(mask.sctp.hdr.dst_port),
+ &dp0,
+ &dp1);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Illegal SCTP destination port mask");
+
+ rule_match->match.acl.sp0 = sp0;
+ rule_match->match.acl.sp1 = sp1;
+ rule_match->match.acl.dp0 = dp0;
+ rule_match->match.acl.dp1 = dp1;
+
+ break;
+ } /* RTE_FLOW_ITEM_TYPE_SCTP */
+
+ default:
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: TCP/UDP/SCTP required");
+ } /* switch */
+
+ item++;
+
+ /* VOID or disabled protos only, if any. */
+ status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
+ if (status)
+ return status;
+
+ /* END only. */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ACL: Expecting END item");
+
+ return 0;
+}
+
+/***
+ * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
+ * respectively.
+ * They are located within a larger buffer at offsets *toffset* and *foffset*
+ * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
+ * buffer.
+ * Question: are the two masks equivalent?
+ *
+ * Notes:
+ * 1. The offset indicates that the first *offset* bytes in the buffer are
+ * "don't care", so the offset is equivalent to prepending an "all-zeros"
+ * array of *offset* bytes to the *mask*.
+ * 2. Each *mask* might contain a number of zero bytes at the beginning or
+ * at the end.
+ * 3. Bytes in the larger buffer after the end of the *mask* are also considered
+ * "don't care", so they are equivalent to appending an "all-zeros" array of
+ * bytes to the *mask*.
+ *
+ * Example:
+ * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
+ * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
+ * => buffer mask = [00 00 00 22 00 33 00 00]
+ * fmask = [22 00 33], foffset = 3, fsize = 3
+ * => buffer mask = [00 00 00 22 00 33 00 00]
+ * Therefore, the tmask and fmask from this example are equivalent.
+ */
+static int
+hash_key_mask_is_same(uint8_t *tmask,
+ size_t toffset,
+ size_t tsize,
+ uint8_t *fmask,
+ size_t foffset,
+ size_t fsize,
+ size_t *toffset_plus,
+ size_t *foffset_plus)
+{
+ size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
+ size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
+
+ /* Compute tpos and fpos. */
+ for (tpos = 0; tmask[tpos] == 0; tpos++)
+ ;
+ for (fpos = 0; fmask[fpos] == 0; fpos++)
+ ;
+
+ if (toffset + tpos != foffset + fpos)
+ return 0; /* FALSE */
+
+ tsize -= tpos;
+ fsize -= fpos;
+
+ if (tsize < fsize) {
+ size_t i;
+
+ for (i = 0; i < tsize; i++)
+ if (tmask[tpos + i] != fmask[fpos + i])
+ return 0; /* FALSE */
+
+ for ( ; i < fsize; i++)
+ if (fmask[fpos + i])
+ return 0; /* FALSE */
+ } else {
+ size_t i;
+
+ for (i = 0; i < fsize; i++)
+ if (tmask[tpos + i] != fmask[fpos + i])
+ return 0; /* FALSE */
+
+ for ( ; i < tsize; i++)
+ if (tmask[tpos + i])
+ return 0; /* FALSE */
+ }
+
+ if (toffset_plus)
+ *toffset_plus = tpos;
+
+ if (foffset_plus)
+ *foffset_plus = fpos;
+
+ return 1; /* TRUE */
+}
+
+static int
+flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
+ struct pipeline *pipeline __rte_unused,
+ struct softnic_table *table,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item *item,
+ struct softnic_table_rule_match *rule_match,
+ struct rte_flow_error *error)
+{
+ struct softnic_table_rule_match_hash key, key_mask;
+ struct softnic_table_hash_params *params = &table->params.match.hash;
+ size_t offset = 0, length = 0, tpos, fpos;
+ int status;
+
+ memset(&key, 0, sizeof(key));
+ memset(&key_mask, 0, sizeof(key_mask));
+
+ /* VOID or disabled protos only, if any. */
+ status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
+ if (status)
+ return status;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "HASH: END detected too early");
+
+ /* VOID or any protocols (enabled or disabled). */
+ for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ union flow_item spec, mask;
+ size_t size;
+ int disabled, status;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+
+ status = flow_item_proto_preprocess(item,
+ &spec,
+ &mask,
+ &size,
+ &disabled,
+ error);
+ if (status)
+ return status;
+
+ if (length + size > sizeof(key)) {
+ if (disabled)
+ break;
+
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "HASH: Item too big");
+ }
+
+ memcpy(&key.key[length], &spec, size);
+ memcpy(&key_mask.key[length], &mask, size);
+ length += size;
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* VOID or disabled protos only, if any. */
+ status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
+ if (status)
+ return status;
+
+ /* END only. */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "HASH: Expecting END item");
+ }
+
+ /* Compare flow key mask against table key mask. */
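+ /* The table key offset is relative to the start of the mbuf, while the
+ * offset computed so far counts packet bytes only, hence the adjustment
+ * by the mbuf header size plus headroom below.
+ */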
+ offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+
+ if (!hash_key_mask_is_same(params->key_mask,
+ params->key_offset,
+ params->key_size,
+ key_mask.key,
+ offset,
+ length,
+ &tpos,
+ &fpos))
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "HASH: Item list is not observing the match format");
+
+ /* Rule match. */
+ memset(rule_match, 0, sizeof(*rule_match));
+ rule_match->match_type = TABLE_HASH;
+ memcpy(&rule_match->match.hash.key[tpos],
+ &key.key[fpos],
+ RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
+ length - fpos));
+
+ return 0;
+}
+
+static int
+flow_rule_match_get(struct pmd_internals *softnic,
+ struct pipeline *pipeline,
+ struct softnic_table *table,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *item,
+ struct softnic_table_rule_match *rule_match,
+ struct rte_flow_error *error)
+{
+ switch (table->params.match_type) {
+ case TABLE_ACL:
+ return flow_rule_match_acl_get(softnic,
+ pipeline,
+ table,
+ attr,
+ item,
+ rule_match,
+ error);
+
+ case TABLE_HASH:
+ return flow_rule_match_hash_get(softnic,
+ pipeline,
+ table,
+ attr,
+ item,
+ rule_match,
+ error);
+
+ default:
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Unsupported pipeline table match type");
+ }
+}
+
+static int
+flow_rule_action_get(struct pmd_internals *softnic,
+ struct pipeline *pipeline,
+ struct softnic_table *table,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *action,
+ struct softnic_table_rule_action *rule_action,
+ struct rte_flow_error *error)
+{
+ struct softnic_table_action_profile *profile;
+ struct softnic_table_action_profile_params *params;
+ int n_jump_queue_rss_drop = 0;
+ int n_count = 0;
+ int n_mark = 0;
+ int n_vxlan_decap = 0;
+
+ profile = softnic_table_action_profile_find(softnic,
+ table->params.action_profile_name);
+ if (profile == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ action,
+ "JUMP: Table action profile");
+
+ params = &profile->params;
+
+ for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ {
+ const struct rte_flow_action_jump *conf = action->conf;
+ struct flow_attr_map *map;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "JUMP: Null configuration");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is"
+ " allowed per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "JUMP action not enabled for this table");
+
+ n_jump_queue_rss_drop = 1;
+
+ map = flow_attr_map_get(softnic,
+ conf->group,
+ attr->ingress);
+ if (map == NULL || map->valid == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "JUMP: Invalid group mapping");
+
+ if (strcmp(pipeline->name, map->pipeline_name) != 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "JUMP: Jump to table in different pipeline");
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ rule_action->fwd.id = map->table_id;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_JUMP */
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ char name[NAME_SIZE];
+ struct rte_eth_dev *dev;
+ const struct rte_flow_action_queue *conf = action->conf;
+ uint32_t port_id;
+ int status;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "QUEUE: Null configuration");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is allowed"
+ " per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "QUEUE action not enabled for this table");
+
+ n_jump_queue_rss_drop = 1;
+
+ dev = ETHDEV(softnic);
+ if (dev == NULL ||
+ conf->index >= dev->data->nb_rx_queues)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "QUEUE: Invalid RX queue ID");
+
+ sprintf(name, "RXQ%u", (uint32_t)conf->index);
+
+ status = softnic_pipeline_port_out_find(softnic,
+ pipeline->name,
+ name,
+ &port_id);
+ if (status)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "QUEUE: RX queue not accessible from this pipeline");
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
+ rule_action->fwd.id = port_id;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /*RTE_FLOW_ACTION_TYPE_QUEUE */
+
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ {
+ const struct rte_flow_action_rss *conf = action->conf;
+ uint32_t i;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "RSS: Null configuration");
+
+ if (!rte_is_power_of_2(conf->queue_num))
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf,
+ "RSS: Number of queues must be a power of 2");
+
+ if (conf->queue_num > RTE_DIM(rule_action->lb.out))
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf,
+ "RSS: Number of queues too big");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is allowed per flow");
+
+ if (((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
+ ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_LB)) == 0))
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "RSS action not supported by this table");
+
+ if (params->lb.out_offset !=
+ pipeline->params.offset_port_id)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "RSS action not supported by this pipeline");
+
+ n_jump_queue_rss_drop = 1;
+
+ /* RTE_TABLE_ACTION_LB */
+ for (i = 0; i < conf->queue_num; i++) {
+ char name[NAME_SIZE];
+ struct rte_eth_dev *dev;
+ uint32_t port_id;
+ int status;
+
+ dev = ETHDEV(softnic);
+ if (dev == NULL ||
+ conf->queue[i] >=
+ dev->data->nb_rx_queues)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "RSS: Invalid RX queue ID");
+
+ sprintf(name, "RXQ%u",
+ (uint32_t)conf->queue[i]);
+
+ status = softnic_pipeline_port_out_find(softnic,
+ pipeline->name,
+ name,
+ &port_id);
+ if (status)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "RSS: RX queue not accessible from this pipeline");
+
+ rule_action->lb.out[i] = port_id;
+ }
+
+ for ( ; i < RTE_DIM(rule_action->lb.out); i++)
+ rule_action->lb.out[i] =
+ rule_action->lb.out[i % conf->queue_num];
+
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_RSS */
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ {
+ const void *conf = action->conf;
+
+ if (conf != NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "DROP: No configuration required");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is allowed per flow");
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "DROP action not supported by this table");
+
+ n_jump_queue_rss_drop = 1;
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_DROP */
+
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ {
+ const struct rte_flow_action_count *conf = action->conf;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "COUNT: Null configuration");
+
+ if (conf->shared)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf,
+ "COUNT: Shared counters not supported");
+
+ if (n_count)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one COUNT action per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "COUNT action not supported by this table");
+
+ n_count = 1;
+
+ /* RTE_TABLE_ACTION_STATS */
+ rule_action->stats.n_packets = 0;
+ rule_action->stats.n_bytes = 0;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_COUNT */
+
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ const struct rte_flow_action_mark *conf = action->conf;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "MARK: Null configuration");
+
+ if (n_mark)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one MARK action per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_TAG)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "MARK action not supported by this table");
+
+ n_mark = 1;
+
+ /* RTE_TABLE_ACTION_TAG */
+ rule_action->tag.tag = conf->id;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_MARK */
+
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ {
+ const void *conf = action->conf;
+
+ if (conf)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "VXLAN DECAP: Non-null configuration");
+
+ if (n_vxlan_decap)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one VXLAN DECAP action per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_DECAP)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "VXLAN DECAP action not supported by this table");
+
+ n_vxlan_decap = 1;
+
+ /* RTE_TABLE_ACTION_DECAP */
+ rule_action->decap.n = 50; /* Ether (14) + IPv4 (20) + UDP (8) + VXLAN (8) */
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */
+
+ case RTE_FLOW_ACTION_TYPE_METER:
+ {
+ const struct rte_flow_action_meter *conf = action->conf;
+ struct softnic_mtr_meter_profile *mp;
+ struct softnic_mtr *m;
+ uint32_t table_id = table - pipeline->table;
+ uint32_t meter_profile_id;
+ int status;
+
+ if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "METER: Table action not supported");
+
+ if (params->mtr.n_tc != 1)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "METER: Multiple TCs not supported");
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "METER: Null configuration");
+
+ m = softnic_mtr_find(softnic, conf->mtr_id);
+
+ if (m == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "METER: Invalid meter ID");
+
+ if (m->flow)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "METER: Meter already attached to a flow");
+
+ meter_profile_id = m->params.meter_profile_id;
+ mp = softnic_mtr_meter_profile_find(softnic, meter_profile_id);
+
+ /* Add meter profile to pipeline table */
+ if (!softnic_pipeline_table_meter_profile_find(table,
+ meter_profile_id)) {
+ struct rte_table_action_meter_profile profile;
+
+ memset(&profile, 0, sizeof(profile));
+ profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
+ profile.trtcm.cir = mp->params.trtcm_rfc2698.cir;
+ profile.trtcm.pir = mp->params.trtcm_rfc2698.pir;
+ profile.trtcm.cbs = mp->params.trtcm_rfc2698.cbs;
+ profile.trtcm.pbs = mp->params.trtcm_rfc2698.pbs;
+
+ status = softnic_pipeline_table_mtr_profile_add(softnic,
+ pipeline->name,
+ table_id,
+ meter_profile_id,
+ &profile);
+ if (status) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "METER: Table meter profile add failed");
+ return -1;
+ }
+ }
+
+ /* RTE_TABLE_ACTION_METER */
+ rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
+ rule_action->mtr.mtr[0].policer[e_RTE_METER_GREEN] =
+ (enum rte_table_action_policer)m->params.action[RTE_MTR_GREEN];
+ rule_action->mtr.mtr[0].policer[e_RTE_METER_YELLOW] =
+ (enum rte_table_action_policer)m->params.action[RTE_MTR_YELLOW];
+ rule_action->mtr.mtr[0].policer[e_RTE_METER_RED] =
+ (enum rte_table_action_policer)m->params.action[RTE_MTR_RED];
+ rule_action->mtr.tc_mask = 1;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_METER */
+
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ {
+ const struct rte_flow_action_vxlan_encap *conf =
+ action->conf;
+ const struct rte_flow_item *item;
+ union flow_item spec, mask;
+ int disabled = 0, status;
+ size_t size;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "VXLAN ENCAP: Null configuration");
+
+ item = conf->definition;
+ if (item == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "VXLAN ENCAP: Null configuration definition");
+
+ if (!(params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_ENCAP)))
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "VXLAN ENCAP: Encap action not enabled for this table");
+
+ /* Check for Ether. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: first encap item should be ether");
+ }
+ ether_addr_copy(&spec.eth.dst,
+ &rule_action->encap.vxlan.ether.da);
+ ether_addr_copy(&spec.eth.src,
+ &rule_action->encap.vxlan.ether.sa);
+
+ item++;
+
+ /* Check for VLAN. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!params->encap.vxlan.vlan)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: vlan encap not supported by table");
+
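+ /* VLAN TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits). */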
+ uint16_t tci = rte_ntohs(spec.vlan.tci);
+ rule_action->encap.vxlan.vlan.pcp =
+ tci >> 13;
+ rule_action->encap.vxlan.vlan.dei =
+ (tci >> 12) & 0x1;
+ rule_action->encap.vxlan.vlan.vid =
+ tci & 0xfff;
+
+ item++;
+
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec,
+ &mask, &size, &disabled, error);
+ if (status)
+ return status;
+ } else {
+ if (params->encap.vxlan.vlan)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: expecting vlan encap item");
+ }
+
+ /* Check for IPV4/IPV6. */
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ {
+ rule_action->encap.vxlan.ipv4.sa =
+ rte_ntohl(spec.ipv4.hdr.src_addr);
+ rule_action->encap.vxlan.ipv4.da =
+ rte_ntohl(spec.ipv4.hdr.dst_addr);
+ rule_action->encap.vxlan.ipv4.dscp =
+ spec.ipv4.hdr.type_of_service >> 2;
+ rule_action->encap.vxlan.ipv4.ttl =
+ spec.ipv4.hdr.time_to_live;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ {
+ uint32_t vtc_flow;
+
+ memcpy(&rule_action->encap.vxlan.ipv6.sa,
+ &spec.ipv6.hdr.src_addr,
+ sizeof(spec.ipv6.hdr.src_addr));
+ memcpy(&rule_action->encap.vxlan.ipv6.da,
+ &spec.ipv6.hdr.dst_addr,
+ sizeof(spec.ipv6.hdr.dst_addr));
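+ /* IPv6 vtc_flow layout: Version (4 bits) | Traffic Class (8 bits,
+ * top 6 bits = DSCP) | Flow Label (20 bits).
+ */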
+ vtc_flow = rte_ntohl(spec.ipv6.hdr.vtc_flow);
+ rule_action->encap.vxlan.ipv6.flow_label =
+ vtc_flow & 0xfffff;
+ rule_action->encap.vxlan.ipv6.dscp =
+ (vtc_flow >> 22) & 0x3f;
+ rule_action->encap.vxlan.ipv6.hop_limit =
+ spec.ipv6.hdr.hop_limits;
+ break;
+ }
+ default:
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: encap item after ether should be ipv4/ipv6");
+ }
+
+ item++;
+
+ /* Check for UDP. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: encap item after ipv4/ipv6 should be udp");
+ }
+ rule_action->encap.vxlan.udp.sp =
+ rte_ntohs(spec.udp.hdr.src_port);
+ rule_action->encap.vxlan.udp.dp =
+ rte_ntohs(spec.udp.hdr.dst_port);
+
+ item++;
+
+ /* Check for VXLAN. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: encap item after udp should be vxlan");
+ }
+ rule_action->encap.vxlan.vxlan.vni =
+ (spec.vxlan.vni[0] << 16U) |
+ (spec.vxlan.vni[1] << 8U) |
+ spec.vxlan.vni[2];
+
+ item++;
+
+ /* Check for END. */
+ flow_item_skip_void(&item);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: expecting END item");
+
+ rule_action->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP */
+
+ default:
+ return -ENOTSUP;
+ }
+ }
+
+ if (n_jump_queue_rss_drop == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Flow does not have any terminating action");
+
+ return 0;
+}
+
+static int
+pmd_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *error)
+{
+ struct softnic_table_rule_match rule_match;
+ struct softnic_table_rule_action rule_action;
+
+ struct pmd_internals *softnic = dev->data->dev_private;
+ struct pipeline *pipeline;
+ struct softnic_table *table;
+ const char *pipeline_name = NULL;
+ uint32_t table_id = 0;
+ int status;
+
+ /* Check input parameters. */
+ if (attr == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "Null attr");
+
+ if (item == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Null item");
+
+ if (action == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Null action");
+
+ /* Identify the pipeline table to add this flow to. */
+ status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
+ &table_id, error);
+ if (status)
+ return status;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid pipeline name");
+
+ if (table_id >= pipeline->n_tables)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid pipeline table ID");
+
+ table = &pipeline->table[table_id];
+
+ /* Rule match. */
+ memset(&rule_match, 0, sizeof(rule_match));
+ status = flow_rule_match_get(softnic,
+ pipeline,
+ table,
+ attr,
+ item,
+ &rule_match,
+ error);
+ if (status)
+ return status;
+
+ /* Rule action. */
+ memset(&rule_action, 0, sizeof(rule_action));
+ status = flow_rule_action_get(softnic,
+ pipeline,
+ table,
+ attr,
+ action,
+ &rule_action,
+ error);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static struct softnic_mtr *
+flow_action_meter_get(struct pmd_internals *softnic,
+ const struct rte_flow_action *action)
+{
+ for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++)
+ if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
+ const struct rte_flow_action_meter *conf = action->conf;
+
+ if (conf == NULL)
+ return NULL;
+
+ return softnic_mtr_find(softnic, conf->mtr_id);
+ }
+
+ return NULL;
+}
+
+static void
+flow_meter_owner_reset(struct pmd_internals *softnic,
+ struct rte_flow *flow)
+{
+ struct softnic_mtr_list *ml = &softnic->mtr.mtrs;
+ struct softnic_mtr *m;
+
+ TAILQ_FOREACH(m, ml, node)
+ if (m->flow == flow) {
+ m->flow = NULL;
+ break;
+ }
+}
+
+static void
+flow_meter_owner_set(struct pmd_internals *softnic,
+ struct rte_flow *flow,
+ struct softnic_mtr *mtr)
+{
+ /* Reset current flow meter */
+ flow_meter_owner_reset(softnic, flow);
+
+ /* Set new flow meter */
+ mtr->flow = flow;
+}
+
+static int
+is_meter_action_enable(struct pmd_internals *softnic,
+ struct softnic_table *table)
+{
+ struct softnic_table_action_profile *profile =
+ softnic_table_action_profile_find(softnic,
+ table->params.action_profile_name);
+ struct softnic_table_action_profile_params *params = &profile->params;
+
+ return (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) ? 1 : 0;
+}
+
+static struct rte_flow *
+pmd_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *error)
+{
+ struct softnic_table_rule_match rule_match;
+ struct softnic_table_rule_action rule_action;
+ void *rule_data;
+
+ struct pmd_internals *softnic = dev->data->dev_private;
+ struct pipeline *pipeline;
+ struct softnic_table *table;
+ struct rte_flow *flow;
+ struct softnic_mtr *mtr;
+ const char *pipeline_name = NULL;
+ uint32_t table_id = 0;
+ int new_flow, status;
+
+ /* Check input parameters. */
+ if (attr == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "Null attr");
+ return NULL;
+ }
+
+ if (item == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Null item");
+ return NULL;
+ }
+
+ if (action == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Null action");
+ return NULL;
+ }
+
+ /* Identify the pipeline table to add this flow to. */
+ status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
+ &table_id, error);
+ if (status)
+ return NULL;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid pipeline name");
+ return NULL;
+ }
+
+ if (table_id >= pipeline->n_tables) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid pipeline table ID");
+ return NULL;
+ }
+
+ table = &pipeline->table[table_id];
+
+ /* Rule match. */
+ memset(&rule_match, 0, sizeof(rule_match));
+ status = flow_rule_match_get(softnic,
+ pipeline,
+ table,
+ attr,
+ item,
+ &rule_match,
+ error);
+ if (status)
+ return NULL;
+
+ /* Rule action. */
+ memset(&rule_action, 0, sizeof(rule_action));
+ status = flow_rule_action_get(softnic,
+ pipeline,
+ table,
+ attr,
+ action,
+ &rule_action,
+ error);
+ if (status)
+ return NULL;
+
+ /* Flow find/allocate. */
+ new_flow = 0;
+ flow = softnic_flow_find(table, &rule_match);
+ if (flow == NULL) {
+ new_flow = 1;
+ flow = calloc(1, sizeof(struct rte_flow));
+ if (flow == NULL) {
+ rte_flow_error_set(error,
+ ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Not enough memory for new flow");
+ return NULL;
+ }
+ }
+
+ /* Rule add. */
+ status = softnic_pipeline_table_rule_add(softnic,
+ pipeline_name,
+ table_id,
+ &rule_match,
+ &rule_action,
+ &rule_data);
+ if (status) {
+ if (new_flow)
+ free(flow);
+
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Pipeline table rule add failed");
+ return NULL;
+ }
+
+ /* Flow fill in. */
+ memcpy(&flow->match, &rule_match, sizeof(rule_match));
+ memcpy(&flow->action, &rule_action, sizeof(rule_action));
+ flow->data = rule_data;
+ flow->pipeline = pipeline;
+ flow->table_id = table_id;
+
+ mtr = flow_action_meter_get(softnic, action);
+ if (mtr)
+ flow_meter_owner_set(softnic, flow, mtr);
+
+ /* Flow add to list. */
+ if (new_flow)
+ TAILQ_INSERT_TAIL(&table->flows, flow, node);
+
+ return flow;
+}
+
+static int
+pmd_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *softnic = dev->data->dev_private;
+ struct softnic_table *table;
+ int status;
+
+ /* Check input parameters. */
+ if (flow == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Null flow");
+
+ table = &flow->pipeline->table[flow->table_id];
+
+ /* Rule delete. */
+ status = softnic_pipeline_table_rule_delete(softnic,
+ flow->pipeline->name,
+ flow->table_id,
+ &flow->match);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Pipeline table rule delete failed");
+
+ /* Update dependencies */
+ if (is_meter_action_enable(softnic, table))
+ flow_meter_owner_reset(softnic, flow);
+
+ /* Flow delete. */
+ TAILQ_REMOVE(&table->flows, flow, node);
+ free(flow);
+
+ return 0;
+}
+
+static int
+pmd_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *softnic = dev->data->dev_private;
+ struct pipeline *pipeline;
+ int fail_to_del_rule = 0;
+ uint32_t i;
+
+ TAILQ_FOREACH(pipeline, &softnic->pipeline_list, node) {
+ /* Remove all the flows added to the tables. */
+ for (i = 0; i < pipeline->n_tables; i++) {
+ struct softnic_table *table = &pipeline->table[i];
+ struct rte_flow *flow;
+ void *temp;
+ int status;
+
+ TAILQ_FOREACH_SAFE(flow, &table->flows, node, temp) {
+ /* Rule delete. */
+ status = softnic_pipeline_table_rule_delete
+ (softnic,
+ pipeline->name,
+ i,
+ &flow->match);
+ if (status)
+ fail_to_del_rule = 1;
+ /* Update dependencies */
+ if (is_meter_action_enable(softnic, table))
+ flow_meter_owner_reset(softnic, flow);
+
+ /* Flow delete. */
+ TAILQ_REMOVE(&table->flows, flow, node);
+ free(flow);
+ }
+ }
+ }
+
+ if (fail_to_del_rule)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Some of the rules could not be deleted");
+
+ return 0;
+}
+
+static int
+pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow,
+ const struct rte_flow_action *action __rte_unused,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct rte_table_action_stats_counters stats;
+ struct softnic_table *table;
+ struct rte_flow_query_count *flow_stats = data;
+ int status;
+
+ /* Check input parameters. */
+ if (flow == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Null flow");
+
+ if (data == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Null data");
+
+ table = &flow->pipeline->table[flow->table_id];
+
+ /* Rule stats read. */
+ status = rte_table_action_stats_read(table->a,
+ flow->data,
+ &stats,
+ flow_stats->reset);
+ if (status)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Pipeline table rule stats read failed");
+
+ /* Fill in flow stats. */
+ flow_stats->hits_set =
+ (table->ap->params.stats.n_packets_enabled) ? 1 : 0;
+ flow_stats->bytes_set =
+ (table->ap->params.stats.n_bytes_enabled) ? 1 : 0;
+ flow_stats->hits = stats.n_packets;
+ flow_stats->bytes = stats.n_bytes;
+
+ return 0;
+}
+
+const struct rte_flow_ops pmd_flow_ops = {
+ .validate = pmd_flow_validate,
+ .create = pmd_flow_create,
+ .destroy = pmd_flow_destroy,
+ .flush = pmd_flow_flush,
+ .query = pmd_flow_query,
+ .isolate = NULL,
+};
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index a25eb874..e12b8ae4 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -18,8 +18,11 @@
#include <rte_table_action.h>
#include <rte_pipeline.h>
+#include <rte_ethdev_core.h>
#include <rte_ethdev_driver.h>
#include <rte_tm_driver.h>
+#include <rte_flow_driver.h>
+#include <rte_mtr_driver.h>
#include "rte_eth_softnic.h"
#include "conn.h"
@@ -44,6 +47,57 @@ struct pmd_params {
};
/**
+ * Ethdev Flow API
+ */
+struct rte_flow;
+
+TAILQ_HEAD(flow_list, rte_flow);
+
+struct flow_attr_map {
+ char pipeline_name[NAME_SIZE];
+ uint32_t table_id;
+ int valid;
+};
+
+#ifndef SOFTNIC_FLOW_MAX_GROUPS
+#define SOFTNIC_FLOW_MAX_GROUPS 64
+#endif
+
+struct flow_internals {
+ struct flow_attr_map ingress_map[SOFTNIC_FLOW_MAX_GROUPS];
+ struct flow_attr_map egress_map[SOFTNIC_FLOW_MAX_GROUPS];
+};
+
+/**
+ * Meter
+ */
+
+/* MTR meter profile */
+struct softnic_mtr_meter_profile {
+ TAILQ_ENTRY(softnic_mtr_meter_profile) node;
+ uint32_t meter_profile_id;
+ struct rte_mtr_meter_profile params;
+ uint32_t n_users;
+};
+
+TAILQ_HEAD(softnic_mtr_meter_profile_list, softnic_mtr_meter_profile);
+
+/* MTR meter object */
+struct softnic_mtr {
+ TAILQ_ENTRY(softnic_mtr) node;
+ uint32_t mtr_id;
+ struct rte_mtr_params params;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(softnic_mtr_list, softnic_mtr);
+
+struct mtr_internals {
+ struct softnic_mtr_meter_profile_list meter_profiles;
+ struct softnic_mtr_list mtrs;
+};
+
+/**
* MEMPOOL
*/
struct softnic_mempool_params {
@@ -225,6 +279,25 @@ struct softnic_tap {
TAILQ_HEAD(softnic_tap_list, softnic_tap);
/**
+ * Cryptodev
+ */
+struct softnic_cryptodev_params {
+ const char *dev_name;
+ uint32_t dev_id; /**< Valid only when *dev_name* is NULL. */
+ uint32_t n_queues;
+ uint32_t queue_size;
+};
+
+struct softnic_cryptodev {
+ TAILQ_ENTRY(softnic_cryptodev) node;
+ char name[NAME_SIZE];
+ uint16_t dev_id;
+ uint32_t n_queues;
+};
+
+TAILQ_HEAD(softnic_cryptodev_list, softnic_cryptodev);
+
+/**
* Input port action
*/
struct softnic_port_in_action_profile_params {
@@ -255,6 +328,7 @@ struct softnic_table_action_profile_params {
struct rte_table_action_nat_config nat;
struct rte_table_action_ttl_config ttl;
struct rte_table_action_stats_config stats;
+ struct rte_table_action_sym_crypto_config sym_crypto;
};
struct softnic_table_action_profile {
@@ -266,6 +340,15 @@ struct softnic_table_action_profile {
TAILQ_HEAD(softnic_table_action_profile_list, softnic_table_action_profile);
+struct softnic_table_meter_profile {
+ TAILQ_ENTRY(softnic_table_meter_profile) node;
+ uint32_t meter_profile_id;
+ struct rte_table_action_meter_profile profile;
+};
+
+TAILQ_HEAD(softnic_table_meter_profile_list,
+ softnic_table_meter_profile);
+
/**
* Pipeline
*/
@@ -280,12 +363,13 @@ enum softnic_port_in_type {
PORT_IN_TMGR,
PORT_IN_TAP,
PORT_IN_SOURCE,
+ PORT_IN_CRYPTODEV,
};
struct softnic_port_in_params {
/* Read */
enum softnic_port_in_type type;
- const char *dev_name;
+ char dev_name[NAME_SIZE];
union {
struct {
uint16_t queue_id;
@@ -301,11 +385,17 @@ struct softnic_port_in_params {
const char *file_name;
uint32_t n_bytes_per_pkt;
} source;
+
+ struct {
+ uint16_t queue_id;
+ void *f_callback;
+ void *arg_callback;
+ } cryptodev;
};
uint32_t burst_size;
/* Action */
- const char *action_profile_name;
+ char action_profile_name[NAME_SIZE];
};
enum softnic_port_out_type {
@@ -314,11 +404,12 @@ enum softnic_port_out_type {
PORT_OUT_TMGR,
PORT_OUT_TAP,
PORT_OUT_SINK,
+ PORT_OUT_CRYPTODEV,
};
struct softnic_port_out_params {
enum softnic_port_out_type type;
- const char *dev_name;
+ char dev_name[NAME_SIZE];
union {
struct {
uint16_t queue_id;
@@ -328,6 +419,11 @@ struct softnic_port_out_params {
const char *file_name;
uint32_t max_n_pkts;
} sink;
+
+ struct {
+ uint16_t queue_id;
+ uint32_t op_offset;
+ } cryptodev;
};
uint32_t burst_size;
int retry;
@@ -353,11 +449,15 @@ struct softnic_table_array_params {
uint32_t key_offset;
};
+#ifndef TABLE_RULE_MATCH_SIZE_MAX
+#define TABLE_RULE_MATCH_SIZE_MAX 256
+#endif
+
struct softnic_table_hash_params {
uint32_t n_keys;
uint32_t key_offset;
uint32_t key_size;
- uint8_t *key_mask;
+ uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
uint32_t n_buckets;
int extendable_bucket;
};
@@ -379,7 +479,7 @@ struct softnic_table_params {
} match;
/* Action */
- const char *action_profile_name;
+ char action_profile_name[NAME_SIZE];
};
struct softnic_port_in {
@@ -388,10 +488,17 @@ struct softnic_port_in {
struct rte_port_in_action *a;
};
+struct softnic_port_out {
+ struct softnic_port_out_params params;
+};
+
struct softnic_table {
struct softnic_table_params params;
struct softnic_table_action_profile *ap;
struct rte_table_action *a;
+ struct flow_list flows;
+ struct rte_table_action_dscp_table dscp_table;
+ struct softnic_table_meter_profile_list meter_profiles;
};
struct pipeline {
@@ -399,7 +506,9 @@ struct pipeline {
char name[NAME_SIZE];
struct rte_pipeline *p;
+ struct pipeline_params params;
struct softnic_port_in port_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct softnic_port_out port_out[RTE_PIPELINE_PORT_OUT_MAX];
struct softnic_table table[RTE_PIPELINE_TABLE_MAX];
uint32_t n_ports_in;
uint32_t n_ports_out;
@@ -489,12 +598,16 @@ struct pmd_internals {
struct tm_internals tm; /**< Traffic Management */
} soft;
+ struct flow_internals flow;
+ struct mtr_internals mtr;
+
struct softnic_conn *conn;
struct softnic_mempool_list mempool_list;
struct softnic_swq_list swq_list;
struct softnic_link_list link_list;
struct softnic_tmgr_port_list tmgr_port_list;
struct softnic_tap_list tap_list;
+ struct softnic_cryptodev_list cryptodev_list;
struct softnic_port_in_action_profile_list port_in_action_profile_list;
struct softnic_table_action_profile_list table_action_profile_list;
struct pipeline_list pipeline_list;
@@ -502,6 +615,58 @@ struct pmd_internals {
struct softnic_thread_data thread_data[RTE_MAX_LCORE];
};
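+/* Resolve the ethdev that belongs to this softnic instance by device name. */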
+static inline struct rte_eth_dev *
+ETHDEV(struct pmd_internals *softnic)
+{
+ uint16_t port_id;
+ int status;
+
+ if (softnic == NULL)
+ return NULL;
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return NULL;
+
+ return &rte_eth_devices[port_id];
+}
+
+/**
+ * Ethdev Flow API
+ */
+int
+flow_attr_map_set(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress,
+ const char *pipeline_name,
+ uint32_t table_id);
+
+struct flow_attr_map *
+flow_attr_map_get(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress);
+
+extern const struct rte_flow_ops pmd_flow_ops;
+
+/**
+ * Meter
+ */
+int
+softnic_mtr_init(struct pmd_internals *p);
+
+void
+softnic_mtr_free(struct pmd_internals *p);
+
+struct softnic_mtr *
+softnic_mtr_find(struct pmd_internals *p,
+ uint32_t mtr_id);
+
+struct softnic_mtr_meter_profile *
+softnic_mtr_meter_profile_find(struct pmd_internals *p,
+ uint32_t meter_profile_id);
+
+extern const struct rte_mtr_ops pmd_mtr_ops;
+
/**
* MEMPOOL
*/
@@ -610,6 +775,24 @@ softnic_tap_create(struct pmd_internals *p,
const char *name);
/**
+ * Sym Crypto
+ */
+int
+softnic_cryptodev_init(struct pmd_internals *p);
+
+void
+softnic_cryptodev_free(struct pmd_internals *p);
+
+struct softnic_cryptodev *
+softnic_cryptodev_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_cryptodev *
+softnic_cryptodev_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_cryptodev_params *params);
+
+/**
* Input port action
*/
int
@@ -683,10 +866,20 @@ softnic_pipeline_port_out_create(struct pmd_internals *p,
struct softnic_port_out_params *params);
int
+softnic_pipeline_port_out_find(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ const char *name,
+ uint32_t *port_id);
+
+int
softnic_pipeline_table_create(struct pmd_internals *p,
const char *pipeline_name,
struct softnic_table_params *params);
+struct softnic_table_meter_profile *
+softnic_pipeline_table_meter_profile_find(struct softnic_table *table,
+ uint32_t meter_profile_id);
+
struct softnic_table_rule_match_acl {
int ip_version;
@@ -718,10 +911,6 @@ struct softnic_table_rule_match_array {
uint32_t pos;
};
-#ifndef TABLE_RULE_MATCH_SIZE_MAX
-#define TABLE_RULE_MATCH_SIZE_MAX 256
-#endif
-
struct softnic_table_rule_match_hash {
uint8_t key[TABLE_RULE_MATCH_SIZE_MAX];
};
@@ -760,6 +949,18 @@ struct softnic_table_rule_action {
struct rte_table_action_ttl_params ttl;
struct rte_table_action_stats_params stats;
struct rte_table_action_time_params time;
+ struct rte_table_action_tag_params tag;
+ struct rte_table_action_decap_params decap;
+ struct rte_table_action_sym_crypto_params sym_crypto;
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ struct softnic_table_rule_match match;
+ struct softnic_table_rule_action action;
+ void *data;
+ struct pipeline *pipeline;
+ uint32_t table_id;
};
int
diff --git a/drivers/net/softnic/rte_eth_softnic_meter.c b/drivers/net/softnic/rte_eth_softnic_meter.c
new file mode 100644
index 00000000..73ecf3b1
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_meter.c
@@ -0,0 +1,728 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_mtr_init(struct pmd_internals *p)
+{
+ /* Initialize meter profiles list */
+ TAILQ_INIT(&p->mtr.meter_profiles);
+
+ /* Initialize MTR objects list */
+ TAILQ_INIT(&p->mtr.mtrs);
+
+ return 0;
+}
+
+void
+softnic_mtr_free(struct pmd_internals *p)
+{
+ /* Remove MTR objects */
+ for ( ; ; ) {
+ struct softnic_mtr *m;
+
+ m = TAILQ_FIRST(&p->mtr.mtrs);
+ if (m == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->mtr.mtrs, m, node);
+ free(m);
+ }
+
+ /* Remove meter profiles */
+ for ( ; ; ) {
+ struct softnic_mtr_meter_profile *mp;
+
+ mp = TAILQ_FIRST(&p->mtr.meter_profiles);
+ if (mp == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->mtr.meter_profiles, mp, node);
+ free(mp);
+ }
+}
+
+struct softnic_mtr_meter_profile *
+softnic_mtr_meter_profile_find(struct pmd_internals *p,
+ uint32_t meter_profile_id)
+{
+ struct softnic_mtr_meter_profile_list *mpl = &p->mtr.meter_profiles;
+ struct softnic_mtr_meter_profile *mp;
+
+ TAILQ_FOREACH(mp, mpl, node)
+ if (meter_profile_id == mp->meter_profile_id)
+ return mp;
+
+ return NULL;
+}
+
+static int
+meter_profile_check(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_mtr_meter_profile *mp;
+
+ /* Meter profile ID must be valid. */
+ if (meter_profile_id == UINT32_MAX)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL,
+ "Meter profile id not valid");
+
+ /* Meter profile must not exist. */
+ mp = softnic_mtr_meter_profile_find(p, meter_profile_id);
+ if (mp)
+ return -rte_mtr_error_set(error,
+ EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL,
+ "Meter prfile already exists");
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ NULL,
+ "profile null");
+
+ /* Traffic metering algorithm : TRTCM_RFC2698 */
+ if (profile->alg != RTE_MTR_TRTCM_RFC2698)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ NULL,
+ "Metering alg not supported");
+
+ return 0;
+}
+
+/* MTR meter profile add */
+static int
+pmd_mtr_meter_profile_add(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_mtr_meter_profile_list *mpl = &p->mtr.meter_profiles;
+ struct softnic_mtr_meter_profile *mp;
+ int status;
+
+ /* Check input params */
+ status = meter_profile_check(dev, meter_profile_id, profile, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ mp = calloc(1, sizeof(struct softnic_mtr_meter_profile));
+ if (mp == NULL)
+ return -rte_mtr_error_set(error,
+ ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Memory alloc failed");
+
+ /* Fill in */
+ mp->meter_profile_id = meter_profile_id;
+ memcpy(&mp->params, profile, sizeof(mp->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(mpl, mp, node);
+
+ return 0;
+}
+
+/* MTR meter profile delete */
+static int
+pmd_mtr_meter_profile_delete(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_mtr_meter_profile *mp;
+
+ /* Meter profile must exist */
+ mp = softnic_mtr_meter_profile_find(p, meter_profile_id);
+ if (mp == NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL,
+ "Meter profile id invalid");
+
+ /* Check unused */
+ if (mp->n_users)
+ return -rte_mtr_error_set(error,
+ EBUSY,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL,
+ "Meter profile in use");
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->mtr.meter_profiles, mp, node);
+ free(mp);
+
+ return 0;
+}
+
+struct softnic_mtr *
+softnic_mtr_find(struct pmd_internals *p, uint32_t mtr_id)
+{
+ struct softnic_mtr_list *ml = &p->mtr.mtrs;
+ struct softnic_mtr *m;
+
+ TAILQ_FOREACH(m, ml, node)
+ if (m->mtr_id == mtr_id)
+ return m;
+
+ return NULL;
+}
+
+
+static int
+mtr_check(struct pmd_internals *p,
+ uint32_t mtr_id,
+ struct rte_mtr_params *params,
+ int shared,
+ struct rte_mtr_error *error)
+{
+ /* MTR id valid */
+ if (softnic_mtr_find(p, mtr_id))
+ return -rte_mtr_error_set(error,
+ EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL,
+ "MTR object already exists");
+
+ /* MTR params must not be NULL */
+ if (params == NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL,
+ "MTR object params null");
+
+ /* Previous meter color not supported */
+ if (params->use_prev_mtr_color)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ NULL,
+ "Previous meter color not supported");
+
+ /* Shared MTR object not supported */
+ if (shared)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_SHARED,
+ NULL,
+ "Shared MTR object not supported");
+
+ return 0;
+}
+
+/* MTR object create */
+static int
+pmd_mtr_create(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_params *params,
+ int shared,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_mtr_list *ml = &p->mtr.mtrs;
+ struct softnic_mtr_meter_profile *mp;
+ struct softnic_mtr *m;
+ int status;
+
+ /* Check parameters */
+ status = mtr_check(p, mtr_id, params, shared, error);
+ if (status)
+ return status;
+
+ /* Meter profile must exist */
+ mp = softnic_mtr_meter_profile_find(p, params->meter_profile_id);
+ if (mp == NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL,
+ "Meter profile id not valid");
+
+ /* Memory allocation */
+ m = calloc(1, sizeof(struct softnic_mtr));
+ if (m == NULL)
+ return -rte_mtr_error_set(error,
+ ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Memory alloc failed");
+
+ /* Fill in */
+ m->mtr_id = mtr_id;
+ memcpy(&m->params, params, sizeof(m->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(ml, m, node);
+
+ /* Update dependencies */
+ mp->n_users++;
+
+ return 0;
+}
+
+/* MTR object destroy */
+static int
+pmd_mtr_destroy(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_mtr_list *ml = &p->mtr.mtrs;
+ struct softnic_mtr_meter_profile *mp;
+ struct softnic_mtr *m;
+
+ /* MTR object must exist */
+ m = softnic_mtr_find(p, mtr_id);
+ if (m == NULL)
+ return -rte_mtr_error_set(error,
+ EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL,
+ "MTR object id not valid");
+
+ /* MTR object must not have any owner */
+ if (m->flow != NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "MTR object is being used");
+
+ /* Get meter profile */
+ mp = softnic_mtr_meter_profile_find(p, m->params.meter_profile_id);
+ if (mp == NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL,
+ "MTR object meter profile invalid");
+
+ /* Update dependencies */
+ mp->n_users--;
+
+ /* Remove from list */
+ TAILQ_REMOVE(ml, m, node);
+ free(m);
+
+ return 0;
+}
+
+/* MTR object meter profile update */
+static int
+pmd_mtr_meter_profile_update(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_mtr_meter_profile *mp_new, *mp_old;
+ struct softnic_mtr *m;
+ int status;
+
+ /* MTR object id must be valid */
+ m = softnic_mtr_find(p, mtr_id);
+ if (m == NULL)
+ return -rte_mtr_error_set(error,
+ EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL,
+ "MTR object id not valid");
+
+ /* Meter profile id must be valid */
+ mp_new = softnic_mtr_meter_profile_find(p, meter_profile_id);
+ if (mp_new == NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL,
+ "Meter profile not valid");
+
+ /* MTR object already set to meter profile id */
+ if (m->params.meter_profile_id == meter_profile_id)
+ return 0;
+
+ /* MTR object owner table update */
+ if (m->flow) {
+ uint32_t table_id = m->flow->table_id;
+ struct softnic_table *table = &m->flow->pipeline->table[table_id];
+ struct softnic_table_rule_action action;
+
+ if (!softnic_pipeline_table_meter_profile_find(table,
+ meter_profile_id)) {
+ struct rte_table_action_meter_profile profile;
+
+ memset(&profile, 0, sizeof(profile));
+
+ profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
+ profile.trtcm.cir = mp_new->params.trtcm_rfc2698.cir;
+ profile.trtcm.pir = mp_new->params.trtcm_rfc2698.pir;
+ profile.trtcm.cbs = mp_new->params.trtcm_rfc2698.cbs;
+ profile.trtcm.pbs = mp_new->params.trtcm_rfc2698.pbs;
+
+ /* Add meter profile to pipeline table */
+ status = softnic_pipeline_table_mtr_profile_add(p,
+ m->flow->pipeline->name,
+ table_id,
+ meter_profile_id,
+ &profile);
+ if (status)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Table meter profile add failed");
+ }
+
+ /* Set meter action */
+ memcpy(&action, &m->flow->action, sizeof(action));
+
+ action.mtr.mtr[0].meter_profile_id = meter_profile_id;
+
+ /* Re-add rule */
+ status = softnic_pipeline_table_rule_add(p,
+ m->flow->pipeline->name,
+ table_id,
+ &m->flow->match,
+ &action,
+ &m->flow->data);
+ if (status)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Pipeline table rule add failed");
+
+ /* Flow: update meter action */
+ memcpy(&m->flow->action, &action, sizeof(m->flow->action));
+ }
+
+ mp_old = softnic_mtr_meter_profile_find(p, m->params.meter_profile_id);
+
+ /* Meter: Set meter profile */
+ m->params.meter_profile_id = meter_profile_id;
+
+	/* Update dependencies */
+ mp_old->n_users--;
+ mp_new->n_users++;
+
+ return 0;
+}
+
+/* MTR object meter DSCP table update */
+static int
+pmd_mtr_meter_dscp_table_update(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ enum rte_mtr_color *dscp_table,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_table_action_dscp_table dt;
+ struct pipeline *pipeline;
+ struct softnic_table *table;
+ struct softnic_mtr *m;
+ uint32_t table_id, i;
+ int status;
+
+ /* MTR object id must be valid */
+ m = softnic_mtr_find(p, mtr_id);
+ if (m == NULL)
+ return -rte_mtr_error_set(error,
+ EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL,
+ "MTR object id not valid");
+
+ /* MTR object owner valid? */
+ if (m->flow == NULL)
+ return 0;
+
+ pipeline = m->flow->pipeline;
+ table_id = m->flow->table_id;
+ table = &pipeline->table[table_id];
+
+ memcpy(&dt, &table->dscp_table, sizeof(dt));
+ for (i = 0; i < RTE_DIM(dt.entry); i++)
+ dt.entry[i].color = (enum rte_meter_color)dscp_table[i];
+
+ /* Update table */
+ status = softnic_pipeline_table_dscp_table_update(p,
+ pipeline->name,
+ table_id,
+ UINT64_MAX,
+ &dt);
+ if (status)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Table action dscp table update failed");
+
+ return 0;
+}
+
+/* MTR object policer action update */
+static int
+pmd_mtr_policer_actions_update(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ uint32_t action_mask,
+ enum rte_mtr_policer_action *actions,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_mtr *m;
+ uint32_t i;
+ int status;
+
+ /* MTR object id must be valid */
+ m = softnic_mtr_find(p, mtr_id);
+ if (m == NULL)
+ return -rte_mtr_error_set(error,
+ EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL,
+ "MTR object id not valid");
+
+ /* Valid policer actions */
+ if (actions == NULL)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Invalid actions");
+
+ for (i = 0; i < RTE_MTR_COLORS; i++) {
+ if (action_mask & (1 << i)) {
+ if (actions[i] != MTR_POLICER_ACTION_COLOR_GREEN &&
+ actions[i] != MTR_POLICER_ACTION_COLOR_YELLOW &&
+ actions[i] != MTR_POLICER_ACTION_COLOR_RED &&
+ actions[i] != MTR_POLICER_ACTION_DROP) {
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+					"Invalid action value");
+ }
+ }
+ }
+
+ /* MTR object owner valid? */
+ if (m->flow) {
+ struct pipeline *pipeline = m->flow->pipeline;
+ struct softnic_table *table = &pipeline->table[m->flow->table_id];
+ struct softnic_table_rule_action action;
+
+ memcpy(&action, &m->flow->action, sizeof(action));
+
+ /* Set action */
+ for (i = 0; i < RTE_MTR_COLORS; i++)
+ if (action_mask & (1 << i))
+ action.mtr.mtr[0].policer[i] =
+ (enum rte_table_action_policer)actions[i];
+
+ /* Re-add the rule */
+ status = softnic_pipeline_table_rule_add(p,
+ pipeline->name,
+ m->flow->table_id,
+ &m->flow->match,
+ &action,
+ &m->flow->data);
+ if (status)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Pipeline table rule re-add failed");
+
+ /* Flow: Update meter action */
+ memcpy(&m->flow->action, &action, sizeof(m->flow->action));
+
+ /* Reset the meter stats */
+ rte_table_action_meter_read(table->a, m->flow->data,
+ 1, NULL, 1);
+ }
+
+ /* Meter: Update policer actions */
+ for (i = 0; i < RTE_MTR_COLORS; i++)
+ if (action_mask & (1 << i))
+ m->params.action[i] = actions[i];
+
+ return 0;
+}
+
+#define MTR_STATS_PKTS_DEFAULT (RTE_MTR_STATS_N_PKTS_GREEN | \
+ RTE_MTR_STATS_N_PKTS_YELLOW | \
+ RTE_MTR_STATS_N_PKTS_RED | \
+ RTE_MTR_STATS_N_PKTS_DROPPED)
+
+#define MTR_STATS_BYTES_DEFAULT (RTE_MTR_STATS_N_BYTES_GREEN | \
+ RTE_MTR_STATS_N_BYTES_YELLOW | \
+ RTE_MTR_STATS_N_BYTES_RED | \
+ RTE_MTR_STATS_N_BYTES_DROPPED)
+
+/* MTR object stats convert */
+static void
+mtr_stats_convert(struct softnic_mtr *m,
+ struct rte_table_action_mtr_counters_tc *in,
+ struct rte_mtr_stats *out,
+ uint64_t *out_mask)
+{
+	memset(out, 0, sizeof(*out));
+ *out_mask = 0;
+
+ if (in->n_packets_valid) {
+ uint32_t i;
+
+ for (i = 0; i < RTE_MTR_COLORS; i++) {
+ if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_GREEN)
+ out->n_pkts[RTE_MTR_GREEN] += in->n_packets[i];
+
+ if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_YELLOW)
+ out->n_pkts[RTE_MTR_YELLOW] += in->n_packets[i];
+
+ if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_RED)
+ out->n_pkts[RTE_MTR_RED] += in->n_packets[i];
+
+ if (m->params.action[i] == MTR_POLICER_ACTION_DROP)
+ out->n_pkts_dropped += in->n_packets[i];
+ }
+
+ *out_mask |= MTR_STATS_PKTS_DEFAULT;
+ }
+
+ if (in->n_bytes_valid) {
+ uint32_t i;
+
+ for (i = 0; i < RTE_MTR_COLORS; i++) {
+ if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_GREEN)
+ out->n_bytes[RTE_MTR_GREEN] += in->n_bytes[i];
+
+ if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_YELLOW)
+ out->n_bytes[RTE_MTR_YELLOW] += in->n_bytes[i];
+
+ if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_RED)
+ out->n_bytes[RTE_MTR_RED] += in->n_bytes[i];
+
+ if (m->params.action[i] == MTR_POLICER_ACTION_DROP)
+ out->n_bytes_dropped += in->n_bytes[i];
+ }
+
+ *out_mask |= MTR_STATS_BYTES_DEFAULT;
+ }
+}
+
+/* MTR object stats read */
+static int
+pmd_mtr_stats_read(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_mtr_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_table_action_mtr_counters counters;
+ struct pipeline *pipeline;
+ struct softnic_table *table;
+ struct softnic_mtr *m;
+ int status;
+
+ /* MTR object id must be valid */
+ m = softnic_mtr_find(p, mtr_id);
+ if (m == NULL)
+ return -rte_mtr_error_set(error,
+ EEXIST,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL,
+ "MTR object id not valid");
+
+ /* MTR meter object owner valid? */
+ if (m->flow == NULL) {
+ if (stats != NULL)
+ memset(stats, 0, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = MTR_STATS_PKTS_DEFAULT |
+ MTR_STATS_BYTES_DEFAULT;
+
+ return 0;
+ }
+
+ pipeline = m->flow->pipeline;
+ table = &pipeline->table[m->flow->table_id];
+
+ /* Meter stats read. */
+ status = rte_table_action_meter_read(table->a,
+ m->flow->data,
+ 1,
+ &counters,
+ clear);
+ if (status)
+ return -rte_mtr_error_set(error,
+ EINVAL,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Meter stats read failed");
+
+ /* Stats format conversion. */
+ if (stats || stats_mask) {
+ struct rte_mtr_stats s;
+ uint64_t s_mask = 0;
+
+ mtr_stats_convert(m,
+ &counters.stats[0],
+ &s,
+ &s_mask);
+
+ if (stats)
+ memcpy(stats, &s, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = s_mask;
+ }
+
+ return 0;
+}
+
+const struct rte_mtr_ops pmd_mtr_ops = {
+ .capabilities_get = NULL,
+
+ .meter_profile_add = pmd_mtr_meter_profile_add,
+ .meter_profile_delete = pmd_mtr_meter_profile_delete,
+
+ .create = pmd_mtr_create,
+ .destroy = pmd_mtr_destroy,
+ .meter_enable = NULL,
+ .meter_disable = NULL,
+
+ .meter_profile_update = pmd_mtr_meter_profile_update,
+ .meter_dscp_table_update = pmd_mtr_meter_dscp_table_update,
+ .policer_actions_update = pmd_mtr_policer_actions_update,
+ .stats_update = NULL,
+
+ .stats_read = pmd_mtr_stats_read,
+};
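
For context, a minimal sketch of how an application could exercise these ops through the generic rte_mtr API once the soft NIC port is up. This is not part of the patch; the port id, object ids and rate values are placeholders, and the rte_mtr API of this release is experimental (ALLOW_EXPERIMENTAL_API). Counters only become meaningful after a flow rule with an RTE_FLOW_ACTION_TYPE_METER action takes ownership of the MTR object.

#include <string.h>
#include <rte_mtr.h>

/* Hedged example, not part of the patch: port_id, ids and rates are
 * placeholders. Uses trTCM RFC 2698, the algorithm handled above.
 */
static int
softnic_meter_example(uint16_t port_id)
{
	struct rte_mtr_meter_profile profile;
	struct rte_mtr_params params;
	struct rte_mtr_stats stats;
	struct rte_mtr_error error;
	uint64_t stats_mask;
	int ret;

	memset(&profile, 0, sizeof(profile));
	profile.alg = RTE_MTR_TRTCM_RFC2698;
	profile.trtcm_rfc2698.cir = 1000000; /* committed rate, bytes/s */
	profile.trtcm_rfc2698.pir = 2000000; /* peak rate, bytes/s */
	profile.trtcm_rfc2698.cbs = 2048;
	profile.trtcm_rfc2698.pbs = 4096;
	ret = rte_mtr_meter_profile_add(port_id, 0, &profile, &error);
	if (ret)
		return ret;

	/* Non-shared MTR object; previous color input is not supported. */
	memset(&params, 0, sizeof(params));
	params.meter_profile_id = 0;
	params.use_prev_mtr_color = 0;
	params.meter_enable = 1;
	params.action[RTE_MTR_GREEN] = MTR_POLICER_ACTION_COLOR_GREEN;
	params.action[RTE_MTR_YELLOW] = MTR_POLICER_ACTION_COLOR_YELLOW;
	params.action[RTE_MTR_RED] = MTR_POLICER_ACTION_DROP;
	ret = rte_mtr_create(port_id, 0, &params, 0 /* shared */, &error);
	if (ret)
		return ret;

	/* Read and clear counters (all zero until a flow owns the meter). */
	return rte_mtr_stats_read(port_id, 0, &stats, &stats_mask, 1, &error);
}
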
diff --git a/drivers/net/softnic/rte_eth_softnic_pipeline.c b/drivers/net/softnic/rte_eth_softnic_pipeline.c
index 45136a4a..5e180f8f 100644
--- a/drivers/net/softnic/rte_eth_softnic_pipeline.c
+++ b/drivers/net/softnic/rte_eth_softnic_pipeline.c
@@ -15,18 +15,18 @@
#include <rte_port_source_sink.h>
#include <rte_port_fd.h>
#include <rte_port_sched.h>
+#include <rte_port_sym_crypto.h>
#include <rte_table_acl.h>
#include <rte_table_array.h>
#include <rte_table_hash.h>
+#include <rte_table_hash_func.h>
#include <rte_table_lpm.h>
#include <rte_table_lpm_ipv6.h>
#include <rte_table_stub.h>
#include "rte_eth_softnic_internals.h"
-#include "hash_func.h"
-
#ifndef PIPELINE_MSGQ_SIZE
#define PIPELINE_MSGQ_SIZE 64
#endif
@@ -43,17 +43,52 @@ softnic_pipeline_init(struct pmd_internals *p)
return 0;
}
+static void
+softnic_pipeline_table_free(struct softnic_table *table)
+{
+ for ( ; ; ) {
+ struct rte_flow *flow;
+
+ flow = TAILQ_FIRST(&table->flows);
+ if (flow == NULL)
+ break;
+
+ TAILQ_REMOVE(&table->flows, flow, node);
+ free(flow);
+ }
+
+ for ( ; ; ) {
+ struct softnic_table_meter_profile *mp;
+
+ mp = TAILQ_FIRST(&table->meter_profiles);
+ if (mp == NULL)
+ break;
+
+ TAILQ_REMOVE(&table->meter_profiles, mp, node);
+ free(mp);
+ }
+}
+
void
softnic_pipeline_free(struct pmd_internals *p)
{
for ( ; ; ) {
struct pipeline *pipeline;
+ uint32_t table_id;
pipeline = TAILQ_FIRST(&p->pipeline_list);
if (pipeline == NULL)
break;
TAILQ_REMOVE(&p->pipeline_list, pipeline, node);
+
+ for (table_id = 0; table_id < pipeline->n_tables; table_id++) {
+ struct softnic_table *table =
+ &pipeline->table[table_id];
+
+ softnic_pipeline_table_free(table);
+ }
+
rte_ring_free(pipeline->msgq_req);
rte_ring_free(pipeline->msgq_rsp);
rte_pipeline_free(pipeline->p);
@@ -160,6 +195,7 @@ softnic_pipeline_create(struct pmd_internals *softnic,
/* Node fill in */
strlcpy(pipeline->name, name, sizeof(pipeline->name));
pipeline->p = p;
+ memcpy(&pipeline->params, params, sizeof(*params));
pipeline->n_ports_in = 0;
pipeline->n_ports_out = 0;
pipeline->n_tables = 0;
@@ -189,6 +225,7 @@ softnic_pipeline_port_in_create(struct pmd_internals *softnic,
struct rte_port_sched_reader_params sched;
struct rte_port_fd_reader_params fd;
struct rte_port_source_params source;
+ struct rte_port_sym_crypto_reader_params cryptodev;
} pp;
struct pipeline *pipeline;
@@ -213,7 +250,7 @@ softnic_pipeline_port_in_create(struct pmd_internals *softnic,
return -1;
ap = NULL;
- if (params->action_profile_name) {
+ if (strlen(params->action_profile_name)) {
ap = softnic_port_in_action_profile_find(softnic,
params->action_profile_name);
if (ap == NULL)
@@ -306,6 +343,23 @@ softnic_pipeline_port_in_create(struct pmd_internals *softnic,
break;
}
+ case PORT_IN_CRYPTODEV:
+ {
+ struct softnic_cryptodev *cryptodev;
+
+ cryptodev = softnic_cryptodev_find(softnic, params->dev_name);
+ if (cryptodev == NULL)
+ return -1;
+
+ pp.cryptodev.cryptodev_id = cryptodev->dev_id;
+ pp.cryptodev.queue_id = params->cryptodev.queue_id;
+ pp.cryptodev.f_callback = params->cryptodev.f_callback;
+ pp.cryptodev.arg_callback = params->cryptodev.arg_callback;
+ p.ops = &rte_port_sym_crypto_reader_ops;
+ p.arg_create = &pp.cryptodev;
+ break;
+ }
+
default:
return -1;
}
@@ -392,15 +446,18 @@ softnic_pipeline_port_out_create(struct pmd_internals *softnic,
struct rte_port_sched_writer_params sched;
struct rte_port_fd_writer_params fd;
struct rte_port_sink_params sink;
+ struct rte_port_sym_crypto_writer_params cryptodev;
} pp;
union {
struct rte_port_ethdev_writer_nodrop_params ethdev;
struct rte_port_ring_writer_nodrop_params ring;
struct rte_port_fd_writer_nodrop_params fd;
+ struct rte_port_sym_crypto_writer_nodrop_params cryptodev;
} pp_nodrop;
struct pipeline *pipeline;
+ struct softnic_port_out *port_out;
uint32_t port_id;
int status;
@@ -526,6 +583,40 @@ softnic_pipeline_port_out_create(struct pmd_internals *softnic,
break;
}
+ case PORT_OUT_CRYPTODEV:
+ {
+ struct softnic_cryptodev *cryptodev;
+
+ cryptodev = softnic_cryptodev_find(softnic, params->dev_name);
+ if (cryptodev == NULL)
+ return -1;
+
+ if (params->cryptodev.queue_id >= cryptodev->n_queues)
+ return -1;
+
+ pp.cryptodev.cryptodev_id = cryptodev->dev_id;
+ pp.cryptodev.queue_id = params->cryptodev.queue_id;
+ pp.cryptodev.tx_burst_sz = params->burst_size;
+ pp.cryptodev.crypto_op_offset = params->cryptodev.op_offset;
+
+ pp_nodrop.cryptodev.cryptodev_id = cryptodev->dev_id;
+ pp_nodrop.cryptodev.queue_id = params->cryptodev.queue_id;
+ pp_nodrop.cryptodev.tx_burst_sz = params->burst_size;
+ pp_nodrop.cryptodev.n_retries = params->retry;
+ pp_nodrop.cryptodev.crypto_op_offset =
+ params->cryptodev.op_offset;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_sym_crypto_writer_ops;
+ p.arg_create = &pp.cryptodev;
+ } else {
+ p.ops = &rte_port_sym_crypto_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.cryptodev;
+ }
+
+ break;
+ }
+
default:
return -1;
}
@@ -542,6 +633,8 @@ softnic_pipeline_port_out_create(struct pmd_internals *softnic,
return -1;
/* Pipeline */
+ port_out = &pipeline->port_out[pipeline->n_ports_out];
+ memcpy(&port_out->params, params, sizeof(*params));
pipeline->n_ports_out++;
return 0;
@@ -730,7 +823,7 @@ softnic_pipeline_table_create(struct pmd_internals *softnic,
return -1;
ap = NULL;
- if (params->action_profile_name) {
+ if (strlen(params->action_profile_name)) {
ap = softnic_table_action_profile_find(softnic,
params->action_profile_name);
if (ap == NULL)
@@ -797,28 +890,28 @@ softnic_pipeline_table_create(struct pmd_internals *softnic,
switch (params->match.hash.key_size) {
case 8:
- f_hash = hash_default_key8;
+ f_hash = rte_table_hash_crc_key8;
break;
case 16:
- f_hash = hash_default_key16;
+ f_hash = rte_table_hash_crc_key16;
break;
case 24:
- f_hash = hash_default_key24;
+ f_hash = rte_table_hash_crc_key24;
break;
case 32:
- f_hash = hash_default_key32;
+ f_hash = rte_table_hash_crc_key32;
break;
case 40:
- f_hash = hash_default_key40;
+ f_hash = rte_table_hash_crc_key40;
break;
case 48:
- f_hash = hash_default_key48;
+ f_hash = rte_table_hash_crc_key48;
break;
case 56:
- f_hash = hash_default_key56;
+ f_hash = rte_table_hash_crc_key56;
break;
case 64:
- f_hash = hash_default_key64;
+ f_hash = rte_table_hash_crc_key64;
break;
default:
return -1;
@@ -960,7 +1053,51 @@ softnic_pipeline_table_create(struct pmd_internals *softnic,
memcpy(&table->params, params, sizeof(*params));
table->ap = ap;
table->a = action;
+ TAILQ_INIT(&table->flows);
+ TAILQ_INIT(&table->meter_profiles);
+ memset(&table->dscp_table, 0, sizeof(table->dscp_table));
pipeline->n_tables++;
return 0;
}
+
+int
+softnic_pipeline_port_out_find(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ const char *name,
+ uint32_t *port_id)
+{
+ struct pipeline *pipeline;
+ uint32_t i;
+
+ if (softnic == NULL ||
+ pipeline_name == NULL ||
+ name == NULL ||
+ port_id == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ for (i = 0; i < pipeline->n_ports_out; i++)
+ if (strcmp(pipeline->port_out[i].params.dev_name, name) == 0) {
+ *port_id = i;
+ return 0;
+ }
+
+ return -1;
+}
+
+struct softnic_table_meter_profile *
+softnic_pipeline_table_meter_profile_find(struct softnic_table *table,
+ uint32_t meter_profile_id)
+{
+ struct softnic_table_meter_profile *mp;
+
+ TAILQ_FOREACH(mp, &table->meter_profiles, node)
+ if (mp->meter_profile_id == meter_profile_id)
+ return mp;
+
+ return NULL;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_thread.c b/drivers/net/softnic/rte_eth_softnic_thread.c
index 8a150903..4572adfa 100644
--- a/drivers/net/softnic/rte_eth_softnic_thread.c
+++ b/drivers/net/softnic/rte_eth_softnic_thread.c
@@ -1680,6 +1680,8 @@ softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
struct pipeline *p;
struct pipeline_msg_req *req;
struct pipeline_msg_rsp *rsp;
+ struct softnic_table *table;
+ struct softnic_table_meter_profile *mp;
int status;
/* Check input params */
@@ -1692,20 +1694,40 @@ softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
table_id >= p->n_tables)
return -1;
- if (!pipeline_is_running(p)) {
- struct rte_table_action *a = p->table[table_id].a;
+ table = &p->table[table_id];
+ mp = softnic_pipeline_table_meter_profile_find(table, meter_profile_id);
+ if (mp)
+ return -1;
- status = rte_table_action_meter_profile_add(a,
+ /* Resource Allocation */
+ mp = calloc(1, sizeof(struct softnic_table_meter_profile));
+ if (mp == NULL)
+ return -1;
+
+ mp->meter_profile_id = meter_profile_id;
+ memcpy(&mp->profile, profile, sizeof(mp->profile));
+
+ if (!pipeline_is_running(p)) {
+ status = rte_table_action_meter_profile_add(table->a,
meter_profile_id,
profile);
+ if (status) {
+ free(mp);
+ return status;
+ }
+
+ /* Add profile to the table. */
+ TAILQ_INSERT_TAIL(&table->meter_profiles, mp, node);
return status;
}
/* Allocate request */
req = pipeline_msg_alloc();
- if (req == NULL)
+ if (req == NULL) {
+ free(mp);
return -1;
+ }
/* Write request */
req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
@@ -1715,11 +1737,17 @@ softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
/* Send request and wait for response */
rsp = pipeline_msg_send_recv(p, req);
- if (rsp == NULL)
+ if (rsp == NULL) {
+ free(mp);
return -1;
+ }
/* Read response */
status = rsp->status;
+ if (status == 0)
+ TAILQ_INSERT_TAIL(&table->meter_profiles, mp, node);
+ else
+ free(mp);
/* Free response */
pipeline_msg_free(rsp);
@@ -1874,6 +1902,11 @@ softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic,
dscp_mask,
dscp_table);
+ /* Update table dscp table */
+ if (!status)
+ memcpy(&p->table[table_id].dscp_table, dscp_table,
+ sizeof(p->table[table_id].dscp_table));
+
return status;
}
@@ -1897,6 +1930,11 @@ softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic,
/* Read response */
status = rsp->status;
+ /* Update table dscp table */
+ if (!status)
+ memcpy(&p->table[table_id].dscp_table, dscp_table,
+ sizeof(p->table[table_id].dscp_table));
+
/* Free response */
pipeline_msg_free(rsp);
@@ -2202,29 +2240,37 @@ match_convert(struct softnic_table_rule_match *mh,
ml->acl_add.field_value[0].mask_range.u8 =
mh->match.acl.proto_mask;
- ml->acl_add.field_value[1].value.u32 = sa32[0];
+ ml->acl_add.field_value[1].value.u32 =
+ rte_be_to_cpu_32(sa32[0]);
ml->acl_add.field_value[1].mask_range.u32 =
sa32_depth[0];
- ml->acl_add.field_value[2].value.u32 = sa32[1];
+ ml->acl_add.field_value[2].value.u32 =
+ rte_be_to_cpu_32(sa32[1]);
ml->acl_add.field_value[2].mask_range.u32 =
sa32_depth[1];
- ml->acl_add.field_value[3].value.u32 = sa32[2];
+ ml->acl_add.field_value[3].value.u32 =
+ rte_be_to_cpu_32(sa32[2]);
ml->acl_add.field_value[3].mask_range.u32 =
sa32_depth[2];
- ml->acl_add.field_value[4].value.u32 = sa32[3];
+ ml->acl_add.field_value[4].value.u32 =
+ rte_be_to_cpu_32(sa32[3]);
ml->acl_add.field_value[4].mask_range.u32 =
sa32_depth[3];
- ml->acl_add.field_value[5].value.u32 = da32[0];
+ ml->acl_add.field_value[5].value.u32 =
+ rte_be_to_cpu_32(da32[0]);
ml->acl_add.field_value[5].mask_range.u32 =
da32_depth[0];
- ml->acl_add.field_value[6].value.u32 = da32[1];
+ ml->acl_add.field_value[6].value.u32 =
+ rte_be_to_cpu_32(da32[1]);
ml->acl_add.field_value[6].mask_range.u32 =
da32_depth[1];
- ml->acl_add.field_value[7].value.u32 = da32[2];
+ ml->acl_add.field_value[7].value.u32 =
+ rte_be_to_cpu_32(da32[2]);
ml->acl_add.field_value[7].mask_range.u32 =
da32_depth[2];
- ml->acl_add.field_value[8].value.u32 = da32[3];
+ ml->acl_add.field_value[8].value.u32 =
+ rte_be_to_cpu_32(da32[3]);
ml->acl_add.field_value[8].mask_range.u32 =
da32_depth[3];
@@ -2264,36 +2310,36 @@ match_convert(struct softnic_table_rule_match *mh,
mh->match.acl.proto_mask;
ml->acl_delete.field_value[1].value.u32 =
- sa32[0];
+ rte_be_to_cpu_32(sa32[0]);
ml->acl_delete.field_value[1].mask_range.u32 =
sa32_depth[0];
ml->acl_delete.field_value[2].value.u32 =
- sa32[1];
+ rte_be_to_cpu_32(sa32[1]);
ml->acl_delete.field_value[2].mask_range.u32 =
sa32_depth[1];
ml->acl_delete.field_value[3].value.u32 =
- sa32[2];
+ rte_be_to_cpu_32(sa32[2]);
ml->acl_delete.field_value[3].mask_range.u32 =
sa32_depth[2];
ml->acl_delete.field_value[4].value.u32 =
- sa32[3];
+ rte_be_to_cpu_32(sa32[3]);
ml->acl_delete.field_value[4].mask_range.u32 =
sa32_depth[3];
ml->acl_delete.field_value[5].value.u32 =
- da32[0];
+ rte_be_to_cpu_32(da32[0]);
ml->acl_delete.field_value[5].mask_range.u32 =
da32_depth[0];
ml->acl_delete.field_value[6].value.u32 =
- da32[1];
+ rte_be_to_cpu_32(da32[1]);
ml->acl_delete.field_value[6].mask_range.u32 =
da32_depth[1];
ml->acl_delete.field_value[7].value.u32 =
- da32[2];
+ rte_be_to_cpu_32(da32[2]);
ml->acl_delete.field_value[7].mask_range.u32 =
da32_depth[2];
ml->acl_delete.field_value[8].value.u32 =
- da32[3];
+ rte_be_to_cpu_32(da32[3]);
ml->acl_delete.field_value[8].mask_range.u32 =
da32_depth[3];
@@ -2432,6 +2478,36 @@ action_convert(struct rte_table_action *a,
return status;
}
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TAG,
+ &action->tag);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_DECAP,
+ &action->decap);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_SYM_CRYPTO,
+ &action->sym_crypto);
+
+ if (status)
+ return status;
+ }
+
return 0;
}
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 1d20cb51..88448eff 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1056,8 +1056,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = internals->max_rx_queues;
dev_info->max_tx_queues = internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
dev_info->tx_offload_capa = 0;
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_queue_offload_capa = 0;
@@ -1475,7 +1474,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
PMD_INIT_FUNC_TRACE();
PMD_INIT_LOG(INFO, "Initializing eth_dev %s (driver %s)", data->name,
- dev->device->driver->name);
+ RTE_STR(RTE_SZEDATA2_DRIVER_NAME));
/* Fill internal private structure. */
internals->dev = dev;
@@ -1526,7 +1525,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
ether_addr_copy(&eth_addr, data->mac_addrs);
PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
- dev->device->driver->name, data->name);
+ RTE_STR(RTE_SZEDATA2_DRIVER_NAME), data->name);
return 0;
}
@@ -1545,10 +1544,9 @@ rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
free(internals->sze_dev_path);
- rte_free(dev->data->mac_addrs);
PMD_DRV_LOG(INFO, "%s device %s successfully uninitialized",
- dev->device->driver->name, dev->data->name);
+ RTE_STR(RTE_SZEDATA2_DRIVER_NAME), dev->data->name);
return 0;
}
diff --git a/drivers/net/tap/Makefile b/drivers/net/tap/Makefile
index 32433653..77482838 100644
--- a/drivers/net/tap/Makefile
+++ b/drivers/net/tap/Makefile
@@ -22,6 +22,7 @@ CFLAGS += -O3
CFLAGS += -I$(SRCDIR)
CFLAGS += -I.
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
LDLIBS += -lrte_bus_vdev -lrte_gso
diff --git a/drivers/net/tap/meson.build b/drivers/net/tap/meson.build
new file mode 100644
index 00000000..9cb7142a
--- /dev/null
+++ b/drivers/net/tap/meson.build
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 Luca Boccassi <bluca@debian.org>
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+sources = files(
+ 'rte_eth_tap.c',
+ 'tap_bpf_api.c',
+ 'tap_flow.c',
+ 'tap_intr.c',
+ 'tap_netlink.c',
+ 'tap_tcmsgs.c',
+)
+
+deps = ['bus_vdev', 'gso', 'hash']
+
+cflags += '-DTAP_MAX_QUEUES=16'
+
+# To maintain the compatibility with the make build system
+# tap_autoconf.h file is still generated.
+# input array for meson symbol search:
+# [ "MACRO to define if found", "header for the search",
+# "enum/define", "symbol to search" ]
+#
+args = [
+ [ 'HAVE_TC_FLOWER', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_UNSPEC' ],
+ [ 'HAVE_TC_VLAN_ID', 'linux/pkt_cls.h',
+ 'TCA_FLOWER_KEY_VLAN_PRIO' ],
+ [ 'HAVE_TC_BPF', 'linux/pkt_cls.h',
+ 'TCA_BPF_UNSPEC' ],
+ [ 'HAVE_TC_BPF_FD', 'linux/pkt_cls.h',
+ 'TCA_BPF_FD' ],
+ [ 'HAVE_TC_ACT_BPF', 'linux/tc_act/tc_bpf.h',
+ 'TCA_ACT_BPF_UNSPEC' ],
+ [ 'HAVE_TC_ACT_BPF_FD', 'linux/tc_act/tc_bpf.h',
+ 'TCA_ACT_BPF_FD' ],
+]
+config = configuration_data()
+allow_experimental_apis = true
+foreach arg:args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2]))
+endforeach
+configure_file(output : 'tap_autoconf.h', configuration : config)
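
For reference, a minimal sketch of how the generated tap_autoconf.h is typically consumed by the driver sources; the guard shown is an assumption for illustration, not part of this patch.

#include <tap_autoconf.h>

#ifdef HAVE_TC_FLOWER
/* Kernel headers expose the flower classifier: build the full rte_flow
 * translation path.
 */
#else
/* TCA_FLOWER_UNSPEC not found at build time: compile the fallback path. */
#endif
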
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index feb92b48..e7817e89 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -16,6 +16,8 @@
#include <rte_debug.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
+#include <rte_ethdev.h>
+#include <rte_errno.h>
#include <assert.h>
#include <sys/types.h>
@@ -62,6 +64,10 @@
#define TAP_GSO_MBUFS_NUM \
(TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
+/* IPC key for queue fds sync */
+#define TAP_MP_KEY "tap_mp_sync_queues"
+
+static int tap_devices_count;
static struct rte_vdev_driver pmd_tap_drv;
static struct rte_vdev_driver pmd_tun_drv;
@@ -100,6 +106,17 @@ enum ioctl_mode {
REMOTE_ONLY,
};
+/* Message header to synchronize queues via IPC */
+struct ipc_queues {
+ char port_name[RTE_DEV_NAME_MAX_LEN];
+ int rxq_count;
+ int txq_count;
+ /*
+	 * The file descriptors are carried in the ancillary (SCM_RIGHTS) part
+	 * of the Unix domain message and are translated by the kernel.
+ */
+};
+
static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/**
@@ -305,8 +322,7 @@ tap_rx_offload_get_queue_capa(void)
return DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_TCP_CKSUM;
}
/* Callback to handle the rx burst of packets to the correct interface and
@@ -316,6 +332,7 @@ static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
struct rx_queue *rxq = queue;
+ struct pmd_process_private *process_private;
uint16_t num_rx;
unsigned long num_rx_bytes = 0;
uint32_t trigger = tap_trigger;
@@ -324,6 +341,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return 0;
if (trigger)
rxq->trigger_seen = trigger;
+ process_private = rte_eth_devices[rxq->in_port].process_private;
rte_compiler_barrier();
for (num_rx = 0; num_rx < nb_pkts; ) {
struct rte_mbuf *mbuf = rxq->pool;
@@ -332,9 +350,9 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint16_t data_off = rte_pktmbuf_headroom(mbuf);
int len;
- len = readv(rxq->fd, *rxq->iovecs,
- 1 +
- (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+ len = readv(process_private->rxq_fds[rxq->queue_id],
+ *rxq->iovecs,
+ 1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
rxq->nb_rx_desc : 1));
if (len < (int)sizeof(struct tun_pi))
break;
@@ -495,6 +513,9 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
{
int i;
uint16_t l234_hlen;
+ struct pmd_process_private *process_private;
+
+ process_private = rte_eth_devices[txq->out_port].process_private;
for (i = 0; i < num_mbufs; i++) {
struct rte_mbuf *mbuf = pmbufs[i];
@@ -596,7 +617,7 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
/* copy the tx frame data */
- n = writev(txq->fd, iovecs, j);
+ n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
if (n <= 0)
break;
(*num_packets)++;
@@ -686,7 +707,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
txq->stats.errs += nb_pkts - num_tx;
txq->stats.obytes += num_tx_bytes;
- return num_tx;
+ return num_packets;
}
static const char *
@@ -971,19 +992,20 @@ tap_dev_close(struct rte_eth_dev *dev)
{
int i;
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
tap_link_set_down(dev);
tap_flow_flush(dev, NULL);
tap_flow_implicit_flush(internals, NULL);
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
- if (internals->rxq[i].fd != -1) {
- close(internals->rxq[i].fd);
- internals->rxq[i].fd = -1;
+ if (process_private->rxq_fds[i] != -1) {
+ close(process_private->rxq_fds[i]);
+ process_private->rxq_fds[i] = -1;
}
- if (internals->txq[i].fd != -1) {
- close(internals->txq[i].fd);
- internals->txq[i].fd = -1;
+ if (process_private->txq_fds[i] != -1) {
+ close(process_private->txq_fds[i]);
+ process_private->txq_fds[i] = -1;
}
}
@@ -1007,10 +1029,14 @@ static void
tap_rx_queue_release(void *queue)
{
struct rx_queue *rxq = queue;
+ struct pmd_process_private *process_private;
- if (rxq && (rxq->fd > 0)) {
- close(rxq->fd);
- rxq->fd = -1;
+ if (!rxq)
+ return;
+ process_private = rte_eth_devices[rxq->in_port].process_private;
+ if (process_private->rxq_fds[rxq->queue_id] > 0) {
+ close(process_private->rxq_fds[rxq->queue_id]);
+ process_private->rxq_fds[rxq->queue_id] = -1;
rte_pktmbuf_free(rxq->pool);
rte_free(rxq->iovecs);
rxq->pool = NULL;
@@ -1022,10 +1048,15 @@ static void
tap_tx_queue_release(void *queue)
{
struct tx_queue *txq = queue;
+ struct pmd_process_private *process_private;
- if (txq && (txq->fd > 0)) {
- close(txq->fd);
- txq->fd = -1;
+ if (!txq)
+ return;
+ process_private = rte_eth_devices[txq->out_port].process_private;
+
+ if (process_private->txq_fds[txq->queue_id] > 0) {
+ close(process_private->txq_fds[txq->queue_id]);
+ process_private->txq_fds[txq->queue_id] = -1;
}
}
@@ -1210,18 +1241,19 @@ tap_setup_queue(struct rte_eth_dev *dev,
int *other_fd;
const char *dir;
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
struct rte_gso_ctx *gso_ctx;
if (is_rx) {
- fd = &rx->fd;
- other_fd = &tx->fd;
+ fd = &process_private->rxq_fds[qid];
+ other_fd = &process_private->txq_fds[qid];
dir = "rx";
gso_ctx = NULL;
} else {
- fd = &tx->fd;
- other_fd = &rx->fd;
+ fd = &process_private->txq_fds[qid];
+ other_fd = &process_private->rxq_fds[qid];
dir = "tx";
gso_ctx = &tx->gso_ctx;
}
@@ -1274,6 +1306,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mp)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
struct rx_queue *rxq = &internals->rxq[rx_queue_id];
struct rte_mbuf **tmp = &rxq->pool;
long iov_max = sysconf(_SC_IOV_MAX);
@@ -1294,6 +1327,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
rxq->mp = mp;
rxq->trigger_seen = 1; /* force initial burst */
rxq->in_port = dev->data->port_id;
+ rxq->queue_id = rx_queue_id;
rxq->nb_rx_desc = nb_desc;
iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
socket_id);
@@ -1332,7 +1366,8 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
}
TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
- internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
+ internals->name, rx_queue_id,
+ process_private->rxq_fds[rx_queue_id]);
return 0;
@@ -1352,6 +1387,7 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
struct tx_queue *txq;
int ret;
uint64_t offloads;
@@ -1360,6 +1396,8 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
return -1;
dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
txq = dev->data->tx_queues[tx_queue_id];
+ txq->out_port = dev->data->port_id;
+ txq->queue_id = tx_queue_id;
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
txq->csum = !!(offloads &
@@ -1372,7 +1410,8 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
return -1;
TAP_LOG(DEBUG,
" TX TUNTAP device name %s, qid %d on fd %d csum %s",
- internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
+ internals->name, tx_queue_id,
+ process_private->txq_fds[tx_queue_id],
txq->csum ? "on" : "off");
return 0;
@@ -1620,6 +1659,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
int numa_node = rte_socket_id();
struct rte_eth_dev *dev;
struct pmd_internals *pmd;
+ struct pmd_process_private *process_private;
struct rte_eth_dev_data *data;
struct ifreq ifr;
int i;
@@ -1634,7 +1674,16 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
goto error_exit_nodev;
}
+ process_private = (struct pmd_process_private *)
+ rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private),
+ RTE_CACHE_LINE_SIZE, dev->device->numa_node);
+
+ if (process_private == NULL) {
+ TAP_LOG(ERR, "Failed to alloc memory for process private");
+ return -1;
+ }
pmd = dev->data->dev_private;
+ dev->process_private = process_private;
pmd->dev = dev;
snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
pmd->type = type;
@@ -1670,8 +1719,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
/* Presetup the fds to -1 as being not valid */
pmd->ka_fd = -1;
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
- pmd->rxq[i].fd = -1;
- pmd->txq[i].fd = -1;
+ process_private->rxq_fds[i] = -1;
+ process_private->txq_fds[i] = -1;
}
if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
@@ -1809,6 +1858,8 @@ error_remote:
error_exit:
if (pmd->ioctl_sock > 0)
close(pmd->ioctl_sock);
+	/* mac_addrs must not be freed alone because it is part of dev_private */
+ dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(dev);
error_exit_nodev:
@@ -1974,6 +2025,102 @@ leave:
return ret;
}
+/* Request queue file descriptors from secondary to primary. */
+static int
+tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
+{
+ int ret;
+ struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
+ struct rte_mp_msg request, *reply;
+ struct rte_mp_reply replies;
+ struct ipc_queues *request_param = (struct ipc_queues *)request.param;
+ struct ipc_queues *reply_param;
+ struct pmd_process_private *process_private = dev->process_private;
+ int queue, fd_iterator;
+
+ /* Prepare the request */
+ strlcpy(request.name, TAP_MP_KEY, sizeof(request.name));
+ strlcpy(request_param->port_name, port_name,
+ sizeof(request_param->port_name));
+ request.len_param = sizeof(*request_param);
+ /* Send request and receive reply */
+ ret = rte_mp_request_sync(&request, &replies, &timeout);
+ if (ret < 0) {
+ TAP_LOG(ERR, "Failed to request queues from primary: %d",
+ rte_errno);
+ return -1;
+ }
+ reply = &replies.msgs[0];
+ reply_param = (struct ipc_queues *)reply->param;
+ TAP_LOG(DEBUG, "Received IPC reply for %s", reply_param->port_name);
+
+ /* Attach the queues from received file descriptors */
+ dev->data->nb_rx_queues = reply_param->rxq_count;
+ dev->data->nb_tx_queues = reply_param->txq_count;
+ fd_iterator = 0;
+ for (queue = 0; queue < reply_param->rxq_count; queue++)
+ process_private->rxq_fds[queue] = reply->fds[fd_iterator++];
+ for (queue = 0; queue < reply_param->txq_count; queue++)
+ process_private->txq_fds[queue] = reply->fds[fd_iterator++];
+
+ return 0;
+}
+
+/* Send the queue file descriptors from the primary process to secondary. */
+static int
+tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer)
+{
+ struct rte_eth_dev *dev;
+ struct pmd_process_private *process_private;
+ struct rte_mp_msg reply;
+ const struct ipc_queues *request_param =
+ (const struct ipc_queues *)request->param;
+ struct ipc_queues *reply_param =
+ (struct ipc_queues *)reply.param;
+ uint16_t port_id;
+ int queue;
+ int ret;
+
+ /* Get requested port */
+ TAP_LOG(DEBUG, "Received IPC request for %s", request_param->port_name);
+ ret = rte_eth_dev_get_port_by_name(request_param->port_name, &port_id);
+ if (ret) {
+ TAP_LOG(ERR, "Failed to get port id for %s",
+ request_param->port_name);
+ return -1;
+ }
+ dev = &rte_eth_devices[port_id];
+ process_private = dev->process_private;
+
+ /* Fill file descriptors for all queues */
+ reply.num_fds = 0;
+ reply_param->rxq_count = 0;
+ for (queue = 0; queue < dev->data->nb_rx_queues; queue++) {
+ reply.fds[reply.num_fds++] = process_private->rxq_fds[queue];
+ reply_param->rxq_count++;
+ }
+	RTE_ASSERT(reply_param->rxq_count == dev->data->nb_rx_queues);
+
+	reply_param->txq_count = 0;
+	for (queue = 0; queue < dev->data->nb_tx_queues; queue++) {
+		reply.fds[reply.num_fds++] = process_private->txq_fds[queue];
+		reply_param->txq_count++;
+	}
+	RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
+	RTE_ASSERT(reply.num_fds <= RTE_MP_MAX_FD_NUM);
+
+ /* Send reply */
+ strlcpy(reply.name, request->name, sizeof(reply.name));
+ strlcpy(reply_param->port_name, request_param->port_name,
+ sizeof(reply_param->port_name));
+ reply.len_param = sizeof(*reply_param);
+ if (rte_mp_reply(&reply, peer) < 0) {
+ TAP_LOG(ERR, "Failed to reply an IPC request to sync queues");
+ return -1;
+ }
+ return 0;
+}
+
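
Condensed, the fd handoff above follows the generic EAL multi-process pattern: the primary registers an action under a well-known key and answers with fds placed in the message's fd array, which the kernel duplicates into the requesting secondary. A minimal hedged sketch follows; the key name and fd choice are illustrative only, and the EAL IPC API is experimental in this release.

#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <rte_eal.h>
#include <rte_string_fns.h>

/* Primary side: registered via rte_mp_action_register("example_mp_key", ...). */
static int
example_mp_send_fd(const struct rte_mp_msg *request, const void *peer)
{
	struct rte_mp_msg reply;

	memset(&reply, 0, sizeof(reply));
	strlcpy(reply.name, request->name, sizeof(reply.name));
	reply.fds[reply.num_fds++] = 0; /* any fd owned by the primary */
	return rte_mp_reply(&reply, peer);
}

/* Secondary side: request the fd and pick it up from the first reply. */
static int
example_mp_get_fd(void)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct rte_mp_msg request;
	struct rte_mp_reply replies;
	int fd = -1;

	memset(&request, 0, sizeof(request));
	strlcpy(request.name, "example_mp_key", sizeof(request.name));
	if (rte_mp_request_sync(&request, &replies, &ts) < 0)
		return -1;
	if (replies.nb_received == 1)
		fd = replies.msgs[0].fds[0];
	free(replies.msgs); /* reply array is allocated by the EAL */
	return fd;
}
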
/* Open a TAP interface device.
*/
static int
@@ -1987,22 +2134,41 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
char remote_iface[RTE_ETH_NAME_MAX_LEN];
struct ether_addr user_mac = { .addr_bytes = {0} };
struct rte_eth_dev *eth_dev;
+ int tap_devices_count_increased = 0;
strcpy(tuntap_name, "TAP");
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(params) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
TAP_LOG(ERR, "Failed to probe %s", name);
return -1;
}
- /* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &ops;
eth_dev->device = &dev->device;
+ eth_dev->rx_pkt_burst = pmd_rx_burst;
+ eth_dev->tx_pkt_burst = pmd_tx_burst;
+ if (!rte_eal_primary_proc_alive(NULL)) {
+ TAP_LOG(ERR, "Primary process is missing");
+ return -1;
+ }
+ eth_dev->process_private = (struct pmd_process_private *)
+ rte_zmalloc_socket(name,
+ sizeof(struct pmd_process_private),
+ RTE_CACHE_LINE_SIZE,
+ eth_dev->device->numa_node);
+ if (eth_dev->process_private == NULL) {
+ TAP_LOG(ERR,
+ "Failed to alloc memory for process private");
+ return -1;
+ }
+
+ ret = tap_mp_attach_queues(name, eth_dev);
+ if (ret != 0)
+ return -1;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -2050,6 +2216,17 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s",
name, tap_name);
+ /* Register IPC feed callback */
+ if (!tap_devices_count) {
+ ret = rte_mp_action_register(TAP_MP_KEY, tap_mp_sync_queues);
+ if (ret < 0) {
+ TAP_LOG(ERR, "%s: Failed to register IPC callback: %s",
+ tuntap_name, strerror(rte_errno));
+ goto leave;
+ }
+ }
+ tap_devices_count++;
+ tap_devices_count_increased = 1;
ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
ETH_TUNTAP_TYPE_TAP);
@@ -2057,6 +2234,11 @@ leave:
if (ret == -1) {
TAP_LOG(ERR, "Failed to create pmd for %s as %s",
name, tap_name);
+ if (tap_devices_count_increased == 1) {
+ if (tap_devices_count == 1)
+ rte_mp_action_unregister(TAP_MP_KEY);
+ tap_devices_count--;
+ }
tap_unit--; /* Restore the unit number */
}
rte_kvargs_free(kvlist);
@@ -2071,14 +2253,22 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals;
+ struct pmd_process_private *process_private;
int i;
/* find the ethdev entry */
eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (!eth_dev)
- return 0;
+ return -ENODEV;
+
+	/* mac_addrs must not be freed alone because it is part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
internals = eth_dev->data->dev_private;
+ process_private = eth_dev->process_private;
TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
(internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
@@ -2090,18 +2280,21 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
tap_nl_final(internals->nlsk_fd);
}
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
- if (internals->rxq[i].fd != -1) {
- close(internals->rxq[i].fd);
- internals->rxq[i].fd = -1;
+ if (process_private->rxq_fds[i] != -1) {
+ close(process_private->rxq_fds[i]);
+ process_private->rxq_fds[i] = -1;
}
- if (internals->txq[i].fd != -1) {
- close(internals->txq[i].fd);
- internals->txq[i].fd = -1;
+ if (process_private->txq_fds[i] != -1) {
+ close(process_private->txq_fds[i]);
+ process_private->txq_fds[i] = -1;
}
}
close(internals->ioctl_sock);
- rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->process_private);
+ if (tap_devices_count == 1)
+ rte_mp_action_unregister(TAP_MP_KEY);
+ tap_devices_count--;
rte_eth_dev_release_port(eth_dev);
if (internals->ka_fd != -1) {
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 44e2773f..dc3579ac 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -46,7 +46,7 @@ struct rx_queue {
struct rte_mempool *mp; /* Mempool for RX packets */
uint32_t trigger_seen; /* Last seen Rx trigger value */
uint16_t in_port; /* Port ID */
- int fd;
+	uint16_t queue_id;		/* queue ID */
struct pkt_stats stats; /* Stats for this RX queue */
uint16_t nb_rx_desc; /* max number of mbufs available */
struct rte_eth_rxmode *rxmode; /* RX features */
@@ -56,12 +56,13 @@ struct rx_queue {
};
struct tx_queue {
- int fd;
int type; /* Type field - TUN|TAP */
uint16_t *mtu; /* Pointer to MTU from dev_data */
uint16_t csum:1; /* Enable checksum offloading */
struct pkt_stats stats; /* Stats for this TX queue */
struct rte_gso_ctx gso_ctx; /* GSO context */
+ uint16_t out_port; /* Port ID */
+	uint16_t queue_id;		/* queue ID */
};
struct pmd_internals {
@@ -92,6 +93,11 @@ struct pmd_internals {
int ka_fd; /* keep-alive file descriptor */
};
+struct pmd_process_private {
+ int rxq_fds[RTE_PMD_TAP_MAX_QUEUES];
+ int txq_fds[RTE_PMD_TAP_MAX_QUEUES];
+};
+
/* tap_intr.c */
int tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set);
diff --git a/drivers/net/tap/tap_bpf_insns.h b/drivers/net/tap/tap_bpf_insns.h
index 79e3e66b..1a91bbad 100644
--- a/drivers/net/tap/tap_bpf_insns.h
+++ b/drivers/net/tap/tap_bpf_insns.h
@@ -5,7 +5,7 @@
#include <tap_bpf.h>
/* bpf_insn array matching cls_q section. See tap_bpf_program.c file */
-struct bpf_insn cls_q_insns[] = {
+static struct bpf_insn cls_q_insns[] = {
{0x61, 2, 1, 52, 0x00000000},
{0x18, 3, 0, 0, 0xdeadbeef},
{0x00, 0, 0, 0, 0x00000000},
@@ -24,7 +24,7 @@ struct bpf_insn cls_q_insns[] = {
};
/* bpf_insn array matching l3_l4 section. see tap_bpf_program.c file */
-struct bpf_insn l3_l4_hash_insns[] = {
+static struct bpf_insn l3_l4_hash_insns[] = {
{0xbf, 7, 1, 0, 0x00000000},
{0x61, 8, 7, 16, 0x00000000},
{0x61, 6, 7, 76, 0x00000000},
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 0e01af62..d155618f 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -1567,6 +1567,7 @@ tap_flow_isolate(struct rte_eth_dev *dev,
struct rte_flow_error *error __rte_unused)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
/* normalize 'set' variable to contain 0 or 1 values */
if (set)
@@ -1580,7 +1581,7 @@ tap_flow_isolate(struct rte_eth_dev *dev,
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
*/
- if (!pmd->rxq[0].fd)
+ if (!process_private->rxq_fds[0])
return 0;
if (set) {
struct rte_flow *remote_flow;
@@ -1810,7 +1811,7 @@ tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
#define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
#define SEC_NAME_CLS_Q "cls_q"
-const char *sec_name[SEC_MAX] = {
+static const char *sec_name[SEC_MAX] = {
[SEC_L3_L4] = "l3_l4",
};
diff --git a/drivers/net/tap/tap_intr.c b/drivers/net/tap/tap_intr.c
index fc590181..7af0010e 100644
--- a/drivers/net/tap/tap_intr.c
+++ b/drivers/net/tap/tap_intr.c
@@ -51,6 +51,7 @@ static int
tap_rx_intr_vec_install(struct rte_eth_dev *dev)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
unsigned int rxqs_n = pmd->dev->data->nb_rx_queues;
struct rte_intr_handle *intr_handle = &pmd->intr_handle;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
@@ -71,7 +72,7 @@ tap_rx_intr_vec_install(struct rte_eth_dev *dev)
struct rx_queue *rxq = pmd->dev->data->rx_queues[i];
/* Skip queues that cannot request interrupts. */
- if (!rxq || rxq->fd <= 0) {
+ if (!rxq || process_private->rxq_fds[i] <= 0) {
/* Use invalid intr_vec[] index to disable entry. */
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
@@ -79,7 +80,7 @@ tap_rx_intr_vec_install(struct rte_eth_dev *dev)
continue;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
- intr_handle->efds[count] = rxq->fd;
+ intr_handle->efds[count] = process_private->rxq_fds[i];
count++;
}
if (!count)
diff --git a/drivers/net/thunderx/base/meson.build b/drivers/net/thunderx/base/meson.build
index c9d5a8f4..bf4e8608 100644
--- a/drivers/net/thunderx/base/meson.build
+++ b/drivers/net/thunderx/base/meson.build
@@ -7,8 +7,12 @@ sources = [
'nicvf_bsvf.c'
]
+c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
base_lib = static_library('nicvf_base', sources,
- c_args: cflags,
+ c_args: c_args,
dependencies: static_rte_ethdev
)
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index a55c3ca6..879d8899 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -1431,7 +1431,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1916,14 +1915,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- /* KEEP_CRC offload flag is not supported by PMD
- * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
- */
- if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
- PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
- rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- }
-
if (txmode->mq_mode) {
PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
return -EINVAL;
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index ae440fef..c0bfbf84 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -40,7 +40,6 @@
#define NICVF_RX_OFFLOAD_CAPA ( \
DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_CRC_STRIP | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_SCATTER)
diff --git a/drivers/net/vdev_netvsc/meson.build b/drivers/net/vdev_netvsc/meson.build
new file mode 100644
index 00000000..d3ada878
--- /dev/null
+++ b/drivers/net/vdev_netvsc/meson.build
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+sources = files('vdev_netvsc.c')
+
+allow_experimental_apis = true
+
+cflags_options = [
+ '-Wall',
+ '-Wextra',
+ '-D_BSD_SOURCE',
+ '-D_DEFAULT_SOURCE',
+ '-D_XOPEN_SOURCE=600'
+]
+foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+endforeach
diff --git a/drivers/net/vdev_netvsc/vdev_netvsc.c b/drivers/net/vdev_netvsc/vdev_netvsc.c
index 48717f2f..16303ef5 100644
--- a/drivers/net/vdev_netvsc/vdev_netvsc.c
+++ b/drivers/net/vdev_netvsc/vdev_netvsc.c
@@ -789,7 +789,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_vdev_netvsc,
/** Initialize driver log type. */
RTE_INIT(vdev_netvsc_init_log)
{
- vdev_netvsc_logtype = rte_log_register("pmd.vdev_netvsc");
+ vdev_netvsc_logtype = rte_log_register("pmd.net.vdev_netvsc");
if (vdev_netvsc_logtype >= 0)
rte_log_set_level(vdev_netvsc_logtype, RTE_LOG_NOTICE);
}
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index e58f3221..b38a4b6b 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -30,6 +30,7 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
+#define ETH_VHOST_POSTCOPY_SUPPORT "postcopy-support"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
@@ -38,6 +39,7 @@ static const char *valid_arguments[] = {
ETH_VHOST_CLIENT_ARG,
ETH_VHOST_DEQUEUE_ZERO_COPY,
ETH_VHOST_IOMMU_SUPPORT,
+ ETH_VHOST_POSTCOPY_SUPPORT,
NULL
};
@@ -1070,8 +1072,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
}
static int
@@ -1221,10 +1222,12 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
if (eth_dev == NULL)
goto error;
+ data = eth_dev->data;
eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
if (eth_addr == NULL)
goto error;
+ data->mac_addrs = eth_addr;
*eth_addr = base_eth_addr;
eth_addr->addr_bytes[5] = eth_dev->data->port_id;
@@ -1254,13 +1257,11 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
- data = eth_dev->data;
data->nb_rx_queues = queues;
data->nb_tx_queues = queues;
internal->max_queues = queues;
internal->vid = -1;
data->dev_link = pmd_link;
- data->mac_addrs = eth_addr;
data->dev_flags = RTE_ETH_DEV_INTR_LSC;
eth_dev->dev_ops = &ops;
@@ -1292,10 +1293,7 @@ error:
free(internal->dev_name);
}
rte_free(vring_state);
- rte_free(eth_addr);
- if (eth_dev)
- rte_eth_dev_release_port(eth_dev);
- rte_free(internal);
+ rte_eth_dev_release_port(eth_dev);
rte_free(list);
return -1;
@@ -1340,13 +1338,13 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
int client_mode = 0;
int dequeue_zero_copy = 0;
int iommu_support = 0;
+ int postcopy_support = 0;
struct rte_eth_dev *eth_dev;
const char *name = rte_vdev_device_name(dev);
VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
VHOST_LOG(ERR, "Failed to probe %s\n", name);
@@ -1412,6 +1410,16 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
}
+ if (rte_kvargs_count(kvlist, ETH_VHOST_POSTCOPY_SUPPORT) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_POSTCOPY_SUPPORT,
+ &open_int, &postcopy_support);
+ if (ret < 0)
+ goto out_free;
+
+ if (postcopy_support)
+ flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
+ }
+
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();
@@ -1437,6 +1445,9 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev)
if (eth_dev == NULL)
return -ENODEV;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
+
eth_dev_close(eth_dev);
rte_free(vring_states[eth_dev->data->port_id]);
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 614357da..10a7e3fc 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1697,7 +1697,7 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- return -EPERM;
+ return 0;
virtio_dev_stop(eth_dev);
virtio_dev_close(eth_dev);
@@ -1706,9 +1706,6 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
/* reset interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
rte_intr_callback_unregister(eth_dev->intr_handle,
@@ -2166,8 +2163,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
host_features = VTPCI_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index b726ad10..e0f80e5a 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -40,7 +40,10 @@
(VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
1u << VIRTIO_NET_F_GUEST_CSUM | \
1u << VIRTIO_NET_F_GUEST_TSO4 | \
- 1u << VIRTIO_NET_F_GUEST_TSO6)
+ 1u << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1u << VIRTIO_NET_F_CSUM | \
+ 1u << VIRTIO_NET_F_HOST_TSO4 | \
+ 1u << VIRTIO_NET_F_HOST_TSO6)
/*
* CQ function prototype
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 6bd22e54..b6a3c80b 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -567,16 +567,18 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
}
ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
- if (ret < 0) {
- PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+ if (ret != 1) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci capability list, ret %d", ret);
return -1;
}
while (pos) {
- ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
- if (ret < 0) {
- PMD_INIT_LOG(ERR,
- "failed to read pci cap at pos: %x", pos);
+ ret = rte_pci_read_config(dev, &cap, 2, pos);
+ if (ret != 2) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos: %x ret %d",
+ pos, ret);
break;
}
@@ -586,7 +588,16 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
* 1st byte is cap ID; 2nd byte is the position of next
* cap; next two bytes are the flags.
*/
- uint16_t flags = ((uint16_t *)&cap)[1];
+ uint16_t flags;
+
+ ret = rte_pci_read_config(dev, &flags, sizeof(flags),
+ pos + 2);
+ if (ret != sizeof(flags)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos:"
+ " %x ret %d", pos + 2, ret);
+ break;
+ }
if (flags & PCI_MSIX_ENABLE)
hw->use_msix = VIRTIO_MSIX_ENABLED;
@@ -601,6 +612,14 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
goto next;
}
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret != sizeof(cap)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos: %x ret %d",
+ pos, ret);
+ break;
+ }
+
PMD_INIT_LOG(DEBUG,
"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
@@ -689,25 +708,37 @@ enum virtio_msix_status
vtpci_msix_detect(struct rte_pci_device *dev)
{
uint8_t pos;
- struct virtio_pci_cap cap;
int ret;
ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
- if (ret < 0) {
- PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+ if (ret != 1) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci capability list, ret %d", ret);
return VIRTIO_MSIX_NONE;
}
while (pos) {
- ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
- if (ret < 0) {
- PMD_INIT_LOG(ERR,
- "failed to read pci cap at pos: %x", pos);
+ uint8_t cap[2];
+
+ ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
+ if (ret != sizeof(cap)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos: %x ret %d",
+ pos, ret);
break;
}
- if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
- uint16_t flags = ((uint16_t *)&cap)[1];
+ if (cap[0] == PCI_CAP_ID_MSIX) {
+ uint16_t flags;
+
+ ret = rte_pci_read_config(dev, &flags, sizeof(flags),
+ pos + sizeof(cap));
+ if (ret != sizeof(flags)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos:"
+ " %x ret %d", pos + 2, ret);
+ break;
+ }
if (flags & PCI_MSIX_ENABLE)
return VIRTIO_MSIX_ENABLED;
@@ -715,7 +746,7 @@ vtpci_msix_detect(struct rte_pci_device *dev)
return VIRTIO_MSIX_DISABLED;
}
- pos = cap.cap_next;
+ pos = cap[1];
}
return VIRTIO_MSIX_NONE;
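
The virtio_pci.c changes above treat any short read from rte_pci_read_config() as an error (the function returns the number of bytes actually read) and walk the capability list two bytes at a time instead of casting a partially filled structure. A minimal sketch of that pattern, assuming the standard pci_regs.h constants and simplified error handling; this is illustrative, not the driver's code:

#include <rte_bus_pci.h>
#include <linux/pci_regs.h>

static int
find_cap(struct rte_pci_device *dev, uint8_t cap_id)
{
	uint8_t pos;
	int ret;

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1)
		return -1;		/* short read: config space unusable */

	while (pos) {
		uint8_t cap[2];		/* [0] = cap ID, [1] = next pointer */

		ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
		if (ret != sizeof(cap))
			return -1;	/* stop on any partial read */
		if (cap[0] == cap_id)
			return pos;	/* capability found at this offset */
		pos = cap[1];
	}
	return 0;			/* capability not present */
}
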
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 31e565b4..f8bcbaa1 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -47,7 +47,7 @@ virtio_rxq_vec_setup(struct virtnet_rx *rxq)
}
/* Stub for linkage when arch specific implementation is not available */
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
virtio_recv_pkts_vec(void *rx_queue __rte_unused,
struct rte_mbuf **rx_pkts __rte_unused,
uint16_t nb_pkts __rte_unused)
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index 668cc99f..83a85cc6 100644
--- a/drivers/net/virtio/virtio_user/vhost.h
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -88,7 +88,7 @@ struct virtio_user_backend_ops {
int enable);
};
-struct virtio_user_backend_ops ops_user;
-struct virtio_user_backend_ops ops_kernel;
+extern struct virtio_user_backend_ops virtio_ops_user;
+extern struct virtio_user_backend_ops virtio_ops_kernel;
#endif
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index b2444096..6b19180d 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -70,41 +70,44 @@ static uint64_t vhost_req_user_to_kernel[] = {
[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
};
-struct walk_arg {
- struct vhost_memory_kernel *vm;
- uint32_t region_nr;
-};
static int
-add_memory_region(const struct rte_memseg_list *msl __rte_unused,
- const struct rte_memseg *ms, size_t len, void *arg)
+add_memseg_list(const struct rte_memseg_list *msl, void *arg)
{
- struct walk_arg *wa = arg;
+ struct vhost_memory_kernel *vm = arg;
struct vhost_memory_region *mr;
void *start_addr;
+ uint64_t len;
- if (wa->region_nr >= max_regions)
+ if (msl->external)
+ return 0;
+
+ if (vm->nregions >= max_regions)
return -1;
- mr = &wa->vm->regions[wa->region_nr++];
- start_addr = ms->addr;
+ start_addr = msl->base_va;
+ len = msl->page_sz * msl->memseg_arr.len;
+
+ mr = &vm->regions[vm->nregions++];
mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
mr->memory_size = len;
- mr->mmap_offset = 0;
+ mr->mmap_offset = 0; /* flags_padding */
+
+ PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
+ vm->nregions - 1, start_addr, len);
return 0;
}
-/* By default, vhost kernel module allows 64 regions, but DPDK allows
- * 256 segments. As a relief, below function merges those virtually
- * adjacent memsegs into one region.
+/* By default, the vhost kernel module allows 64 regions, but DPDK may
+ * have many more memory regions. The function below treats each
+ * contiguous memory space reserved by DPDK as one region.
*/
static struct vhost_memory_kernel *
prepare_vhost_memory_kernel(void)
{
struct vhost_memory_kernel *vm;
- struct walk_arg wa;
vm = malloc(sizeof(struct vhost_memory_kernel) +
max_regions *
@@ -112,16 +115,18 @@ prepare_vhost_memory_kernel(void)
if (!vm)
return NULL;
- wa.region_nr = 0;
- wa.vm = vm;
+ vm->nregions = 0;
+ vm->padding = 0;
- if (rte_memseg_contig_walk(add_memory_region, &wa) < 0) {
+ /*
+ * The memory lock has already been taken by memory subsystem
+ * or virtio_user_start_device().
+ */
+ if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
free(vm);
return NULL;
}
- vm->nregions = wa.region_nr;
- vm->padding = 0;
return vm;
}
@@ -147,8 +152,8 @@ prepare_vhost_memory_kernel(void)
(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
(1ULL << VIRTIO_NET_F_CSUM))
-static int
-tap_supporte_mq(void)
+static unsigned int
+tap_support_features(void)
{
int tapfd;
unsigned int tap_features;
@@ -167,7 +172,7 @@ tap_supporte_mq(void)
}
close(tapfd);
- return tap_features & IFF_MULTI_QUEUE;
+ return tap_features;
}
static int
@@ -181,6 +186,7 @@ vhost_kernel_ioctl(struct virtio_user_dev *dev,
struct vhost_memory_kernel *vm = NULL;
int vhostfd;
unsigned int queue_sel;
+ unsigned int features;
PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
@@ -234,17 +240,20 @@ vhost_kernel_ioctl(struct virtio_user_dev *dev,
}
if (!ret && req_kernel == VHOST_GET_FEATURES) {
+ features = tap_support_features();
/* with tap as the backend, all these features are supported
* but not claimed by vhost-net, so we add them back when
* reporting to upper layer.
*/
- *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
- *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+ if (features & IFF_VNET_HDR) {
+ *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+ }
/* vhost_kernel will not declare this feature, but it does
* support multi-queue.
*/
- if (tap_supporte_mq())
+ if (features & IFF_MULTI_QUEUE)
*(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
}
@@ -339,7 +348,7 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
hdr_size = sizeof(struct virtio_net_hdr);
tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
- (char *)dev->mac_addr);
+ (char *)dev->mac_addr, dev->features);
if (tapfd < 0) {
PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
return -1;
@@ -355,7 +364,7 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
return 0;
}
-struct virtio_user_backend_ops ops_kernel = {
+struct virtio_user_backend_ops virtio_ops_kernel = {
.setup = vhost_kernel_setup,
.send_request = vhost_kernel_ioctl,
.enable_qp = vhost_kernel_enable_queue_pair
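
prepare_vhost_memory_kernel() now builds one vhost region per memseg list rather than per memseg, which keeps the region count under the kernel's 64-region limit. A hedged sketch of the same walk, here only counting how many regions would be produced; the helper name and the counting use are illustrative, not part of the patch:

#include <rte_memory.h>

static int
count_region(const struct rte_memseg_list *msl, void *arg)
{
	unsigned int *nregions = arg;

	if (msl->external)
		return 0;	/* external memory is not passed to vhost */
	(*nregions)++;
	return 0;
}

static unsigned int
vhost_kernel_region_count(void)
{
	unsigned int nregions = 0;

	/* thread-unsafe variant assumes the memory hotplug lock is held,
	 * matching the comment added in prepare_vhost_memory_kernel() */
	rte_memseg_list_walk_thread_unsafe(count_region, &nregions);
	return nregions;
}
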
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index 9ea7ade7..a3faf1d0 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -16,21 +16,55 @@
#include "vhost_kernel_tap.h"
#include "../virtio_logs.h"
+#include "../virtio_pci.h"
+
+static int
+vhost_kernel_tap_set_offload(int fd, uint64_t features)
+{
+ unsigned int offload = 0;
+
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+ offload |= TUN_F_CSUM;
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_TSO4))
+ offload |= TUN_F_TSO4;
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_TSO6))
+ offload |= TUN_F_TSO6;
+ if (features & ((1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6)) &&
+ (features & (1ULL << VIRTIO_NET_F_GUEST_ECN)))
+ offload |= TUN_F_TSO_ECN;
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_UFO))
+ offload |= TUN_F_UFO;
+ }
+
+ if (offload != 0) {
+ /* Check if our kernel supports TUNSETOFFLOAD */
+ if (ioctl(fd, TUNSETOFFLOAD, 0) != 0 && errno == EINVAL) {
+ PMD_DRV_LOG(ERR, "Kernel does't support TUNSETOFFLOAD\n");
+ return -ENOTSUP;
+ }
+
+ if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) {
+ offload &= ~TUN_F_UFO;
+ if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) {
+ PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
int
vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
- const char *mac)
+ const char *mac, uint64_t features)
{
unsigned int tap_features;
int sndbuf = INT_MAX;
struct ifreq ifr;
int tapfd;
- unsigned int offload =
- TUN_F_CSUM |
- TUN_F_TSO4 |
- TUN_F_TSO6 |
- TUN_F_TSO_ECN |
- TUN_F_UFO;
/* TODO:
* 1. verify we can get/set vnet_hdr_len, tap_probe_vnet_hdr_len
@@ -90,13 +124,7 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
goto error;
}
- /* TODO: before set the offload capabilities, we'd better (1) check
- * negotiated features to see if necessary to offload; (2) query tap
- * to see if it supports the offload capabilities.
- */
- if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0)
- PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
- strerror(errno));
+ vhost_kernel_tap_set_offload(tapfd, features);
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
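
The new vhost_kernel_tap_set_offload() helper requests only the offloads implied by the negotiated virtio features and degrades gracefully on older kernels. A standalone sketch of that probe-and-fallback sequence, assuming an already-open tap descriptor; the function name and return conventions are illustrative:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int
apply_tap_offloads(int tapfd, unsigned int offload)
{
	if (offload == 0)
		return 0;			/* nothing negotiated */

	/* Probe: kernels without TUNSETOFFLOAD answer EINVAL even for 0 */
	if (ioctl(tapfd, TUNSETOFFLOAD, 0) != 0 && errno == EINVAL)
		return -ENOTSUP;

	if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0) {
		offload &= ~TUN_F_UFO;		/* retry without UFO */
		if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0)
			return -errno;
	}
	return 0;
}
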
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
index 01a026f5..e0e95b4f 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -36,4 +36,4 @@
#define PATH_NET_TUN "/dev/net/tun"
int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
- const char *mac);
+ const char *mac, uint64_t features);
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index ef6e43df..2c6eba0a 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -11,6 +11,9 @@
#include <string.h>
#include <errno.h>
+#include <rte_fbarray.h>
+#include <rte_eal_memconfig.h>
+
#include "vhost.h"
#include "virtio_user_dev.h"
@@ -121,133 +124,103 @@ fail:
return -1;
}
-struct hugepage_file_info {
- uint64_t addr; /**< virtual addr */
- size_t size; /**< the file size */
- char path[PATH_MAX]; /**< path to backing file */
+struct walk_arg {
+ struct vhost_memory *vm;
+ int *fds;
+ int region_nr;
};
-/* Two possible options:
- * 1. Match HUGEPAGE_INFO_FMT to find the file storing struct hugepage_file
- * array. This is simple but cannot be used in secondary process because
- * secondary process will close and munmap that file.
- * 2. Match HUGEFILE_FMT to find hugepage files directly.
- *
- * We choose option 2.
- */
static int
-get_hugepage_file_info(struct hugepage_file_info huges[], int max)
+update_memory_region(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
{
- int idx, k, exist;
- FILE *f;
- char buf[BUFSIZ], *tmp, *tail;
- char *str_underline, *str_start;
- int huge_index;
- uint64_t v_start, v_end;
- struct stat stats;
-
- f = fopen("/proc/self/maps", "r");
- if (!f) {
- PMD_DRV_LOG(ERR, "cannot open /proc/self/maps");
+ struct walk_arg *wa = arg;
+ struct vhost_memory_region *mr;
+ uint64_t start_addr, end_addr;
+ size_t offset;
+ int i, fd;
+
+ fd = rte_memseg_get_fd_thread_unsafe(ms);
+ if (fd < 0) {
+ PMD_DRV_LOG(ERR, "Failed to get fd, ms=%p rte_errno=%d",
+ ms, rte_errno);
return -1;
}
- idx = 0;
- while (fgets(buf, sizeof(buf), f) != NULL) {
- if (sscanf(buf, "%" PRIx64 "-%" PRIx64, &v_start, &v_end) < 2) {
- PMD_DRV_LOG(ERR, "Failed to parse address");
- goto error;
- }
+ if (rte_memseg_get_fd_offset_thread_unsafe(ms, &offset) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to get offset, ms=%p rte_errno=%d",
+ ms, rte_errno);
+ return -1;
+ }
- tmp = strchr(buf, ' ') + 1; /** skip address */
- tmp = strchr(tmp, ' ') + 1; /** skip perm */
- tmp = strchr(tmp, ' ') + 1; /** skip offset */
- tmp = strchr(tmp, ' ') + 1; /** skip dev */
- tmp = strchr(tmp, ' ') + 1; /** skip inode */
- while (*tmp == ' ') /** skip spaces */
- tmp++;
- tail = strrchr(tmp, '\n'); /** remove newline if exists */
- if (tail)
- *tail = '\0';
-
- /* Match HUGEFILE_FMT, aka "%s/%smap_%d",
- * which is defined in eal_filesystem.h
- */
- str_underline = strrchr(tmp, '_');
- if (!str_underline)
- continue;
+ start_addr = (uint64_t)(uintptr_t)ms->addr;
+ end_addr = start_addr + ms->len;
- str_start = str_underline - strlen("map");
- if (str_start < tmp)
+ for (i = 0; i < wa->region_nr; i++) {
+ if (wa->fds[i] != fd)
continue;
- if (sscanf(str_start, "map_%d", &huge_index) != 1)
- continue;
+ mr = &wa->vm->regions[i];
- /* skip duplicated file which is mapped to different regions */
- for (k = 0, exist = -1; k < idx; ++k) {
- if (!strcmp(huges[k].path, tmp)) {
- exist = k;
- break;
- }
- }
- if (exist >= 0)
- continue;
+ if (mr->userspace_addr + mr->memory_size < end_addr)
+ mr->memory_size = end_addr - mr->userspace_addr;
- if (idx >= max) {
- PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
- goto error;
+ if (mr->userspace_addr > start_addr) {
+ mr->userspace_addr = start_addr;
+ mr->guest_phys_addr = start_addr;
}
- huges[idx].addr = v_start;
- huges[idx].size = v_end - v_start; /* To be corrected later */
- snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
- idx++;
+ if (mr->mmap_offset > offset)
+ mr->mmap_offset = offset;
+
+ PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
+ " addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
+ mr->mmap_offset, mr->userspace_addr,
+ mr->memory_size);
+
+ return 0;
}
- /* correct the size for files who have many regions */
- for (k = 0; k < idx; ++k) {
- if (stat(huges[k].path, &stats) < 0) {
- PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n",
- huges[k].path, strerror(errno));
- continue;
- }
- huges[k].size = stats.st_size;
- PMD_DRV_LOG(INFO, "file %s, size %zx\n",
- huges[k].path, huges[k].size);
+ if (i >= VHOST_MEMORY_MAX_NREGIONS) {
+ PMD_DRV_LOG(ERR, "Too many memory regions");
+ return -1;
}
- fclose(f);
- return idx;
+ mr = &wa->vm->regions[i];
+ wa->fds[i] = fd;
-error:
- fclose(f);
- return -1;
+ mr->guest_phys_addr = start_addr;
+ mr->userspace_addr = start_addr;
+ mr->memory_size = ms->len;
+ mr->mmap_offset = offset;
+
+ PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
+ " addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
+ mr->mmap_offset, mr->userspace_addr,
+ mr->memory_size);
+
+ wa->region_nr++;
+
+ return 0;
}
static int
prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
{
- int i, num;
- struct hugepage_file_info huges[VHOST_MEMORY_MAX_NREGIONS];
- struct vhost_memory_region *mr;
+ struct walk_arg wa;
- num = get_hugepage_file_info(huges, VHOST_MEMORY_MAX_NREGIONS);
- if (num < 0) {
- PMD_INIT_LOG(ERR, "Failed to prepare memory for vhost-user");
- return -1;
- }
+ wa.region_nr = 0;
+ wa.vm = &msg->payload.memory;
+ wa.fds = fds;
- for (i = 0; i < num; ++i) {
- mr = &msg->payload.memory.regions[i];
- mr->guest_phys_addr = huges[i].addr; /* use vaddr! */
- mr->userspace_addr = huges[i].addr;
- mr->memory_size = huges[i].size;
- mr->mmap_offset = 0;
- fds[i] = open(huges[i].path, O_RDWR);
- }
+ /*
+ * The memory lock has already been taken by memory subsystem
+ * or virtio_user_start_device().
+ */
+ if (rte_memseg_walk_thread_unsafe(update_memory_region, &wa) < 0)
+ return -1;
- msg->payload.memory.nregions = num;
+ msg->payload.memory.nregions = wa.region_nr;
msg->payload.memory.padding = 0;
return 0;
@@ -280,7 +253,7 @@ vhost_user_sock(struct virtio_user_dev *dev,
int need_reply = 0;
int fds[VHOST_MEMORY_MAX_NREGIONS];
int fd_num = 0;
- int i, len;
+ int len;
int vhostfd = dev->vhostfd;
RTE_SET_USED(m);
@@ -364,10 +337,6 @@ vhost_user_sock(struct virtio_user_dev *dev,
return -1;
}
- if (req == VHOST_USER_SET_MEM_TABLE)
- for (i = 0; i < fd_num; ++i)
- close(fds[i]);
-
if (need_reply) {
if (vhost_user_read(vhostfd, &msg) < 0) {
PMD_DRV_LOG(ERR, "Received msg failed: %s",
@@ -497,7 +466,7 @@ vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
return 0;
}
-struct virtio_user_backend_ops ops_user = {
+struct virtio_user_backend_ops virtio_ops_user = {
.setup = vhost_user_setup,
.send_request = vhost_user_sock,
.enable_qp = vhost_user_enable_queue_pair
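
prepare_vhost_memory_user() no longer parses /proc/self/maps; it asks the memory subsystem for the fd and file offset backing each memseg and merges segments that share a file into one region. A small sketch, assuming the memory hotplug lock is already held, that simply dumps what those accessors return; the dump itself is illustrative, not part of the patch:

#include <stdio.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_errno.h>

static int
dump_memseg_fd(const struct rte_memseg_list *msl __rte_unused,
	       const struct rte_memseg *ms, void *arg __rte_unused)
{
	size_t offset;
	int fd = rte_memseg_get_fd_thread_unsafe(ms);

	if (fd < 0 || rte_memseg_get_fd_offset_thread_unsafe(ms, &offset) < 0)
		return -1;	/* rte_errno carries the reason */

	printf("va=%p len=%zu fd=%d offset=%zu\n", ms->addr, ms->len, fd, offset);
	return 0;
}

/* Usage: rte_memseg_walk_thread_unsafe(dump_memseg_fd, NULL); */
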
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 7df600b0..b4997ee3 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -13,6 +13,8 @@
#include <sys/types.h>
#include <sys/stat.h>
+#include <rte_eal_memconfig.h>
+
#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
@@ -109,9 +111,24 @@ is_vhost_user_by_type(const char *path)
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
uint64_t features;
int ret;
+ /*
+ * XXX workaround!
+ *
+ * We need to make sure that the locks will be
+ * taken in the correct order to avoid deadlocks.
+ *
+ * Before releasing this lock, this thread should
+ * not trigger any memory hotplug events.
+ *
+ * This is a temporary workaround, and should be
+ * replaced when we get proper support from the
+ * memory subsystem in the future.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
pthread_mutex_lock(&dev->mutex);
if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
@@ -152,10 +169,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
dev->started = true;
pthread_mutex_unlock(&dev->mutex);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
return 0;
error:
pthread_mutex_unlock(&dev->mutex);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
/* TODO: free resource here or caller to check */
return -1;
}
@@ -282,8 +301,14 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
void *arg)
{
struct virtio_user_dev *dev = arg;
+ struct rte_memseg_list *msl;
uint16_t i;
+ /* ignore externally allocated memory */
+ msl = rte_mem_virt2memseg_list(addr);
+ if (msl->external)
+ return;
+
pthread_mutex_lock(&dev->mutex);
if (dev->started == false)
@@ -319,12 +344,12 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
return -1;
}
- dev->ops = &ops_user;
+ dev->ops = &virtio_ops_user;
} else {
if (is_vhost_user_by_type(dev->path)) {
- dev->ops = &ops_user;
+ dev->ops = &virtio_ops_user;
} else {
- dev->ops = &ops_kernel;
+ dev->ops = &virtio_ops_kernel;
dev->vhostfds = malloc(dev->max_queue_pairs *
sizeof(int));
@@ -530,13 +555,11 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
/* Server mode can't enable queue pairs if vhostfd is invalid,
* always return 0 in this case.
*/
- if (dev->vhostfd >= 0) {
+ if (!dev->is_server || dev->vhostfd >= 0) {
for (i = 0; i < q_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 1);
for (i = q_pairs; i < dev->max_queue_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 0);
- } else if (!dev->is_server) {
- ret = ~0;
}
dev->queue_pairs = q_pairs;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 525d16ca..b51cbc85 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -422,7 +422,6 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
if (!dev) {
PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
rte_eth_dev_release_port(eth_dev);
- rte_free(hw);
return NULL;
}
@@ -449,7 +448,6 @@ virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
struct virtio_hw *hw = data->dev_private;
rte_free(hw->virtio_user_dev);
- rte_free(hw);
rte_eth_dev_release_port(eth_dev);
}
@@ -637,7 +635,6 @@ end:
return ret;
}
-/** Called by rte_eth_dev_detach() */
static int
virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
@@ -662,7 +659,6 @@ virtio_user_pmd_remove(struct rte_vdev_device *vdev)
dev = hw->virtio_user_dev;
virtio_user_dev_uninit(dev);
- rte_free(eth_dev->data->dev_private);
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/vmxnet3/meson.build b/drivers/net/vmxnet3/meson.build
new file mode 100644
index 00000000..a92bd286
--- /dev/null
+++ b/drivers/net/vmxnet3/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+allow_experimental_apis = true
+sources += files(
+ 'vmxnet3_ethdev.c',
+ 'vmxnet3_rxtx.c',
+)
+
+error_cflags = [
+ '-Wno-unused-parameter', '-Wno-unused-value',
+ '-Wno-strict-aliasing', '-Wno-format-extra-args',
+]
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 2613cd13..41bcd450 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -57,8 +57,7 @@
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_CRC_STRIP)
+ DEV_RX_OFFLOAD_JUMBO_FRAME)
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
@@ -78,6 +77,7 @@ static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
+static void vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats,
unsigned int n);
@@ -120,6 +120,7 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.stats_get = vmxnet3_dev_stats_get,
.xstats_get_names = vmxnet3_dev_xstats_get_names,
.xstats_get = vmxnet3_dev_xstats_get,
+ .stats_reset = vmxnet3_dev_stats_reset,
.mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
@@ -160,8 +161,8 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
char z_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
- snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->device->driver->name, dev->data->port_id, post_string);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
+ dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
if (!reuse) {
@@ -335,6 +336,10 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
+ /* clear snapshot stats */
+ memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
+ memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));
+
/* set the initial link status */
memset(&link, 0, sizeof(link));
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -363,9 +368,6 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
return 0;
}
@@ -890,7 +892,49 @@ vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);
-#undef VMXNET3_UPDATE_RX_STATS
+#undef VMXNET3_UPDATE_RX_STAT
+}
+
+static void
+vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_TxStats *res)
+{
+ vmxnet3_hw_tx_stats_get(hw, q, res);
+
+#define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r) \
+ ((r)->f -= (h)->snapshot_tx_stats[(i)].f)
+
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
+ VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);
+
+#undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
+}
+
+static void
+vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
+ struct UPT1_RxStats *res)
+{
+ vmxnet3_hw_rx_stats_get(hw, q, res);
+
+#define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r) \
+ ((r)->f -= (h)->snapshot_rx_stats[(i)].f)
+
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
+ VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);
+
+#undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
}
static void
@@ -1005,7 +1049,7 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
for (i = 0; i < hw->num_tx_queues; i++) {
- vmxnet3_hw_tx_stats_get(hw, i, &txStats);
+ vmxnet3_tx_stats_get(hw, i, &txStats);
stats->q_opackets[i] = txStats.ucastPktsTxOK +
txStats.mcastPktsTxOK +
@@ -1022,7 +1066,7 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
for (i = 0; i < hw->num_rx_queues; i++) {
- vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
+ vmxnet3_rx_stats_get(hw, i, &rxStats);
stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
rxStats.mcastPktsRxOK +
@@ -1044,6 +1088,30 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static void
+vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct UPT1_TxStats txStats;
+ struct UPT1_RxStats rxStats;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
+
+ for (i = 0; i < hw->num_tx_queues; i++) {
+ vmxnet3_hw_tx_stats_get(hw, i, &txStats);
+ memcpy(&hw->snapshot_tx_stats[i], &txStats,
+ sizeof(hw->snapshot_tx_stats[0]));
+ }
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
+ memcpy(&hw->snapshot_rx_stats[i], &rxStats,
+ sizeof(hw->snapshot_rx_stats[0]));
+ }
+}
+
+static void
vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
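
vmxnet3 hardware counters cannot be cleared, so the new stats_reset callback stores a per-queue snapshot and stats_get reports the delta against it. The idea in isolation, with illustrative types rather than the UPT1_* structures:

#include <stdint.h>
#include <string.h>

struct counters { uint64_t packets; uint64_t bytes; };

static struct counters hw_total;	/* what the device reports (monotonic) */
static struct counters snapshot;	/* value captured at the last reset */

static void
stats_reset(void)
{
	/* remember the current hardware totals */
	memcpy(&snapshot, &hw_total, sizeof(snapshot));
}

static void
stats_get(struct counters *out)
{
	/* report only what accumulated since the last reset */
	out->packets = hw_total.packets - snapshot.packets;
	out->bytes = hw_total.bytes - snapshot.bytes;
}
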
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index d3f2b352..5bc3a84c 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -98,6 +98,9 @@ struct vmxnet3_hw {
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
UPT1_TxStats saved_tx_stats[VMXNET3_MAX_TX_QUEUES];
UPT1_RxStats saved_rx_stats[VMXNET3_MAX_RX_QUEUES];
+
+ UPT1_TxStats snapshot_tx_stats[VMXNET3_MAX_TX_QUEUES];
+ UPT1_RxStats snapshot_rx_stats[VMXNET3_MAX_RX_QUEUES];
};
#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
diff --git a/drivers/raw/dpaa2_cmdif/Makefile b/drivers/raw/dpaa2_cmdif/Makefile
index 9b863dda..9bd5ff22 100644
--- a/drivers/raw/dpaa2_cmdif/Makefile
+++ b/drivers/raw/dpaa2_cmdif/Makefile
@@ -21,10 +21,11 @@ LDLIBS += -lrte_eal
LDLIBS += -lrte_kvargs
LDLIBS += -lrte_mempool_dpaa2
LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_common_dpaax
EXPORT_MAP := rte_pmd_dpaa2_cmdif_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
diff --git a/drivers/raw/dpaa2_cmdif/meson.build b/drivers/raw/dpaa2_cmdif/meson.build
index 1d146872..37bb24a1 100644
--- a/drivers/raw/dpaa2_cmdif/meson.build
+++ b/drivers/raw/dpaa2_cmdif/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
deps += ['rawdev', 'mempool_dpaa2', 'bus_vdev']
sources = files('dpaa2_cmdif.c')
diff --git a/drivers/raw/dpaa2_qdma/Makefile b/drivers/raw/dpaa2_qdma/Makefile
index d88809ea..bdd99c97 100644
--- a/drivers/raw/dpaa2_qdma/Makefile
+++ b/drivers/raw/dpaa2_qdma/Makefile
@@ -22,10 +22,11 @@ LDLIBS += -lrte_mempool
LDLIBS += -lrte_mempool_dpaa2
LDLIBS += -lrte_rawdev
LDLIBS += -lrte_ring
+LDLIBS += -lrte_common_dpaax
EXPORT_MAP := rte_pmd_dpaa2_qdma_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index 2787d302..f474442d 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -34,10 +34,10 @@ static struct qdma_hw_queue_list qdma_queue_list
= TAILQ_HEAD_INITIALIZER(qdma_queue_list);
/* QDMA Virtual Queues */
-struct qdma_virt_queue *qdma_vqs;
+static struct qdma_virt_queue *qdma_vqs;
/* QDMA per core data */
-struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
+static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
@@ -805,7 +805,7 @@ dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
DPAA2_QDMA_ERR("dmdmai disable failed");
/* Set up the DQRR storage for Rx */
- for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
if (rxq->q_storage) {
@@ -856,17 +856,17 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
ret);
goto init_err;
}
- dpdmai_dev->num_queues = attr.num_of_priorities;
+ dpdmai_dev->num_queues = attr.num_of_queues;
/* Set up Rx Queues */
- for (i = 0; i < attr.num_of_priorities; i++) {
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
struct dpaa2_queue *rxq;
memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
CMD_PRI_LOW,
dpdmai_dev->token,
- i, &rx_queue_cfg);
+ i, 0, &rx_queue_cfg);
if (ret) {
DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
ret);
@@ -893,9 +893,9 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
}
/* Get Rx and Tx queues FQID's */
- for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
- dpdmai_dev->token, i, &rx_attr);
+ dpdmai_dev->token, i, 0, &rx_attr);
if (ret) {
DPAA2_QDMA_ERR("Reading device failed with err: %d",
ret);
@@ -904,7 +904,7 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
- dpdmai_dev->token, i, &tx_attr);
+ dpdmai_dev->token, i, 0, &tx_attr);
if (ret) {
DPAA2_QDMA_ERR("Reading device failed with err: %d",
ret);
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
index c6a05780..0cbe9025 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -11,6 +11,8 @@ struct qdma_io_meta;
#define DPAA2_QDMA_MAX_FLE 3
#define DPAA2_QDMA_MAX_SDD 2
+#define DPAA2_DPDMAI_MAX_QUEUES 8
+
/** FLE pool size: 3 Frame list + 2 source/destination descriptor */
#define QDMA_FLE_POOL_SIZE (sizeof(struct qdma_io_meta) + \
sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
@@ -142,9 +144,9 @@ struct dpaa2_dpdmai_dev {
/** Number of queue in this DPDMAI device */
uint8_t num_queues;
/** RX queues */
- struct dpaa2_queue rx_queue[DPDMAI_PRIO_NUM];
+ struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
/** TX queues */
- struct dpaa2_queue tx_queue[DPDMAI_PRIO_NUM];
+ struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
};
#endif /* __DPAA2_QDMA_H__ */
diff --git a/drivers/raw/dpaa2_qdma/meson.build b/drivers/raw/dpaa2_qdma/meson.build
index b6a081f1..2a4b69c1 100644
--- a/drivers/raw/dpaa2_qdma/meson.build
+++ b/drivers/raw/dpaa2_qdma/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
deps += ['rawdev', 'mempool_dpaa2', 'ring']
sources = files('dpaa2_qdma.c')
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
index f0939dc3..848e5183 100644
--- a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
@@ -104,14 +104,14 @@ static struct feature_info port_features[] = {
.resource_size = sizeof(struct feature_port_header),
.feature_index = PORT_FEATURE_ID_HEADER,
.revision_id = PORT_HEADER_REVISION,
- .ops = &port_hdr_ops,
+ .ops = &ifpga_rawdev_port_hdr_ops,
},
{
.name = PORT_FEATURE_ERR,
.resource_size = sizeof(struct feature_port_error),
.feature_index = PORT_FEATURE_ID_ERROR,
.revision_id = PORT_ERR_REVISION,
- .ops = &port_error_ops,
+ .ops = &ifpga_rawdev_port_error_ops,
},
{
.name = PORT_FEATURE_UMSG,
@@ -124,14 +124,14 @@ static struct feature_info port_features[] = {
.resource_size = sizeof(struct feature_port_uint),
.feature_index = PORT_FEATURE_ID_UINT,
.revision_id = PORT_UINT_REVISION,
- .ops = &port_uint_ops,
+ .ops = &ifpga_rawdev_port_uint_ops,
},
{
.name = PORT_FEATURE_STP,
.resource_size = PORT_FEATURE_STP_REGION_SIZE,
.feature_index = PORT_FEATURE_ID_STP,
.revision_id = PORT_STP_REVISION,
- .ops = &port_stp_ops,
+ .ops = &ifpga_rawdev_port_stp_ops,
},
{
.name = PORT_FEATURE_UAFU,
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
index 7a39a580..4391f2fd 100644
--- a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
@@ -156,10 +156,10 @@ struct fpga_uafu_irq_set {
int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
-extern struct feature_ops port_hdr_ops;
-extern struct feature_ops port_error_ops;
-extern struct feature_ops port_stp_ops;
-extern struct feature_ops port_uint_ops;
+extern struct feature_ops ifpga_rawdev_port_hdr_ops;
+extern struct feature_ops ifpga_rawdev_port_error_ops;
+extern struct feature_ops ifpga_rawdev_port_stp_ops;
+extern struct feature_ops ifpga_rawdev_port_uint_ops;
/* help functions for feature ops */
int fpga_msix_set_block(struct feature *feature, unsigned int start,
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port.c b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
index a962f5b4..8b5668d4 100644
--- a/drivers/raw/ifpga_rawdev/base/ifpga_port.c
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
@@ -326,7 +326,7 @@ static int port_hdr_set_prop(struct feature *feature, struct feature_prop *prop)
return -ENOENT;
}
-struct feature_ops port_hdr_ops = {
+struct feature_ops ifpga_rawdev_port_hdr_ops = {
.init = port_hdr_init,
.uinit = port_hdr_uinit,
.get_prop = port_hdr_get_prop,
@@ -354,7 +354,7 @@ static void port_stp_uinit(struct feature *feature)
dev_info(NULL, "port stp uinit.\n");
}
-struct feature_ops port_stp_ops = {
+struct feature_ops ifpga_rawdev_port_stp_ops = {
.init = port_stp_init,
.uinit = port_stp_uinit,
};
@@ -382,7 +382,7 @@ static void port_uint_uinit(struct feature *feature)
dev_info(NULL, "PORT UINT UInit.\n");
}
-struct feature_ops port_uint_ops = {
+struct feature_ops ifpga_rawdev_port_uint_ops = {
.init = port_uint_init,
.uinit = port_uint_uinit,
};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
index 23db562b..9dd1cf59 100644
--- a/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
@@ -136,7 +136,7 @@ static int port_error_set_prop(struct feature *feature,
return -ENOENT;
}
-struct feature_ops port_error_ops = {
+struct feature_ops ifpga_rawdev_port_error_ops = {
.init = port_error_init,
.uinit = port_error_uinit,
.get_prop = port_error_get_prop,
diff --git a/drivers/raw/ifpga_rawdev/base/meson.build b/drivers/raw/ifpga_rawdev/base/meson.build
index cb655352..03f5112c 100644
--- a/drivers/raw/ifpga_rawdev/base/meson.build
+++ b/drivers/raw/ifpga_rawdev/base/meson.build
@@ -18,8 +18,8 @@ sources = [
]
error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
- '-Wno-format', '-Wno-unused-but-set-variable',
- '-Wno-strict-aliasing'
+ '-Wno-format', '-Wno-error=format-security',
+ '-Wno-strict-aliasing', '-Wno-unused-but-set-variable'
]
c_args = cflags
foreach flag: error_cflags
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
index 3fed0578..32e318fc 100644
--- a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
@@ -542,6 +542,7 @@ ifpga_cfg_probe(struct rte_vdev_device *dev)
int port;
char *name = NULL;
char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
+ int ret = -1;
devargs = dev->device.devargs;
@@ -583,7 +584,7 @@ ifpga_cfg_probe(struct rte_vdev_device *dev)
snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
port, name);
- rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
+ ret = rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
dev_name, devargs->args);
end:
if (kvlist)
@@ -591,7 +592,7 @@ end:
if (name)
free(name);
- return 0;
+ return ret;
}
static int
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
index 6518a2d9..d7630fc6 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
@@ -32,7 +32,7 @@
int skeleton_pmd_logtype;
/* Count of instances */
-uint16_t skeldev_init_once;
+static uint16_t skeldev_init_once;
/**< Rawdev Skeleton dummy driver name */
#define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
index 3405b898..359c9e29 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
@@ -294,13 +294,14 @@ test_rawdev_attr_set_get(void)
"Attribute (Test1) not set correctly (%" PRIu64 ")",
ret_value);
+ free(dummy_value);
+
ret_value = 0;
ret = rte_rawdev_get_attr(TEST_DEV_ID, "Test2", &ret_value);
RTE_TEST_ASSERT_EQUAL(*((int *)(uintptr_t)ret_value), 200,
"Attribute (Test2) not set correctly (%" PRIu64 ")",
ret_value);
- free(dummy_value);
return TEST_SUCCESS;
}